[ { "id": 89412, "commit_id": "583a7ec15744b2ca8a9c56df484516111dbf783d", "repo": "sentry", "path": "tests/sentry/rules/history/test_preview.py", "file_name": "test_preview.py", "fun_name": "test_frequency_condition_alone", "commit_message": "feat(alert-preview): last triggered (#42098)\n\nAttaches `last_triggered` to group info. `preview` now returns a mapping\r\nof group_ids to triggers, updated tests to reflect that.", "code": "def test_frequency_condition_alone(self):\n prev_hour = timezone.now() - timedelta(hours=1)\n group = None\n for i in range(5):\n group = self.store_event(\n project_id=self.project.id, data={\"timestamp\": iso_format(prev_hour)}\n ).group\n conditions = [\n {\n \"id\": \"sentry.rules.conditions.event_frequency.EventFrequencyCondition\",\n \"value\": 4,\n \"interval\": \"5m\",\n }\n ]\n result = preview(self.project, conditions, [], *MATCH_ARGS)\n assert group.id in result\n\n conditions[0][\"value\"] = 5\n result = preview(self.project, conditions, [], *MATCH_ARGS)\n assert group.id not in result\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 230, "n_words": 57, "vocab_size": 39, "complexity": 2, "nloc": 19, "token_counts": 129, "n_ast_nodes": 207, "n_identifiers": 20, "random_cut": "def test_frequency_condition_alone(self):\n prev_hour = timezone.now() - timedelta(hours=1)\n group = None\n for i in range(5):\n group = self.store_event(\n project_id=self.project.id, data={\"timestamp\": iso_format(prev_hour)}\n ).group\n conditions = [\n {\n " }, { "id": 153032, "commit_id": "be2716f393fddd2f669f26616f80e051fc7ceee6", "repo": "modin", "path": "modin/pandas/test/test_series.py", "file_name": "test_series.py", "fun_name": "test_expanding", "commit_message": "TEST-#3655: Check that Modin is defaulting to Pandas. 
(#3656)\n\nCo-authored-by: Dmitry Chigarev <62142979+dchigarev@users.noreply.github.com>\r\nCo-authored-by: Devin Petersohn \r\nSigned-off-by: mvashishtha ", "code": "def test_expanding(data):\n modin_series, _ = create_test_series(data) # noqa: F841\n with warns_that_defaulting_to_pandas():\n modin_series.expanding()\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 27, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 23, "n_ast_nodes": 67, "n_identifiers": 13, "random_cut": "def test_expanding(data):\n modin_series, _ = create_" }, { "id": 164047, "commit_id": "f46df091df3afea25a273f491d1f6b2c7d20b32c", "repo": "pandas", "path": "pandas/tests/frame/test_query_eval.py", "file_name": "test_query_eval.py", "fun_name": "setup_method", "commit_message": "TST: Remove unused fixtures (#45692)\n\n* TST: Remove unused fixtures\r\n\r\n* Undo a removed fixture\r\n\r\n* Add back other fixtures\r\n\r\n* Undo a file\r\n\r\n* Try undoing this?\r\n\r\n* Revert \"Try undoing this?\"\r\n\r\nThis reverts commit 0e56cb04f5e8cb1f7b2ac4c5e6191485bb2fe1ab.", "code": "def setup_method(self):\n self.df = DataFrame({\"A\": [1, 2, 3]})\n self.expected1 = self.df[self.df.A > 0]\n self.expected2 = self.df.A + 1\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 38, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 4, "token_counts": 50, "n_ast_nodes": 78, "n_identifiers": 7, "random_cut": "def setup_method(self):\n self.df = DataFrame({\"A\": [1, 2, 3]})\n self.expected1 = self.df[self.df.A > 0]\n self.expected2 = self.df.A + 1" }, { "id": 208362, "commit_id": "87613c780ccd92c8b2694becfb50511a6052e8f1", "repo": "celery", "path": "t/integration/test_canvas.py", "file_name": "test_canvas.py", "fun_name": "test_chaining_upgraded_chords_mixed_canvas", "commit_message": "Fixed bug when chaining a chord with a group (#7919)\n\n* Reproduced Bug from Issue #5958\r\n\r\n* Fixed Issue #5958\r\n\r\n* Added unit test: test_chord__or__group_of_single_task()\r\n\r\n* Added unit test: test_chord_upgrade_on_chaining()\r\n\r\n* Added unit test: test_chain_of_chord__or__group_of_single_task()\r\n\r\n* Added unit test: test_chain_of_chord_upgrade_on_chaining()", "code": "def test_chaining_upgraded_chords_mixed_canvas(self, manager, subtests):\n \n try:\n manager.app.backend.ensure_chords_allowed()\n except NotImplementedError as e:\n raise pytest.skip(e.args[0])\n\n if not manager.app.conf.result_backend.startswith('redis'):\n raise pytest.skip('Requires redis result backend.')\n\n redis_connection = get_redis_connection()\n redis_key = 'echo_chamber'\n\n c = chain(\n chord(group([redis_echo.si('1', redis_key=redis_key),\n redis_echo.si('2', redis_key=redis_key),\n redis_echo.si('3', redis_key=redis_key)]),\n group([redis_echo.si('4', redis_key=redis_key),\n redis_echo.si('5', redis_key=redis_key),\n redis_echo.si('6', redis_key=redis_key)])),\n redis_echo.si('7', redis_key=redis_key),\n group(\n redis_echo.si('8', redis_key=redis_key),\n ),\n redis_echo.si('9', redis_key=redis_key),\n redis_echo.si('Done', redis_key='Done'),\n )\n\n with subtests.test(msg='Run the chain and wait for completion'):\n redis_connection.delete(redis_key, 'Done')\n 
c.delay().get(timeout=TIMEOUT)\n await_redis_list_message_length(1, redis_key='Done', timeout=10)\n\n with subtests.test(msg='All tasks are executed once'):\n actual = [sig.decode('utf-8') for sig in redis_connection.lrange(redis_key, 0, -1)]\n expected = [str(i) for i in range(1, 10)]\n with subtests.test(msg='All tasks are executed once'):\n assert sorted(actual) == sorted(expected)\n\n # Cleanup\n redis_connection.delete(redis_key, 'Done')\n\n", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 495, "n_words": 103, "vocab_size": 80, "complexity": 5, "nloc": 33, "token_counts": 321, "n_ast_nodes": 532, "n_identifiers": 41, "random_cut": "def test_chaining_upgraded_chords_mixed_canvas(self, manager, subtests):\n \n try:\n manager.app.backend.ensure_chords_allowed()\n except NotImplementedError as e:\n raise pytest.skip(e.args[0])\n\n if not manager.app.conf.result_backend.startswith('redis'):\n raise pytest.skip('Requires redis result backend.')\n\n redis_connection = get_redis_connection()\n redis_key = 'echo_chamber'\n\n c = chain(\n chord(group([redis_echo.si('1', redis_key=redis_key),\n redis_echo.si('2', redis_key=redis_key),\n redis_echo.si('3', redis_key=redis_key)]),\n group([redis_echo.si('4', redis_key=redis_key),\n redis_echo.si('5', redis_key=redis_key),\n redis_echo.si('6', redis_key=redis_key)])),\n redis_echo.si('7', redis_key=redis_key),\n group(\n redis_echo.si('8', redis_key=redis_key),\n ),\n redis_echo.si('9', redis_key=redis_key),\n redis_echo.si('Done', redis_key='Done'),\n )\n\n with subtests.test(msg='Run the chain and wait for completion'):\n redis_connection.delete(redis_key, 'Done')\n c.delay().get(timeout=TIMEOUT)\n await_redis_list_message_length(1, redis_key='Done', timeout=10)\n\n with subtests.test(msg='All tasks are executed once'):\n actual = [sig.decode('utf-8') for sig" }, { "id": 121216, "commit_id": "ae4aee762a6ab18b17d61b68d8ee32d2c4e3b957", "repo": "jax", "path": "jax/experimental/jax2tf/impl_no_xla.py", "file_name": "impl_no_xla.py", "fun_name": "_pad_spatial_dims", "commit_message": "[jax2tf] Fix conv1d padding; it's already normalized before the _pad_spatial_dims call. 
Enable non-XLA tests of conv1d.\n\nPiperOrigin-RevId: 461556553", "code": "def _pad_spatial_dims(x, x_shape, padding):\n \n # Add empty padding for batch and feature dimensions.\n no_pad = ((0, 0),)\n padding = tuple(padding)\n padding = no_pad + padding + no_pad\n x = tf.pad(x, padding)\n assert len(x.shape) == len(padding)\n x_shape = tuple(p0 + xs + p1 for xs, (p0, p1) in zip(x_shape, padding))\n jax2tf._assert_matching_abstract_shape(x, x_shape)\n return x, x_shape\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 64, "n_words": 54, "vocab_size": 40, "complexity": 2, "nloc": 9, "token_counts": 92, "n_ast_nodes": 141, "n_identifiers": 16, "random_cut": "def _pad_spatial_dims(x, x_shape, padding):\n \n # Add empty padding for batch and feature dimensions.\n no_pad = ((0, 0),)\n padding = tuple(padding)\n padding = no_pad + padding + no_pad\n x = tf.pad(x, padding)\n assert len(x.shape) == len(p" }, { "id": 63913, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/urllib3/_collections.py", "file_name": "_collections.py", "fun_name": "iteritems", "commit_message": "upd; format", "code": "def iteritems(self):\n \n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 67, "n_words": 16, "vocab_size": 13, "complexity": 3, "nloc": 5, "token_counts": 39, "n_ast_nodes": 63, "n_identifiers": 7, "random_cut": "def iteritems(self):\n \n fo" }, { "id": 292026, "commit_id": "0bd0b4766e8221584a74bffc7c2f0430c23169df", "repo": "core", "path": "tests/components/sleepiq/conftest.py", "file_name": "conftest.py", "fun_name": "mock_sleepers", "commit_message": "Refactor sleepiq as async with config flow (#64850)\n\nCo-authored-by: J. 
Nick Koston ", "code": "def mock_sleepers():\n \n return [\n Sleeper(sleeper)\n for sleeper in json.loads(load_fixture(\"sleeper.json\", \"sleepiq\"))[\"sleepers\"]\n ]\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 34, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 29, "n_ast_nodes": 53, "n_identifiers": 6, "random_cut": "def mock_sleepers():\n \n return [\n Sleeper(sleeper)\n for sleeper in json.loads(load_fixture(\"sleeper.json\", \"sleepiq\"))[\"sleepers" }, { "id": 297951, "commit_id": "94755a5773f8197153ab9bffe83b9711f3a76d9d", "repo": "core", "path": "homeassistant/components/plugwise/coordinator.py", "file_name": "coordinator.py", "fun_name": "_async_update_data", "commit_message": "String formatting and max line length - Part 4 (#84445)\n\nCo-authored-by: jjlawren ", "code": "async def _async_update_data(self) -> PlugwiseData:\n \n try:\n if not self._connected:\n await self._connect()\n data = await self.api.async_update()\n except InvalidAuthentication as err:\n raise ConfigEntryError(\"Invalid username or Smile ID\") from err\n except (InvalidXMLError, ResponseError) as err:\n raise UpdateFailed(\n \"Invalid XML data, or error indication received for the Plugwise\"\n \" Adam/Smile/Stretch\"\n ) from err\n except UnsupportedDeviceError as err:\n raise ConfigEntryError(\"Device with unsupported firmware\") from err\n except ConnectionFailedError as err:\n raise UpdateFailed(\"Failed to connect to the Plugwise Smile\") from err\n return PlugwiseData(\n gateway=cast(GatewayData, data[0]),\n devices=cast(dict[str, DeviceData], data[1]),\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 282, "n_words": 82, "vocab_size": 59, "complexity": 6, "nloc": 21, "token_counts": 118, "n_ast_nodes": 197, "n_identifiers": 23, "random_cut": "async def _async_update_data(self) -> PlugwiseData:\n \n try:\n if not self._connected:\n await self._connect()\n data = await self.api.async_update()\n except InvalidAuthentication as err:\n raise ConfigEntryError(\"Invalid username or Smile ID\") from err\n except (InvalidXMLError, ResponseError) as err:\n raise UpdateFailed(\n \"Invalid XML data, or error indication received for the Plugwise\"\n \" Adam/Smile/Stretch\"\n ) from err" }, { "id": 121136, "commit_id": "98e71fe31de8f6ea26be76488d41fb471fef56eb", "repo": "jax", "path": "jax/_src/lax/lax.py", "file_name": "lax.py", "fun_name": "_iota_abstract_eval", "commit_message": "[dynamic-shapes] revive basic bounded int machinery, add tests", "code": "def _iota_abstract_eval(*, dtype, shape, dimension):\n _check_shapelike(\"iota\", \"shape\", shape)\n if not any(dtypes.issubdtype(dtype, t) for t in _num):\n msg = 'iota does not accept dtype {}. 
Accepted dtypes are subtypes of {}.'\n typename = str(np.dtype(dtype).name)\n accepted_typenames = (t.__name__ for t in _num)\n raise TypeError(msg.format(typename, ', '.join(accepted_typenames)))\n if not 0 <= dimension < len(shape):\n raise ValueError(\"iota dimension must be between 0 and len(shape), got \"\n f\"dimension={dimension} for shape {shape}\")\n if not any(isinstance(d, core.BInt) for d in shape):\n return ShapedArray(shape, dtype)\n # TODO(mattjj): unify DShapedArray with ShapedArray, and remove this code\n return core.DShapedArray(shape, dtype, False)\n\niota_p = Primitive('iota')\niota_p.def_impl(partial(xla.apply_primitive, iota_p))\niota_p.def_abstract_eval(_iota_abstract_eval)\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 137, "n_words": 97, "vocab_size": 77, "complexity": 7, "nloc": 13, "token_counts": 135, "n_ast_nodes": 264, "n_identifiers": 35, "random_cut": "def _iota_abstract_eval(*, dtype, shape, dimension):\n _check_shapelike(\"iota\", \"shape\", shape)\n if not any(dtypes.issubdtype(dtype, t) for t in _num):\n msg = 'iota does not accept dtyp" }, { "id": 190898, "commit_id": "d34fd16034e307b545c3e3adfa4d9d472a582cc6", "repo": "thumbor", "path": "thumbor/filters/redeye.py", "file_name": "redeye.py", "fun_name": "cascade", "commit_message": "Feature/optional opencv (#1400)\n\n* Removed opencv dependency\r\n\r\nNow OpenCV is optional and detectors are smart to\r\nskip if cv2 could not be imported.\r\nAlso refactored face detector a bit to make it more\r\nmaintainable.\r\nNow thumbor can be installed with\r\npip install thumbor\r\npip install thumbor[all]\r\npip install thumbor[opencv]\r\npip install thumbor[tests]", "code": "def cascade(self) -> None:\n if not hasattr(self, \"_cascade\"):\n setattr(self, \"_cascade\", cv2.CascadeClassifier(CASCADE_FILE_PATH))\n\n return getattr(self, \"_cascade\")\n", "url": "https://github.com/thumbor/thumbor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 38, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 4, "token_counts": 36, "n_ast_nodes": 61, "n_identifiers": 8, "random_cut": "def cascade(self) -> None:\n if not hasattr(self, \"_cascade\"):\n setattr(self, \"_cascade\", cv2.CascadeClassifier(CASCADE_FILE_PATH))\n\n return getattr(self, \"_cas" }, { "id": 205744, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/query.py", "file_name": "query.py", "fun_name": "__deepcopy__", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def __deepcopy__(self, memo):\n \n obj = self.__class__()\n for k, v in self.__dict__.items():\n if k == \"_result_cache\":\n obj.__dict__[k] = None\n else:\n obj.__dict__[k] = copy.deepcopy(v, memo)\n return obj\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 105, "n_words": 25, "vocab_size": 21, "complexity": 3, "nloc": 8, "token_counts": 60, "n_ast_nodes": 98, "n_identifiers": 11, "random_cut": "def __deepcopy__(self, memo):\n \n obj = self.__class__()\n for k, v in self." 
}, { "id": 125417, "commit_id": "8553df49bba654a9edd6befce198be90d6524fca", "repo": "ray", "path": "python/ray/data/_internal/lazy_block_list.py", "file_name": "lazy_block_list.py", "fun_name": "copy", "commit_message": "Make execution plan/blocklist aware of the memory ownership and who runs the plan (#26650)\n\nHaving the indicator about who's running the stage and who created a blocklist will enable the eager memory releasing.\r\n\r\nThis is an alternative with better abstraction to https://github.com/ray-project/ray/pull/26196.\r\n\r\nNote: this doesn't work for Dataset.split() yet, will do in a followup PR.", "code": "def copy(self) -> \"LazyBlockList\":\n return LazyBlockList(\n self._tasks.copy(),\n block_partition_refs=self._block_partition_refs.copy(),\n block_partition_meta_refs=self._block_partition_meta_refs.copy(),\n cached_metadata=self._cached_metadata,\n ray_remote_args=self._remote_args.copy(),\n owned_by_consumer=self._owned_by_consumer,\n stats_uuid=self._stats_uuid,\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 104, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 10, "token_counts": 67, "n_ast_nodes": 102, "n_identifiers": 16, "random_cut": "def copy(self) -> \"LazyBlockList\":\n return LazyBlockList(\n self._tasks.copy(),\n block_partition_refs=self._block_partition_refs.copy(),\n block_partition_meta_refs=self._block_partition_meta_refs.copy(),\n " }, { "id": 115589, "commit_id": "41f58415fbd45c9ce0fb47962949e40e488424c6", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/couchbase_handler/couchbase_handler.py", "file_name": "couchbase_handler.py", "fun_name": "get_tables", "commit_message": "Initial commit for the couchbase handler", "code": "def get_tables(self) -> Response:\n \n\n cluster = self.connect()\n bucket = cluster.bucket(self.bucket_name)\n \n collections = []\n\n for _scope in bucket.collections().get_all_scopes():\n for __collections in _scope.collections:\n collections.append(__collections.name)\n collections_ar = [\n [i] for i in collections\n ]\n \n df = pd.DataFrame(collections_ar, columns=['TABLE_NAME'])\n \n response = Response(\n RESPONSE_TYPE.TABLE,\n df\n )\n \n return response\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 212, "n_words": 43, "vocab_size": 31, "complexity": 4, "nloc": 19, "token_counts": 94, "n_ast_nodes": 152, "n_identifiers": 22, "random_cut": "def get_tables(self) -> Response:\n \n\n cluster = self.connect()\n bucket = cluster.bucket(self.bucket_name)\n \n collections = []\n\n for _scope in bucket.collections().get_all_scopes():\n for __collections in _scope.collections:\n collections.append(__collections.name)\n collections_ar = [\n [i] for i in collections\n ]\n \n df = pd.DataFrame(collections_ar, col" }, { "id": 105091, "commit_id": "1c1eaf96d5ef4623e36c9124d49e88ab476dd655", "repo": "datasets", "path": "datasets/quickdraw/quickdraw.py", "file_name": "quickdraw.py", "fun_name": "process_struct", "commit_message": "Add QuickDraw dataset (#3592)\n\n* Add QuickDraw dataset\r\n\r\n* Style\r\n\r\n* Add infos file, dummy data, improve script\r\n\r\n* Add info and dummy data\r\n\r\n* Test readme\r\n\r\n* Finish readme\r\n\r\n* Delete generate_dummy.py\r\n\r\n* Remove whitespace", "code": "def process_struct(fileobj):\n \n (key_id,) = struct.unpack(\"Q\", fileobj.read(8))\n (country_code,) = 
struct.unpack(\"2s\", fileobj.read(2))\n (recognized,) = struct.unpack(\"b\", fileobj.read(1))\n (timestamp,) = struct.unpack(\"I\", fileobj.read(4))\n (n_strokes,) = struct.unpack(\"H\", fileobj.read(2))\n drawing = []\n for _ in range(n_strokes):\n (n_points,) = struct.unpack(\"H\", fileobj.read(2))\n fmt = str(n_points) + \"B\"\n x = struct.unpack(fmt, fileobj.read(n_points))\n y = struct.unpack(fmt, fileobj.read(n_points))\n drawing.append({\"x\": list(x), \"y\": list(y)})\n\n return {\n \"key_id\": str(key_id),\n \"recognized\": recognized,\n \"timestamp\": datetime.fromtimestamp(timestamp),\n \"countrycode\": country_code.decode(\"utf-8\"),\n \"drawing\": drawing,\n }\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 163, "n_words": 63, "vocab_size": 49, "complexity": 2, "nloc": 20, "token_counts": 220, "n_ast_nodes": 365, "n_identifiers": 23, "random_cut": "def process_struct(fileobj):\n \n (key_id,) = struct.unpack(\"Q\", fileobj.read(8))\n (country_code,) = struct.unpack(\"2s\", fileobj.read(2))\n (recognized,) = struct.unpack(\"b\", fileobj.read(1))\n (timestamp,) = struct.unpack(\"I\", fileobj.read(4))\n (n_strokes,) = struct.unpack(\"H\", fileobj.read(2))\n drawing = []\n for _ in range(n_str" }, { "id": 106840, "commit_id": "5b8b7f267cfaf76a2a39a727ef31a62b3909a093", "repo": "visdom", "path": "py/visdom/__init__.py", "file_name": "__init__.py", "fun_name": "check_connection", "commit_message": "apply black py to all python files", "code": "def check_connection(self, timeout_seconds=0):\n \n while not self._has_connection() and timeout_seconds > 0:\n time.sleep(0.1)\n timeout_seconds -= 0.1\n print(\"waiting\")\n\n return self._has_connection()\n", "url": "https://github.com/fossasia/visdom.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 71, "n_words": 17, "vocab_size": 15, "complexity": 3, "nloc": 6, "token_counts": 45, "n_ast_nodes": 72, "n_identifiers": 7, "random_cut": "def check_connection(self, timeout_seconds=0):" }, { "id": 131615, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/test_namespace.py", "file_name": "test_namespace.py", "fun_name": "test_namespace_client", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_namespace_client():\n cluster = Cluster()\n cluster.add_node(num_cpus=4, ray_client_server_port=8080)\n cluster.wait_for_nodes(1)\n\n template = ", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "template = \"\"\"\nimport ray\nray.util.connect(\"{address}\", namespace=\"{namespace}\")@ray.remote", "n_ast_errors": 2, "ast_levels": 8, "n_whitespaces": 22, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 28, "token_counts": 104, "n_ast_nodes": 57, "n_identifiers": 10, "random_cut": "def test_namespace_client():\n cluster = Cluster()\n cluster.add_node(num_cpus=4, ray_client_server_port=8080)\n cluster.wait_for_nodes(1)\n\n template = " }, { "id": 201689, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/backends/base/test_operations.py", "file_name": "test_operations.py", "fun_name": "test_adapt_unknown_value_decimal", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_adapt_unknown_value_decimal(self):\n value = decimal.Decimal(\"3.14\")\n 
self.assertEqual(\n self.ops.adapt_unknown_value(value),\n self.ops.adapt_decimalfield_value(value),\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 51, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 6, "token_counts": 36, "n_ast_nodes": 59, "n_identifiers": 9, "random_cut": "def test_adapt_unknown_value_decimal(self):\n value = decimal.Decimal(\"3.14\")\n self.assertEqual(\n " }, { "id": 311059, "commit_id": "367521e369839e6504989603b1282c2ba31dad49", "repo": "core", "path": "tests/pylint/test_enforce_type_hints.py", "file_name": "test_enforce_type_hints.py", "fun_name": "test_regex_x_of_y_comma_z", "commit_message": "Adjust pylint plugin to enforce device_tracker type hints (#64903)\n\n* Adjust pylint plugin to enforce device_tracker type hints\r\n\r\n* Use a constant for the type hint matchers\r\n\r\n* Add tests\r\n\r\n* Add x_of_y match\r\n\r\n* Adjust bluetooth_tracker\r\n\r\n* Adjust mysensors\r\n\r\n* Adjust tile\r\n\r\nCo-authored-by: epenet ", "code": "def test_regex_x_of_y_comma_z(string, expected_x, expected_y, expected_z):\n \n assert (match := _TYPE_HINT_MATCHERS[\"x_of_y_comma_z\"].match(string))\n assert match.group(0) == string\n assert match.group(1) == expected_x\n assert match.group(2) == expected_y\n assert match.group(3) == expected_z\n\n\n@pytest.mark.parametrize(\n (\"string\", \"expected_a\", \"expected_b\"),\n [(\"DiscoveryInfoType | None\", \"DiscoveryInfoType\", \"None\")],\n)", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n (\"string\", \"expected_a\", \"expected_b\"),\n [(\"DiscoveryInfoType | None\", \"DiscoveryInfoType\", \"None\")],\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 57, "n_words": 35, "vocab_size": 28, "complexity": 1, "nloc": 6, "token_counts": 62, "n_ast_nodes": 145, "n_identifiers": 11, "random_cut": "def test_regex_x_of_y_comma_z(string, expected_x, expected_y, expected_z):\n \n " }, { "id": 278599, "commit_id": "5cf72f4934f3104ac2378c8b9b3638afea38ba1e", "repo": "keras", "path": "keras/engine/base_layer_v1.py", "file_name": "base_layer_v1.py", "fun_name": "add_update", "commit_message": "fix the rest", "code": "def add_update(self, updates):\n \n call_context = base_layer_utils.call_context()\n\n if (\n tf.distribute.has_strategy()\n and tf.distribute.in_cross_replica_context()\n # When saving the model, the distribution strategy context should be\n # ignored, following the default path for adding updates.\n and not call_context.saving\n ):\n # Updates don't need to be run in a cross-replica context.\n return\n\n updates = generic_utils.to_list(updates)\n\n if call_context.in_call:\n relevant_inputs = call_context.inputs\n else:\n inbound_nodes = getattr(self, \"_inbound_nodes\", [])\n relevant_inputs = [node.input_tensors for node in inbound_nodes]\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 227, "n_words": 68, "vocab_size": 54, "complexity": 7, "nloc": 17, "token_counts": 104, "n_ast_nodes": 138, "n_identifiers": 19, "random_cut": "def add_update(self, updates):\n \n call_context = base_layer_utils.call_context()\n\n if (\n tf.distribute.has_strategy()\n and tf.distribute.in_cross_replica_context()\n " }, { "id": 211590, "commit_id": "fa67fb9f88ff7b03ca24a4f80e0fde2ef6d80384", "repo": "PaddleDetection", "path": 
"ppdet/modeling/transformers/detr_transformer.py", "file_name": "detr_transformer.py", "fun_name": "forward", "commit_message": "[dev] fix export model bug in DETR (#7120)", "code": "def forward(self, src, src_mask=None, pos_embed=None):\n residual = src\n if self.normalize_before:\n src = self.norm1(src)\n q = k = self.with_pos_embed(src, pos_embed)\n src = self.self_attn(q, k, value=src, attn_mask=src_mask)\n\n src = residual + self.dropout1(src)\n if not self.normalize_before:\n src = self.norm1(src)\n\n residual = src\n if self.normalize_before:\n src = self.norm2(src)\n src = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = residual + self.dropout2(src)\n if not self.normalize_before:\n src = self.norm2(src)\n return src\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 187, "n_words": 60, "vocab_size": 26, "complexity": 5, "nloc": 18, "token_counts": 160, "n_ast_nodes": 234, "n_identifiers": 21, "random_cut": "def forward(self, src, src_mask=None, pos_embed=None):\n residual = src\n if self.normalize_before:\n src = self.norm1(src)\n q = k = self.with_pos_embed(src, pos_embed)\n src = self.self_attn(q, k, value=src, attn_mask=src_mask)\n\n src = residual + self.dropout1(src)\n if not self.normalize_before:\n src = self.norm1(src)\n\n residual = src\n if self.normalize_before:\n src = self.norm2(src)\n src = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = residual + self.dropout2(src)\n" }, { "id": 205412, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/base.py", "file_name": "base.py", "fun_name": "_check_m2m_through_same_relationship", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _check_m2m_through_same_relationship(cls):\n \n\n errors = []\n seen_intermediary_signatures = []\n\n fields = cls._meta.local_many_to_many\n\n # Skip when the target model wasn't found.\n fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))\n\n # Skip when the relationship model wasn't found.\n fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))\n\n for f in fields:\n signature = (\n f.remote_field.model,\n cls,\n f.remote_field.through,\n f.remote_field.through_fields,\n )\n if signature in seen_intermediary_signatures:\n errors.append(\n checks.Error(\n \"The model has two identical many-to-many relations \"\n \"through the intermediate model '%s'.\"\n % f.remote_field.through._meta.label,\n obj=cls,\n id=\"models.E003\",\n )\n )\n else:\n seen_intermediary_signatures.append(signature)\n return errors\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 460, "n_words": 88, "vocab_size": 53, "complexity": 7, "nloc": 26, "token_counts": 136, "n_ast_nodes": 215, "n_identifiers": 21, "random_cut": "def _check_m2m_through_same_relationship(cls):\n \n\n errors = []\n seen_intermediary_signatures = []\n\n fields = cls._meta.local_many_to_many\n\n # Skip when the target model wasn't found.\n fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))\n\n # Skip when the relationship model wasn't found.\n fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))\n\n for f in fields:\n signature = (\n f.remote_field.model,\n cls,\n f.remote_field.through,\n f.remote_field.through_fields,\n )\n if 
signature in seen_intermediary_signatures:\n " }, { "id": 199472, "commit_id": "801e149d69d5f88919a735f8b55b6024f97c6950", "repo": "sympy", "path": "sympy/physics/mechanics/tests/test_rigidbody.py", "file_name": "test_rigidbody.py", "fun_name": "test_parallel_axis", "commit_message": "Add optional frame argument to parallel axis method", "code": "def test_parallel_axis():\n N = ReferenceFrame('N')\n m, Ix, Iy, Iz, a, b = symbols('m, I_x, I_y, I_z, a, b')\n Io = inertia(N, Ix, Iy, Iz)\n o = Point('o')\n p = o.locatenew('p', a * N.x + b * N.y)\n R = RigidBody('R', o, N, m, (Io, o))\n Ip = R.parallel_axis(p)\n Ip_expected = inertia(N, Ix + m * b**2, Iy + m * a**2,\n Iz + m * (a**2 + b**2), ixy=-m * a * b)\n assert Ip == Ip_expected\n A = ReferenceFrame('A')\n A.orient_axis(N, N.z, 1)\n assert (R.parallel_axis(p, A).to_matrix(A) -\n Ip_expected.to_matrix(A)).simplify() == zeros(3, 3)\n\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 162, "n_words": 91, "vocab_size": 60, "complexity": 1, "nloc": 15, "token_counts": 191, "n_ast_nodes": 293, "n_identifiers": 30, "random_cut": "def test_parallel_axis():\n N = Refe" }, { "id": 213825, "commit_id": "d743336b1f3654cd0315f380f43eed4116997c1d", "repo": "ivy", "path": "ivy_tests/test_core/test_general.py", "file_name": "test_general.py", "fun_name": "test_set_framework", "commit_message": "renamed dev_str arg to dev for all methods.", "code": "def test_set_framework(fw_str, dev, call):\n ivy.set_framework(fw_str)\n ivy.unset_framework()\n\n\n# use_framework", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 12, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 33, "n_identifiers": 7, "random_cut": "def test_set_framework(fw_str, dev, call):\n ivy.set_framework(fw_str)\n ivy.unset_framework()\n\n\n# use_framework" }, { "id": 273929, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/rnn/gru_test.py", "file_name": "gru_test.py", "fun_name": "test_recurrent_dropout_with_implementation_restriction", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_recurrent_dropout_with_implementation_restriction(self):\n layer = keras.layers.GRU(2, recurrent_dropout=0.1, implementation=2)\n # The implementation is force to 1 due to the limit of recurrent_dropout.\n self.assertEqual(layer.implementation, 1)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 42, "n_words": 22, "vocab_size": 21, "complexity": 1, "nloc": 3, "token_counts": 35, "n_ast_nodes": 51, "n_identifiers": 9, "random_cut": "def test_recurrent_dropout_with_implementation_restriction(self):\n laye" }, { "id": 24351, "commit_id": "a3a095150e8e1f56dd03d88ac71db6ad6262611a", "repo": "PaddleOCR", "path": "ppocr/losses/rec_vl_loss.py", "file_name": "rec_vl_loss.py", "fun_name": "forward", "commit_message": "add vl", "code": "def forward(self, predicts, batch):\n text_pre = predicts[0]\n target = batch[1].astype('int64')\n label_flatten, length = self.flatten_label(target)\n text_pre = self._flatten(text_pre, length)\n if self.mode == 'LF_1':\n loss = self.loss_func(text_pre, label_flatten)\n else:\n text_rem = predicts[1]\n text_mas = predicts[2]\n target_res = batch[2].astype('int64')\n 
target_sub = batch[3].astype('int64')\n label_flatten_res, length_res = self.flatten_label(target_res)\n label_flatten_sub, length_sub = self.flatten_label(target_sub)\n text_rem = self._flatten(text_rem, length_res)\n text_mas = self._flatten(text_mas, length_sub)\n loss_ori = self.loss_func(text_pre, label_flatten)\n loss_res = self.loss_func(text_rem, label_flatten_res)\n loss_mas = self.loss_func(text_mas, label_flatten_sub)\n loss = loss_ori + loss_res * self.weight_res + loss_mas * self.weight_mas\n return {'loss': loss}\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 272, "n_words": 81, "vocab_size": 54, "complexity": 2, "nloc": 21, "token_counts": 190, "n_ast_nodes": 301, "n_identifiers": 27, "random_cut": "def forward(self, predicts, batch):\n text_pre = predicts[0]\n target = batch[1].astype('int64')\n label_flatten, length = self.flatten_label(target)\n text_pre = self._flatten(text_pre, length)\n if self.mode == 'LF_1':\n loss = self.loss_func(text_pre, label_flatten)\n else:\n text_rem = predicts[1]\n text_mas = predicts[2]\n " }, { "id": 41173, "commit_id": "430cb8fe332a752b79fb74bd618038ac51e82df8", "repo": "seaborn", "path": "seaborn/tests/_core/test_moves.py", "file_name": "test_moves.py", "fun_name": "test_height", "commit_message": "Add move concept, with Dodge and Jitter, and ordered GroupBy", "code": "def test_height(self, df, groupby):\n\n df[\"height\"] = df[\"width\"]\n height = .4\n res = Jitter(height=height)(df, groupby, \"y\")\n self.check_same(res, df, \"y\", \"grp2\", \"width\")\n self.check_pos(res, df, \"x\", height * df[\"height\"])\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 60, "n_words": 26, "vocab_size": 21, "complexity": 1, "nloc": 6, "token_counts": 68, "n_ast_nodes": 110, "n_identifiers": 9, "random_cut": "def test_height(self, df, groupby):\n\n df[\"height\"] = df[\"width\"]\n height = .4\n res = Jitter(height=height)(df, groupby, \"y\")\n self.check_same(res, df, \"y\", \"grp2\", \"width\")\n self.check_pos(res, df, \"x\", height *" }, { "id": 122451, "commit_id": "f2f2faa4fa166f40a4a93bc966379cf1ebb720d1", "repo": "jax", "path": "jax/interpreters/batching.py", "file_name": "batching.py", "fun_name": "_pile_flatten", "commit_message": "add a basic prototype of piles, behind jax_dynamic_shapes\n\nCo-authored-by: Adam Paszke \nCo-authored-by: Dougal Maclaurin ", "code": "def _pile_flatten(pile):\n lengths = []\n new_shape = [lengths.append(d.lengths) or d.replace(lengths=len(lengths))\n if type(d) is IndexedAxisSize else d\n for d in pile.aval.elt_ty.shape]\n elt_ty = pile.aval.elt_ty.update(shape=tuple(new_shape))\n aval = pile.aval.replace(elt_ty=elt_ty)\n return (lengths, pile.data), aval\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 62, "n_words": 30, "vocab_size": 25, "complexity": 4, "nloc": 8, "token_counts": 91, "n_ast_nodes": 141, "n_identifiers": 16, "random_cut": "def _pile_flatten(pile):\n lengths = []\n new_shape = [lengths.append(d.lengths) or d.replace(lengths=len(lengths))\n if type(d) " }, { "id": 82163, "commit_id": "84f2b91105c959c4d89a63063cca441f3d67fc0f", "repo": "awx", "path": "awx/main/tasks/receptor.py", "file_name": "receptor.py", "fun_name": "get_receptor_ctl", "commit_message": "Fix fallout from turning off work signing in 
docker-compose", "code": "def get_receptor_ctl(config_data=None):\n if config_data is None:\n config_data = read_receptor_config()\n receptor_sockfile = get_receptor_sockfile(config_data)\n try:\n return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(config_data, True))\n except RuntimeError:\n return ReceptorControl(receptor_sockfile)\n\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 54, "n_words": 22, "vocab_size": 19, "complexity": 3, "nloc": 8, "token_counts": 51, "n_ast_nodes": 81, "n_identifiers": 11, "random_cut": "def get_receptor_ctl(config_data=None):\n if config_data is None:\n config_data = read_receptor_config()\n receptor_sockfile = get_receptor_sockfile(config_data)\n try:\n return ReceptorControl(receptor_sockfile, config=__RECEPTOR_CONF, tlsclient=get_tls_client(config_data, True))\n except RuntimeError:\n r" }, { "id": 93164, "commit_id": "d3b8c9dd7bef6bccb5e70d2ccf3cda8463444a34", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events_mep.py", "file_name": "test_organization_events_mep.py", "fun_name": "test_failed_dry_run_does_not_error", "commit_message": "chore(discover): Cleanup events tests (#36797)\n\n- Delete the deprecated eventsv2 tests\r\n- Move MEP tests to its own file", "code": "def test_failed_dry_run_does_not_error(self, mock_builder):\n with self.feature(\"organizations:performance-dry-run-mep\"):\n mock_builder.side_effect = InvalidSearchQuery(\"Something bad\")\n query = {\n \"field\": [\"count()\"],\n \"project\": [self.project.id],\n }\n response = self.do_request(query)\n assert response.status_code == 200, response.content\n assert len(mock_builder.mock_calls) == 1\n assert mock_builder.call_args.kwargs[\"dry_run\"]\n\n mock_builder.side_effect = IncompatibleMetricsQuery(\"Something bad\")\n query = {\n \"field\": [\"count()\"],\n \"project\": [self.project.id],\n }\n response = self.do_request(query)\n assert response.status_code == 200, response.content\n assert len(mock_builder.mock_calls) == 2\n assert mock_builder.call_args.kwargs[\"dry_run\"]\n\n mock_builder.side_effect = InvalidConditionError(\"Something bad\")\n query = {\n \"field\": [\"count()\"],\n \"project\": [self.project.id],\n }\n response = self.do_request(query)\n assert response.status_code == 200, response.content\n assert len(mock_builder.mock_calls) == 3\n assert mock_builder.call_args.kwargs[\"dry_run\"]\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 410, "n_words": 83, "vocab_size": 30, "complexity": 1, "nloc": 29, "token_counts": 207, "n_ast_nodes": 346, "n_identifiers": 19, "random_cut": "def test_failed_dry_run_does_not_error(self, mock_builder):\n with self.feature(\"organizations:performance-dry-run-mep\"):\n mock_builder.side_effect = InvalidSearchQuery(\"Something bad\")\n query = {\n \"field\": [\"count()\"],\n \"project\": [self.project.id],\n }\n response = self.do_request(query)\n assert response.status_code == 200, response.content\n assert len(mock_builder.mock_calls) == 1\n assert mock_builder.call_args.kwargs[\"dry_run\"]\n\n mock_builder.side_effect = IncompatibleMetricsQuery(\"Something bad\")\n query = {\n \"field\": [\"count()\"],\n \"project\": [self.project.id],\n }\n response = self.do_request(query)\n assert response.status_code == 200, response.content\n assert len(mock_builder.mock_calls) == 2\n 
assert mock_builder.call_args.kwargs[\"dry_run\"]\n\n mock_builder.side_effect = InvalidConditionError(\"Something bad\")\n query = {\n \"field\": [\"count()\"],\n \"project\": [self.project.id]" }, { "id": 19824, "commit_id": "99cf729dd52100efba406b9c6af585425de0788c", "repo": "pipenv", "path": "pipenv/utils/resolver.py", "file_name": "resolver.py", "fun_name": "collect_hashes", "commit_message": "Improve documentation around index restrictions (#5029)\n\n* Improve documentation around index restrictions\r\n\r\n* Update docs/advanced.rst\r\n\r\n* Refine index documentation updates. Factor out and re-use method before closing down other PR.\r\n\r\n* Fully remove the --extra-index-url argument\r\n\r\nCo-authored-by: Yusuke Nishioka ", "code": "def collect_hashes(self, ireq):\n link = ireq.link # Handle VCS and file links first\n if link and (link.is_vcs or (link.is_file and link.is_existing_dir())):\n return set()\n\n if not is_pinned_requirement(ireq):\n return set()\n\n sources = self.sources # Enforce index restrictions\n if ireq.name in self.index_lookup:\n sources = list(\n filter(lambda s: s.get(\"name\") == self.index_lookup[ireq.name], sources)\n )\n if any(is_pypi_url(source[\"url\"]) for source in sources):\n hashes = self._get_hashes_from_pypi(ireq)\n if hashes:\n return hashes\n\n applicable_candidates = self.ignore_compatibility_finder.find_best_candidate(\n ireq.name, ireq.specifier\n ).iter_applicable()\n applicable_candidates = list(applicable_candidates)\n if applicable_candidates:\n return {\n self._get_hash_from_link(candidate.link)\n for candidate in applicable_candidates\n }\n if link:\n return {self._get_hash_from_link(link)}\n if ireq.original_link:\n return {self._get_hash_from_link(ireq.original_link)}\n return set()\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 364, "n_words": 91, "vocab_size": 60, "complexity": 14, "nloc": 29, "token_counts": 195, "n_ast_nodes": 313, "n_identifiers": 29, "random_cut": "def collect_hashes(self, ireq):\n link = ireq.link # Handle VCS and file links first\n if link and (link.is_vcs or (link.is_file and link.is_existing_dir())):\n return set()\n\n if not is_pinned_requirement(ireq):\n return set()\n\n sources = self.sources # Enforc" }, { "id": 5908, "commit_id": "69604268c2ddc06a4ee0b3dce0e05a8fb73b5d16", "repo": "ludwig", "path": "tests/integration_tests/test_api.py", "file_name": "test_api.py", "fun_name": "test_api_callbacks", "commit_message": "Rename fc_size to output_size (#1641)\n\n* Rename fc_size to output_size\r\n\r\n* Responding to comments", "code": "def test_api_callbacks(csv_filename):\n mock_callback = mock.Mock()\n\n epochs = 2\n batch_size = 8\n num_examples = 32\n\n with tempfile.TemporaryDirectory() as output_dir:\n input_features = [sequence_feature(reduce_output=\"sum\")]\n output_features = [category_feature(vocab_size=5, reduce_input=\"sum\")]\n\n config = {\n \"input_features\": input_features,\n \"output_features\": output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n \"training\": {\"epochs\": epochs, \"batch_size\": batch_size},\n }\n model = LudwigModel(config, callbacks=[mock_callback])\n\n data_csv = generate_data(\n input_features, output_features, os.path.join(output_dir, csv_filename), num_examples=num_examples\n )\n val_csv = shutil.copyfile(data_csv, os.path.join(output_dir, \"validation.csv\"))\n test_csv = shutil.copyfile(data_csv, os.path.join(output_dir, \"test.csv\"))\n\n 
model.train(training_set=data_csv, validation_set=val_csv, test_set=test_csv)\n\n assert mock_callback.on_epoch_start.call_count == epochs\n assert mock_callback.on_epoch_end.call_count == epochs\n\n assert mock_callback.on_validation_start.call_count == epochs\n assert mock_callback.on_validation_end.call_count == epochs\n\n assert mock_callback.on_test_start.call_count == epochs\n assert mock_callback.on_test_end.call_count == epochs\n\n assert mock_callback.on_batch_start.call_count == epochs * (num_examples / batch_size)\n assert mock_callback.on_batch_end.call_count == epochs * (num_examples / batch_size)\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 272, "n_words": 109, "vocab_size": 68, "complexity": 1, "nloc": 29, "token_counts": 255, "n_ast_nodes": 409, "n_identifiers": 44, "random_cut": "def test_api_callbacks(csv_filename):\n mock_callback = mock.Mock()\n\n epochs = 2\n batch_size = 8\n num_examples = 32\n\n with tempfile.TemporaryDirectory() as output_dir:\n input_features = [sequence_feature(reduce_output=\"sum\")]\n output_features = [category_feature(vocab_size=5, reduce_input=\"sum\")]\n\n config = {\n \"input_features\": input_features,\n \"output_features\": output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n \"training\": {\"epochs\": epochs, \"batch_size\": batch_size},\n }\n model = LudwigModel(config, callbacks=[mock_callback])\n\n data_csv = generate_data(\n input_features, output_features, os.path.join(output_dir, csv_filename), num_examples=num_examples\n )\n val_csv = shutil.copyfile(data_csv, os.path.join(output_dir, \"validation.csv\"))\n test_csv = shutil.copyfile(data_csv, os.path.join(output_dir, \"test.csv\"))\n\n model." 
}, { "id": 262547, "commit_id": "bfc63829ac869f479bf9e8bf0fb75a2fb6d04959", "repo": "TTS", "path": "TTS/tts/datasets/formatters.py", "file_name": "formatters.py", "fun_name": "mls", "commit_message": "Implement bucketed weighted sampling for VITS (#1871)", "code": "def mls(root_path, meta_files=None, ignored_speakers=None):\n \n items = []\n with open(os.path.join(root_path, meta_files), \"r\", encoding=\"utf-8\") as meta:\n for line in meta:\n file, text = line.split(\"\\t\")\n text = text[:-1]\n speaker, book, *_ = file.split(\"_\")\n wav_file = os.path.join(root_path, os.path.dirname(meta_files), \"audio\", speaker, book, file + \".wav\")\n # ignore speakers\n if isinstance(ignored_speakers, list):\n if speaker in ignored_speakers:\n continue\n items.append(\n {\"text\": text, \"audio_file\": wav_file, \"speaker_name\": \"MLS_\" + speaker, \"root_path\": root_path}\n )\n return items\n\n\n# ======================================== VOX CELEB ===========================================", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 225, "n_words": 70, "vocab_size": 56, "complexity": 4, "nloc": 15, "token_counts": 146, "n_ast_nodes": 246, "n_identifiers": 23, "random_cut": "def mls(root_path, meta_files=None, ignored_speakers=None):\n \n items = []\n with open(os.path.join(root_path, meta_files), \"r\", encoding=\"utf-8\") as meta:\n for line in meta:\n file, text = line.split(\"\\t\")\n text = text[:-1]\n speaker, book, *_ = file.split(\"_\")\n wav_file = os.path.join(root_path, os.path.dirname(meta_files), \"audio\", speaker, book, file + \".wav\")\n # ignore speakers\n if isinstance(ignored_speakers, list):\n if speaker in ignored_speakers:\n continue\n items.append(\n {\"text\": text, \"audio_" }, { "id": 163734, "commit_id": "d2d7ffb56f0f12c412c36c0c867ab3bb240d04ca", "repo": "pandas", "path": "pandas/tests/io/xml/test_xml_dtypes.py", "file_name": "test_xml_dtypes.py", "fun_name": "test_dtype_float", "commit_message": "ENH: Add dtypes/converters arguments for pandas.read_xml (#45411)", "code": "def test_dtype_float(parser):\n df_result = read_xml(xml_types, dtype={\"degrees\": \"float\"}, parser=parser)\n\n df_expected = DataFrame(\n {\n \"shape\": [\"square\", \"circle\", \"triangle\"],\n \"degrees\": Series([360, 360, 180]).astype(\"float\"),\n \"sides\": [4.0, float(\"nan\"), 3.0],\n }\n )\n\n tm.assert_frame_equal(df_result, df_expected)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 86, "n_words": 28, "vocab_size": 27, "complexity": 1, "nloc": 10, "token_counts": 83, "n_ast_nodes": 133, "n_identifiers": 13, "random_cut": "def test_dtype_float(parser):\n df_resul" }, { "id": 131627, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/test_object_manager.py", "file_name": "test_object_manager.py", "fun_name": "test_actor_broadcast", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_actor_broadcast(ray_start_cluster_with_resource):\n cluster, num_nodes = ray_start_cluster_with_resource\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 8, "n_words": 6, "vocab_size": 6, "complexity": 5, "nloc": 19, "token_counts": 147, "n_ast_nodes": 17, 
"n_identifiers": 4, "random_cut": "def test_actor_broadcast(ray_start_cluster_with_resource):\n cluster, num_nodes = ray_start_cluster_with_resource\n" }, { "id": 269240, "commit_id": "3337f8716967b9b5c9c575e73c66cef0a17e891f", "repo": "keras", "path": "keras/utils/dataset_utils.py", "file_name": "dataset_utils.py", "fun_name": "_rescale_dataset_split_sizes", "commit_message": "adds mnist dataset test case", "code": "def _rescale_dataset_split_sizes(left_size,right_size,total_length):\n \n left_size_type = type(left_size)\n right_size_type = type(right_size)\n\n # check both left_size and right_size are integers or floats\n if ((left_size is not None and left_size_type not in [int,float]) and\n (right_size is not None and right_size_type not in [int,float])):\n raise TypeError('Invalid `left_size` and `right_size` Types. Expected: '\n 'integer or float or None, Received: type(left_size)='\n f'{left_size_type} and type(right_size)={right_size_type}')\n\n # check left_size is a integer or float\n if left_size is not None and left_size_type not in [int,float]:\n raise TypeError('Invalid `left_size` Type.Expected: int or float or None, '\n f'Received: type(left_size)={left_size_type}. ')\n\n # check right_size is a integer or float\n if right_size is not None and right_size_type not in [int,float]:\n raise TypeError(f'Invalid `right_size` Type.Expected: int or float or None,'\n f'Received: type(right_size)={right_size_type}. ')\n\n # check left_size and right_size are non-zero\n if left_size == 0 and right_size == 0:\n raise ValueError('Both `left_size` and `right_size` are zero. '\n 'Atleast one of the split sizes must be non-zero.')\n\n # check left_size is non-negative and less than 1 and less than total_length\n if (left_size_type == int and (left_size <= 0 or left_size>= total_length)\n or left_size_type == float and (left_size <= 0 or left_size>= 1) ):\n raise ValueError('`left_size` should be either a positive integer '\n f'and smaller than {total_length} or a float '\n 'within the range `[0, 1]`. Received: left_size='\n f'{left_size}')\n\n # check right_size is non-negative and less than 1 and less than total_length\n if (right_size_type == int and (right_size <= 0 or right_size>= total_length)\n or right_size_type == float and (right_size <= 0 or right_size>= 1)):\n raise ValueError('`right_size` should be either a positive integer '\n f'and smaller than {total_length} or a float '\n 'within the range `[0, 1]`. Received: right_size='\n f'{right_size}')\n\n # check sum of left_size and right_size is less than or equal to total_length\n if right_size_type == left_size_type == float and right_size + left_size > 1:\n raise ValueError('The sum of `left_size` and `right_size` is greater '\n 'than 1. It must be less than or equal to 1.')\n\n if left_size_type == float:\n left_size = round(left_size*total_length)\n elif left_size_type == int:\n left_size = float(left_size)\n\n if right_size_type == float:\n right_size = round(right_size*total_length)\n elif right_size_type == int:\n right_size = float(right_size)\n\n if left_size is None:\n left_size = total_length - right_size\n elif right_size is None:\n right_size = total_length - left_size\n\n if left_size + right_size > total_length:\n raise ValueError('The sum of `left_size` and `right_size` should '\n 'be smaller than the {total_length}. 
'\n f'Received: left_size + right_size = {left_size+right_size}'\n f'and total_length = {total_length}')\n\n for split,side in [(left_size,'left'),(right_size,'right')]:\n if split == 0:\n raise ValueError(f'With `dataset` of length={total_length}, `left_size`='\n '{left_size} and `right_size`={right_size}.'\n f'Resulting {side} side dataset split will be empty. '\n 'Adjust any of the aforementioned parameters')\n\n left_size,right_size = int(left_size) ,int(right_size)\n return left_size,right_size\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 882, "n_words": 432, "vocab_size": 146, "complexity": 34, "nloc": 57, "token_counts": 362, "n_ast_nodes": 651, "n_identifiers": 14, "random_cut": "def _rescale_dataset_split_sizes(left_size,right_size,total_length):\n \n left_size_type = type(left_size)\n right_size_type = type(right_size)\n\n # check both left_size and right_size are integers or floats\n if ((left_size is not None and left_size_type not in [int,float]) and\n (right_size is not None and right_size_type not in [int,float])):\n raise TypeError('Invalid `left_size` and `right_size` Types. Expected: '\n 'integer or float or None, Received: type(left_size)='\n f'{left_size_type} and type(right_size)={right_size_type}')\n\n # check left_size is a integer or float\n if left_size is not None and left_size_type not in [int,float]:\n raise TypeError('Invalid `left_size` Type.Expected: int or float or None, " }, { "id": 269616, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "flatten", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def flatten(x):\n \n return tf.reshape(x, [-1])\n\n\n@keras_export(\"keras.backend.batch_flatten\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.backend.batch_flatten\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 11, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 60, "n_identifiers": 10, "random_cut": "def flatten(x):\n \n return tf.reshape(x, [-1])\n\n\n@keras_export(\"keras.backend.batch_flatten\")\n@tf.__internal__.dispatch.add_" }, { "id": 26959, "commit_id": "513fc80bc698c177b87774b3aff3da7b9aedbe06", "repo": "saleor", "path": "saleor/graphql/discount/schema.py", "file_name": "schema.py", "fun_name": "resolve_voucher", "commit_message": "Stricter signatures for resolvers and mutations (#9649)", "code": "def resolve_voucher(_root, _info, *, id, channel=None):\n _, id = from_global_id_or_error(id, Voucher)\n return resolve_voucher(id, channel)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 3, "token_counts": 32, "n_ast_nodes": 46, "n_identifiers": 8, "random_cut": "def resolve_voucher(_root, _info, *, id, channel=None):\n _, id = from_global_id_or_error(id, Voucher)\n " }, { "id": 72798, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/api/v2/tests/test_pages.py", "file_name": "test_pages.py", "fun_name": "test_descendant_of_filter", 
"commit_message": "Reformat with black", "code": "def test_descendant_of_filter(self):\n response = self.get_response(descendant_of=6)\n content = json.loads(response.content.decode(\"UTF-8\"))\n\n page_id_list = self.get_page_id_list(content)\n self.assertEqual(page_id_list, [10, 15, 17, 21, 22, 23])\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 45, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 5, "token_counts": 58, "n_ast_nodes": 89, "n_identifiers": 12, "random_cut": "def test_descendant_of_filter(self):\n response = self.get_response(descendant_of=6)\n content = json.loads(response.content.decode(\"UTF-8\"))\n\n page_id_list = self.get_page_id_list(content)\n self.assertEqual(page_id_list, [10, 15, 17, 21, 22, " }, { "id": 122411, "commit_id": "2416d154355f19e77b5c1ddf1de1f8552e4a98ad", "repo": "jax", "path": "jax/_src/third_party/numpy/linalg.py", "file_name": "linalg.py", "fun_name": "_multi_dot_matrix_chain_order", "commit_message": "Call _check_arraylike for jnp.linalg & jnp.fft functions", "code": "def _multi_dot_matrix_chain_order(arrays, return_costs=False):\n \n n = len(arrays)\n # p stores the dimensions of the matrices\n # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]\n p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]\n # m is a matrix of costs of the subproblems\n # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}\n m = np.zeros((n, n), dtype=np.double)\n # s is the actual ordering\n # s[i, j] is the value of k at which we split the product A_i..A_j\n s = np.empty((n, n), dtype=np.intp)\n\n for l in range(1, n):\n for i in range(n - l):\n j = i + l\n m[i, j] = jnp.inf\n for k in range(i, j):\n q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]\n if q < m[i, j]:\n m[i, j] = q\n s[i, j] = k # Note that Cormen uses 1-based index\n\n return (s, m) if return_costs else s\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 214, "n_words": 150, "vocab_size": 92, "complexity": 7, "nloc": 15, "token_counts": 196, "n_ast_nodes": 290, "n_identifiers": 24, "random_cut": "def _multi_dot_matrix_chain_order(arrays, return_costs=False):\n \n n = len(arrays)\n # p stores the dimensions of the matrices\n " }, { "id": 175833, "commit_id": "54610bb448a9cf5be77d53b66169fca4c11be6cb", "repo": "cpython", "path": "Lib/test/test_posix.py", "file_name": "test_posix.py", "fun_name": "test_chmod_dir_fd", "commit_message": "bpo-46426: Improve tests for the dir_fd argument (GH-30668)\n\nEnsure that directory file descriptors refer to directories different\r\nfrom the current directory, and that src_dir_fd and dst_dir_fd refer\r\nto different directories.\r\n\r\nAdd context manager open_dir_fd() in test.support.os_helper.", "code": "def test_chmod_dir_fd(self):\n with self.prepare_file() as (dir_fd, name, fullname):\n posix.chmod(fullname, stat.S_IRUSR)\n posix.chmod(name, stat.S_IRUSR | stat.S_IWUSR, dir_fd=dir_fd)\n s = posix.stat(fullname)\n self.assertEqual(s.st_mode & stat.S_IRWXU,\n stat.S_IRUSR | stat.S_IWUSR)\n", "url": "https://github.com/python/cpython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 102, "n_words": 24, "vocab_size": 22, "complexity": 1, "nloc": 7, "token_counts": 76, "n_ast_nodes": 118, "n_identifiers": 15, "random_cut": "def test_chmod_dir_fd(self):\n with 
self.prepare_file() as (dir_fd, name, fullname):\n posix.chmod(fullname, stat.S_I" }, { "id": 77926, "commit_id": "2664a4c1fc7df471225d3e71355802401217889a", "repo": "wagtail", "path": "wagtail/admin/views/pages/revisions.py", "file_name": "revisions.py", "fun_name": "dispatch", "commit_message": "Extract generic RevisionsCompareView from page revisions_compare view", "code": "def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 15, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 28, "n_ast_nodes": 42, "n_identifiers": 6, "random_cut": "def dispatch(self, request, *args, **kwargs):\n return super().dispatch(request, *args, **kwargs)\n" }, { "id": 308607, "commit_id": "5c8271552a3023808e272125f71ba79f3a1e97d8", "repo": "core", "path": "tests/components/homekit/test_util.py", "file_name": "test_util.py", "fun_name": "test_format_version", "commit_message": "Add hardware revision support to homekit (#63336)", "code": "async def test_format_version():\n \n assert format_version(\"soho+3.6.8+soho-release-rt120+10\") == \"3.6.8\"\n assert format_version(\"undefined-undefined-1.6.8\") == \"1.6.8\"\n assert format_version(\"56.0-76060\") == \"56.0.76060\"\n assert format_version(3.6) == \"3.6\"\n assert format_version(\"AK001-ZJ100\") == \"001.100\"\n assert format_version(\"HF-LPB100-\") == \"100\"\n assert format_version(\"AK001-ZJ2149\") == \"001.2149\"\n assert format_version(\"0.1\") == \"0.1\"\n assert format_version(\"unknown\") is None\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 69, "n_words": 39, "vocab_size": 24, "complexity": 1, "nloc": 10, "token_counts": 70, "n_ast_nodes": 144, "n_identifiers": 2, "random_cut": "async def test_format_version():\n \n assert format_version(\"soho+3.6.8+soho-release-rt120+10\") == \"3.6.8\"\n assert format_version(\"undefined-undefined-1.6.8\") == \"1.6.8\"\n assert format_version(\"56.0-76060\") == \"56.0.76060\"\n assert format_version(3.6) == \"3.6\"\n assert format_version(\"AK001-ZJ100\") == \"001.100\"\n assert format_version(\"HF-LPB100-\") == \"100\"\n assert" }, { "id": 101659, "commit_id": "892d8626ed4e7f834ac5607af59f14f5476d5997", "repo": "faceswap", "path": "tools/alignments/jobs.py", "file_name": "jobs.py", "fun_name": "_set_skip_list", "commit_message": "Bugfix: Alignments tool - don't error on from-faces job", "code": "def _set_skip_list(self) -> Optional[List[int]]:\n \n skip_num = self._arguments.extract_every_n\n if skip_num == 1:\n logger.debug(\"Not skipping any frames\")\n return None\n skip_list = []\n for idx, item in enumerate(self._frames.file_list_sorted):\n if idx % skip_num != 0:\n logger.trace(\"Adding image '%s' to skip list due to \" # type:ignore\n \"extract_every_n = %s\", item[\"frame_fullname\"], skip_num)\n skip_list.append(idx)\n logger.debug(\"Adding skip list: %s\", skip_list)\n return skip_list\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 196, "n_words": 55, "vocab_size": 45, "complexity": 4, "nloc": 22, "token_counts": 89, "n_ast_nodes": 151, "n_identifiers": 18, "random_cut": "def _set_skip_list(self) -> Optional[List[int]]:\n \n skip_num = self._arguments.extract_every_n\n if skip_num == 
1:\n logger.debug(\"Not skipping any frames\")\n return None\n skip_list = []\n for idx, item in enumerate(s" }, { "id": 29400, "commit_id": "67df28935c555fdd673f17e8c9183e24dde7c51f", "repo": "saleor", "path": "saleor/graphql/order/mutations/draft_order_create.py", "file_name": "draft_order_create.py", "fun_name": "_save_lines", "commit_message": "Simple (flat rate) taxes API (#9784)\n\n* Add empty tax module\r\n\r\n* Add tax models (#9839)\r\n\r\n* Add tax API queries (#9856)\r\n\r\n* Add MANAGE_TAXES permission\r\n\r\n* Add tax configuration queries\r\n\r\n* Create tax configuration when channel is created\r\n\r\n* Drop sorters for now\r\n\r\n* Add TaxConfigurationPerCountry type\r\n\r\n* Update migration\r\n\r\n* Add metadata to TaxConfiguration type\r\n\r\n* Add tests for tax configuration queries\r\n\r\n* Add TaxClass types\r\n\r\n* Improve tests\r\n\r\n* Add queries for tax configuration per country\r\n\r\n* Fix query in tests\r\n\r\n* Update query cost map\r\n\r\n* Add tax API mutations (#9934)\r\n\r\n* Add taxConfigurationUpdate mutation\r\n\r\n* Update schema\r\n\r\n* Add tax class CRUD mutations\r\n\r\n* Add mutations to update/delete tax class rates per country\r\n\r\n* Review fixes\r\n\r\n* Add taxClass field to ProductType type (#9999)\r\n\r\n* Add taxClass field to ProductType type\r\n\r\n* Add taxClass field to Product type\r\n\r\n* Add taxClass field to shipping method type\r\n\r\n* Add displayGrossPrices to ProductPricingInfo (#10008)\r\n\r\n* Add displayGrossPrices to ProductPricingInfo\r\n\r\n* Add displayGrossPrices to Checkout\r\n\r\n* Add displayGrossPrices to Order\r\n\r\n* Add tests\r\n\r\n* Add ADDED_IN_35 label to new fields' descriptions\r\n\r\n* Use new display_gross_prices flag (#10121)\r\n\r\n* Use new display_gross_prices flag\r\n\r\n* Update tests\r\n\r\n* Add tests\r\n\r\n* Review fixes\r\n\r\n* Drop Vatlayer (#10335)\r\n\r\n* Add migration from Vatlayer to simple taxes\r\n\r\n* Review fixes\r\n\r\n* Review fixes\r\n\r\n* Drop usages of global include_taxes_in_prices flag (#10406)\r\n\r\n* Drop `include_taxes_in_prices` function from site settings\r\n\r\n* Adjust tests\r\n\r\n* Review fixes\r\n\r\n* Drop the `charge_taxes_on_shipping` flag from site settings. (#10466)\r\n\r\n* Include migrating Avatax tax codes in tax class migration\r\n\r\n* Drop `charge_taxes_on_shipping` function\r\n\r\n* Add tax_class to ShippingMethodData\r\n\r\n* Review fixes\r\n\r\n* Always calculate shipping tax with Avalara\r\n\r\n* Add default country rate (#10497)\r\n\r\n* Allow setting default tax rate for a country (without providing a tax class)\r\n\r\n* Add validation to allow settings only one default rate at once\r\n\r\n* Code review fixes\r\n\r\n* Add taxCalculationStrategy field\r\n\r\n* Add tests\r\n\r\n* CR fixes\r\n\r\n* Adjust resolver to use new tax configuration (#10533)\r\n\r\n* CR fixes\r\n\r\n* Add database router to fix false positives on relation mismatch. (#10524)\r\n\r\n* Add database router to fix false positives on relation mismatch.\r\n\r\n* The db router should have only 'allow_relation' implemented.\r\n\r\n* The 'db_for_write' part should stay.\r\n\r\n* Subscription for sync tax webooks (#10433)\r\n\r\n* Add proposed changes to schema\r\n\r\n* Add base implementation for sync tax subscription\r\n\r\n* Add test for empty order\r\n\r\n* Add clean up and missing part for tests\r\n\r\n* Use subscription for tax webhooks. 
Add more tests\r\n\r\n* Improve descriptions for tax objects\r\n\r\n* Adjust resolver to use new tax configuration (#10533)\r\n\r\n* Add taxCalculationStrategy field (#10532)\r\n\r\n* Add taxCalculationStrategy field\r\n\r\n* Add tests\r\n\r\n* CR fixes\r\n\r\n* CR fixes\r\n\r\n* Add datamigration to populate taxCalculationStrategy\r\n\r\n* Migrate Product.charge_taxes to new tax configuration (#10585)\r\n\r\n* Migrate Product.charge_taxes field to new tax configuration\r\n\r\n* Rename function\r\n\r\n* Fix tests\r\n\r\n* Change assign_tax_code_to_object_meta function to support tax classes\r\n\r\n* Update tax class fixtures\r\n\r\n* Improve dataloader\r\n\r\n* CR fixes\r\n\r\n* CR fixes\r\n\r\n* Add deprecation notice to dataloader\r\n\r\n* Allow removing country rates in the `taxCountryConfigurationUpdate` mutation. (#10647)\r\n\r\n* Allow deleting rates in taxCountryConfigurationUpdate mutation\r\n\r\n* Change tax rates ordering to keep default rates first (with null tax classes)\r\n\r\n* Update existing migration\r\n\r\n* Remove TaxClass.is_default field (#10660)\r\n\r\n* Change tax rates ordering to keep default rates first (with null tax classes)\r\n\r\n* Update existing migration\r\n\r\n* Drop is_default field from TaxClass model\r\n\r\n* Drop extra Avalara config (#10673)\r\n\r\n* Drop extra Avatax config options\r\n\r\n* Adjust tests\r\n\r\n* Use flat rates in tax calculations (#10747)\r\n\r\n* WIP Use new tax configuration in tax calculations\r\n\r\n* Use new tax calculations for checkout\r\n\r\n* Adjust tests\r\n\r\n* Add flat rates calculations for checkout and order\r\n\r\n* Calculate flat rates in product pricing objects\r\n\r\n* Adjust tests\r\n\r\n* Add tests for order calculations\r\n\r\n* Add tests for product queries tax calculations\r\n\r\n* Add tests for order calculations\r\n\r\n* Use base calculations to get default checkout shipping price\r\n\r\n* Add tests for using tax_class from product_type\r\n\r\n* Add tests for get_order_country\r\n\r\n* Adjust tests\r\n\r\n* Code review fixes\r\n\r\n* Drop update_taxes_for_order_lines (#11000)\r\n\r\n* Fix calls to Avalara not validating order (#11012)\r\n\r\n* Add validation to disallow creating negative rates (#11010)\r\n\r\n* Add missing recalculation of order.undiscounted_total (#11039)\r\n\r\n* Optimize getting tax class country rates (#11040)\r\n\r\n* Tax API adjustments for dashboard (#11042)\r\n\r\n* Ignore null rates in taxCountryConfigurationUpdate mutation\r\n\r\n* Allow to pass null rates in taxClassUpdate mutation\r\n\r\n* Improve tests\r\n\r\n* Update saleor/graphql/tax/mutations/tax_class_update.py\r\n\r\nCo-authored-by: Krzysztof Waliczek \r\n\r\n* Update schema\r\n\r\nCo-authored-by: Krzysztof Waliczek \r\n\r\n* Cleanup before release (#11049)\r\n\r\n* Update ADDED_IN labels\r\n\r\n* Fix skippeded test\r\n\r\n* Regenerate migrations\r\n\r\n* Deprecate CountryDisplay.vat field\r\n\r\n* Add changelog\r\n\r\n* Update order.undiscounted_total calculation to not include taxes (#11068)\r\n\r\n* Fix assigning rates to tax classes (#11105)\r\n\r\n* Allow all staff users and apps to query tax-related data (#11113)\r\n\r\n* Bump dependencies for origin/SALEOR-6391-simple-taxes (#11127)\r\n\r\nBumps:\r\n- cryptography to 38.0.3\r\n- pillow to 9.3.0\r\n\r\n* Fix using tax code from product and product type's tax class (#11111)\r\n\r\n* Fix using tax code from product and product type's tax class\r\n\r\n* Extract function\r\n\r\n* Replace synchronous load_site with promise (#11165)\r\n\r\n* Denormalize tax class for order 
lines and orders (#11172)\r\n\r\n* WIP Denormalize tax class for order lines and orders\r\n\r\n* Add denormalized fields in GraphQL types\r\n\r\n* Add tests for denormalized API fields\r\n\r\n* Return 0 rate in API when rate is null\r\n\r\n* Add preview/version notes in new field descriptions\r\n\r\n* Update changelog\r\n\r\nCo-authored-by: Dominik Kozaczko \r\nCo-authored-by: Maciej Korycinski \r\nCo-authored-by: Krzysztof Waliczek \r\nCo-authored-by: Mika <6186720+NyanKiyoshi@users.noreply.github.com>\r\nCo-authored-by: Krzysztof Kwaśniak ", "code": "def _save_lines(info, instance, lines_data, app, manager):\n if lines_data:\n lines = []\n for line_data in lines_data:\n new_line = create_order_line(\n instance,\n line_data,\n manager,\n )\n lines.append(new_line)\n\n # New event\n events.order_added_products_event(\n order=instance,\n user=info.context.user,\n app=app,\n order_lines=lines,\n )\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 255, "n_words": 32, "vocab_size": 28, "complexity": 3, "nloc": 16, "token_counts": 67, "n_ast_nodes": 97, "n_identifiers": 17, "random_cut": "def _save_lines(info, instance, lines_data, app, manager):\n if lines_data:\n lines = []\n for line_data in lines_data:\n new_line = create_order_line(\n instance,\n line_data,\n manager,\n )\n lines.append(new_line)\n\n # New event\n events.order_added_products_event(\n order=instance,\n user=info.context.user,\n app=app,\n order_lines=lines,\n " }, { "id": 74800, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/documents/tests/test_admin_views.py", "file_name": "test_admin_views.py", "fun_name": "test_reupload_different_file_size_and_file_hash", "commit_message": "Reformat with black", "code": "def test_reupload_different_file_size_and_file_hash(self):\n \n # Build a fake file, and create it through the admin view\n # since self.document doesn't have a file_size set.\n fake_file = SimpleUploadedFile(\"some_file.txt\", b\"this is the content\")\n post_data = {\n \"title\": \"My doc\",\n \"file\": fake_file,\n }\n self.client.post(reverse(\"wagtaildocs:add\"), post_data)\n\n document = models.Document.objects.get(title=\"My doc\")\n old_file_size, old_file_hash = document.file_size, document.file_hash\n\n new_file = SimpleUploadedFile(document.filename, b\"less content\")\n\n self.client.post(\n reverse(\"wagtaildocs:edit\", args=(document.pk,)),\n {\n \"title\": document.title,\n \"file\": new_file,\n },\n )\n\n document.refresh_from_db()\n\n self.assertNotEqual(document.file_size, old_file_size)\n self.assertNotEqual(document.file_hash, old_file_hash)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 259, "n_words": 69, "vocab_size": 58, "complexity": 1, "nloc": 20, "token_counts": 135, "n_ast_nodes": 227, "n_identifiers": 24, "random_cut": "def test_reupload_different_file_size_and_file_hash(self):\n \n # Build a fake file, and create it through the admin view\n # since self.document doesn't have a file_size set.\n fake_file = SimpleUploaded" }, { "id": 142705, "commit_id": "43aa2299e6623c8f8c7c4a1b80133459d0aa68b0", "repo": "ray", "path": "python/ray/util/rpdb.py", "file_name": "rpdb.py", "fun_name": "do_remote", "commit_message": "[api] Annotate as public / move ray-core APIs to _private and add enforcement rule (#25695)\n\nEnable checking of the ray core module, excluding 
serve, workflows, and tune, in ./ci/lint/check_api_annotations.py. This required moving many files to ray._private and associated fixes.", "code": "def do_remote(self, arg):\n \n # Tell the next task to drop into the debugger.\n ray._private.worker.global_worker.debugger_breakpoint = self._breakpoint_uuid\n # Tell the debug loop to connect to the next task.\n data = json.dumps(\n {\n \"job_id\": ray.get_runtime_context().job_id.hex(),\n }\n )\n _internal_kv_put(\n \"RAY_PDB_CONTINUE_{}\".format(self._breakpoint_uuid),\n data,\n namespace=ray_constants.KV_NAMESPACE_PDB,\n )\n self.__restore()\n self.handle.connection.close()\n return Pdb.do_continue(self, arg)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 192, "n_words": 45, "vocab_size": 35, "complexity": 1, "nloc": 15, "token_counts": 87, "n_ast_nodes": 144, "n_identifiers": 26, "random_cut": "def do_remote(self, arg):\n \n # Tell the next task to drop into the debugger.\n ray._private.worker.global_worker.debugger_breakpoint = self._breakpoint_uuid\n # Tell the debug loop to connect to the next task.\n data = json.dumps(\n {\n \"job_id\": ray.get_runtime_context().job_id.hex(),\n }\n )\n _internal_kv_put(\n \"RAY_PDB_CONTINUE_{}\".format(self._breakpoint_uuid),\n data,\n namespace=ray_constants.KV_NAMESPACE_PDB,\n )\n self.__restore()\n self.handle.connection.close()\n return Pdb.do_continue(self, arg)\n" }, { "id": 225158, "commit_id": "73e8fef5068d47ab7bdc4c49bc4abcc74434b57e", "repo": "mkdocs", "path": "mkdocs/tests/config/base_tests.py", "file_name": "base_tests.py", "fun_name": "test_missing_required", "commit_message": "Rework ConfigOption schemas as class-based\n\nThis is NOT a breaking change, the old style keeps working.\n\nNow developers can make a subclass of Config, declare the schema of the config as fields of the class, and instances of this class will hold the processed config.\n\nThis better represents the relationship between what a config definition and a config instance is, now you think of configs definitions as classes and parsed configs as instances.\n\nWe also can write these fields as descriptors and enable safe attribute-based access. Static analysis will be able to see when a missing fields is accessed. 
And in followup changes I plan to add type annotations which will make even type checking fully sound.", "code": "def test_missing_required(self):\n conf = defaults.MkDocsConfig()\n\n errors, warnings = conf.validate()\n\n self.assertEqual(\n errors, [('site_name', ValidationError('Required configuration not provided.'))]\n )\n self.assertEqual(warnings, [])\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 64, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 7, "token_counts": 47, "n_ast_nodes": 79, "n_identifiers": 10, "random_cut": "def test_missing_required(self):\n conf = defa" }, { "id": 284453, "commit_id": "9068ad01249c1e1adaca3ef9a704d70da7e3a17b", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/cryptocurrency/test_cryptocurrency_helpers.py", "file_name": "test_cryptocurrency_helpers.py", "fun_name": "test_read_data_file", "commit_message": "Refactored Crypto Tests (#1743)\n\n* Refactored tests\r\n\r\n* Removed unused command\r\n\r\n* Added tests\r\n\r\n* Tests : remove cassettes files + add fixture\r\n\r\n* Black\r\n\r\n* Tests : skip tests\r\n\r\nCo-authored-by: didierlopes.eth \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: Chavithra PARANA ", "code": "def test_read_data_file(recorder):\n file = read_data_file(\"coinbase_gecko_map.json\")\n\n recorder.capture(file)\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 11, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 31, "n_identifiers": 5, "random_cut": "def test_read_data_file(recorder):\n file = read_data_file(\"coinbase_gecko_map.json\")\n\n recorder.captur" }, { "id": 318057, "commit_id": "bbd7041a73572547be49ead53b183aa1e55a6d75", "repo": "core", "path": "tests/components/anthemav/conftest.py", "file_name": "conftest.py", "fun_name": "mock_anthemav", "commit_message": "Refactor and improve anthemav (#75852)", "code": "def mock_anthemav() -> AsyncMock:\n \n avr = AsyncMock()\n avr.protocol.macaddress = \"000000000001\"\n avr.protocol.model = \"MRX 520\"\n avr.reconnect = AsyncMock()\n avr.close = MagicMock()\n avr.protocol.input_list = []\n avr.protocol.audio_listening_mode_list = []\n avr.protocol.power = False\n return avr\n\n\n@pytest.fixture", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 61, "n_words": 32, "vocab_size": 22, "complexity": 1, "nloc": 11, "token_counts": 65, "n_ast_nodes": 121, "n_identifiers": 14, "random_cut": "def mock_anthemav() -> AsyncMock:\n \n avr = AsyncMock()\n avr.protocol.macaddress = \"000000000001\"\n avr.protocol.model = \"MRX 520\"\n avr.reconnect = AsyncMock()\n avr.close = MagicMock()\n avr.protocol.input_list = []\n avr.protocol.audio_listening_mode_list = []\n avr.protocol.power = False\n return avr\n\n\n@pytest.fixture" }, { "id": 156163, "commit_id": "4e5dfe7463028a39a90e026c7fb9220969093ab3", "repo": "dask", "path": "dask/bag/tests/test_random.py", "file_name": "test_random.py", "fun_name": "test_reservoir_sample_with_replacement_map_partitions_correctness", "commit_message": "Bag: add implementation for reservoir sampling (#7068) (#7636)\n\n - Implement the [L algorithm](https://en.wikipedia.org/wiki/Reservoir_sampling#An_optimal_algorithm) for 
reservoir sampling without replacement. \r\n - Use the **k** reservoir of size 1 strategy for sampling with replacement (see [reference](http://utopia.duth.gr/~pefraimi/research/data/2007EncOfAlg.pdf)) of **k** items", "code": "def test_reservoir_sample_with_replacement_map_partitions_correctness():\n N, k = 20, 10\n seq = list(range(N))\n distribution = [0 for _ in range(N)]\n expected_distribution = [0 for _ in range(N)]\n reps = 2000\n for _ in range(reps):\n picks, _ = random._sample_with_replacement_map_partitions(seq, k)\n for pick in picks:\n distribution[pick] += 1\n for pick in rnd.choices(seq, k=k):\n expected_distribution[pick] += 1\n\n # convert to probabilities\n distribution = [c / (reps * k) for c in distribution]\n expected_distribution = [c / (reps * k) for c in expected_distribution]\n\n # use bhattacharyya distance to asses the similarity of distributions\n assert math.isclose(\n 0.0, bhattacharyya(distribution, expected_distribution), abs_tol=1e-2\n )\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 179, "n_words": 94, "vocab_size": 56, "complexity": 8, "nloc": 17, "token_counts": 150, "n_ast_nodes": 221, "n_identifiers": 21, "random_cut": "def test_reservoir_sample_with_replacement_map_partitions_correctness():\n N, k = 20, 10\n seq = list(range(N))\n distribution = [0 for _ in range(N)]\n expected_distribution = [0 for _ in range(N)]\n reps = 2000\n for _ in range(reps):\n picks, _ = random._sample_with_replacement_map_partitions(seq, k)\n for pick in picks:\n distribution[pick] += 1\n for pick in rnd.choices(seq, k=k):\n expected_distribution[" }, { "id": 169035, "commit_id": "54347fe684e0f7844bf407b1fb958a5269646825", "repo": "pandas", "path": "pandas/io/formats/latex.py", "file_name": "latex.py", "fun_name": "_empty_info_line", "commit_message": "TYP: Autotyping (#48191)\n\n* annotate-magics\r\n\r\n* annotate-imprecise-magics\r\n\r\n* none-return\r\n\r\n* scalar-return\r\n\r\n* pyi files\r\n\r\n* ignore vendored file\r\n\r\n* manual changes\r\n\r\n* ignore pyright in pickle_compat (these errors would be legit if the current __new__ methods were called but I think these pickle tests call older __new__ methods which allowed providing multiple positional arguments)\r\n\r\n* run autotyping in pre-commit\r\n\r\n* remove final and expand safe (and add annotate-imprecise-magics)", "code": "def _empty_info_line(self) -> str:\n return (\n f\"Empty {type(self.frame).__name__}\\n\"\n f\"Columns: {self.frame.columns}\\n\"\n f\"Index: {self.frame.index}\"\n )\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 59, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 6, "token_counts": 16, "n_ast_nodes": 64, "n_identifiers": 8, "random_cut": "def _empty_info_line(self) -> str:\n return (\n f\"Empty {type(self.frame).__name__}\\n\"\n f\"Columns: {self.frame.columns}\\n\"\n f\"Index: {self.frame.index" }, { "id": 197626, "commit_id": "40a89803dbe877edc8ab6672819715f959273e60", "repo": "sympy", "path": "sympy/physics/units/tests/test_quantities.py", "file_name": "test_quantities.py", "fun_name": "test_prefixed_property", "commit_message": "feat(physics.units): add `is_prefixed` property to `Quantity`", "code": "def test_prefixed_property():\n assert not meter.is_prefixed\n assert not joule.is_prefixed\n assert not day.is_prefixed\n assert not second.is_prefixed\n assert 
centimeter.is_prefixed\n assert kilometer.is_prefixed\n assert kilogram.is_prefixed\n assert pebibyte.is_prefixed\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 45, "n_words": 22, "vocab_size": 12, "complexity": 1, "nloc": 9, "token_counts": 40, "n_ast_nodes": 64, "n_identifiers": 10, "random_cut": "def test_prefixed_property():\n assert not meter.is_prefixed\n assert not joule.is_prefixed\n assert not day.is_prefixed\n assert not second.is_prefixed\n assert centimeter.is_prefixed\n assert kilometer.is_prefixed\n" }, { "id": 222147, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ctypes/test/test_python_api.py", "file_name": "test_python_api.py", "fun_name": "test_PyObj_FromPtr", "commit_message": "add python 3.10.4 for windows", "code": "def test_PyObj_FromPtr(self):\n s = \"abc def ghi jkl\"\n ref = grc(s)\n # id(python-object) is the address\n pyobj = PyObj_FromPtr(id(s))\n self.assertIs(s, pyobj)\n\n self.assertEqual(grc(s), ref + 1)\n del pyobj\n self.assertEqual(grc(s), ref)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 84, "n_words": 29, "vocab_size": 23, "complexity": 1, "nloc": 8, "token_counts": 57, "n_ast_nodes": 94, "n_identifiers": 10, "random_cut": "def test_PyObj_FromPtr(self):\n s = \"abc def ghi jkl\"\n ref = grc(s)\n # id(python-object) is the address\n pyobj = PyObj_FromPtr(id(s))\n self.assertIs(s, pyobj)\n\n self.assertEqual(grc(s), ref + 1)\n del pyobj\n self.assertEqual(grc(s), ref)\n" }, { "id": 278142, "commit_id": "4f1d333ded256b0315cf02eee067d6fa902b748d", "repo": "keras", "path": "keras/integration_test/preprocessing_applied_in_model_test.py", "file_name": "preprocessing_applied_in_model_test.py", "fun_name": "testDistributedModelFit", "commit_message": "resolve line-too-long in integration_test", "code": "def testDistributedModelFit(self, strategy):\n if not tf.__internal__.tf2.enabled() and isinstance(\n strategy, tf.distribute.experimental.ParameterServerStrategy\n ):\n self.skipTest(\n \"Parameter Server strategy with dataset creator need to be run \"\n \"when eager execution is enabled.\"\n )\n with strategy.scope():\n preprocessing_model = utils.make_preprocessing_model(\n self.get_temp_dir()\n )\n training_model = utils.make_training_model()\n # Merge the two separate models into a single model for training.\n inputs = preprocessing_model.inputs\n outputs = training_model(preprocessing_model(inputs))\n merged_model = tf.keras.Model(inputs, outputs)\n merged_model.compile(optimizer=\"sgd\", loss=\"binary_crossentropy\")\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 249, "n_words": 63, "vocab_size": 57, "complexity": 3, "nloc": 20, "token_counts": 135, "n_ast_nodes": 176, "n_identifiers": 27, "random_cut": "def testDistributedModelFit(self, strategy):\n if not tf.__internal__.tf2.enabled() and isinstance(\n strategy, tf.distribute.experimental.ParameterServerStrategy\n ):\n self.skipTest(\n \"Parameter Server strategy with dataset creator need to be run \"\n \"when eager execution is enabled.\"\n )\n" }, { "id": 29518, "commit_id": "67df28935c555fdd673f17e8c9183e24dde7c51f", "repo": "saleor", "path": "saleor/order/tests/test_order_utils.py", "file_name": "test_order_utils.py", "fun_name": 
"test_update_order_display_gross_prices_use_country_specific_tax_settings", "commit_message": "Simple (flat rate) taxes API (#9784)\n\n* Add empty tax module\r\n\r\n* Add tax models (#9839)\r\n\r\n* Add tax API queries (#9856)\r\n\r\n* Add MANAGE_TAXES permission\r\n\r\n* Add tax configuration queries\r\n\r\n* Create tax configuration when channel is created\r\n\r\n* Drop sorters for now\r\n\r\n* Add TaxConfigurationPerCountry type\r\n\r\n* Update migration\r\n\r\n* Add metadata to TaxConfiguration type\r\n\r\n* Add tests for tax configuration queries\r\n\r\n* Add TaxClass types\r\n\r\n* Improve tests\r\n\r\n* Add queries for tax configuration per country\r\n\r\n* Fix query in tests\r\n\r\n* Update query cost map\r\n\r\n* Add tax API mutations (#9934)\r\n\r\n* Add taxConfigurationUpdate mutation\r\n\r\n* Update schema\r\n\r\n* Add tax class CRUD mutations\r\n\r\n* Add mutations to update/delete tax class rates per country\r\n\r\n* Review fixes\r\n\r\n* Add taxClass field to ProductType type (#9999)\r\n\r\n* Add taxClass field to ProductType type\r\n\r\n* Add taxClass field to Product type\r\n\r\n* Add taxClass field to shipping method type\r\n\r\n* Add displayGrossPrices to ProductPricingInfo (#10008)\r\n\r\n* Add displayGrossPrices to ProductPricingInfo\r\n\r\n* Add displayGrossPrices to Checkout\r\n\r\n* Add displayGrossPrices to Order\r\n\r\n* Add tests\r\n\r\n* Add ADDED_IN_35 label to new fields' descriptions\r\n\r\n* Use new display_gross_prices flag (#10121)\r\n\r\n* Use new display_gross_prices flag\r\n\r\n* Update tests\r\n\r\n* Add tests\r\n\r\n* Review fixes\r\n\r\n* Drop Vatlayer (#10335)\r\n\r\n* Add migration from Vatlayer to simple taxes\r\n\r\n* Review fixes\r\n\r\n* Review fixes\r\n\r\n* Drop usages of global include_taxes_in_prices flag (#10406)\r\n\r\n* Drop `include_taxes_in_prices` function from site settings\r\n\r\n* Adjust tests\r\n\r\n* Review fixes\r\n\r\n* Drop the `charge_taxes_on_shipping` flag from site settings. (#10466)\r\n\r\n* Include migrating Avatax tax codes in tax class migration\r\n\r\n* Drop `charge_taxes_on_shipping` function\r\n\r\n* Add tax_class to ShippingMethodData\r\n\r\n* Review fixes\r\n\r\n* Always calculate shipping tax with Avalara\r\n\r\n* Add default country rate (#10497)\r\n\r\n* Allow setting default tax rate for a country (without providing a tax class)\r\n\r\n* Add validation to allow settings only one default rate at once\r\n\r\n* Code review fixes\r\n\r\n* Add taxCalculationStrategy field\r\n\r\n* Add tests\r\n\r\n* CR fixes\r\n\r\n* Adjust resolver to use new tax configuration (#10533)\r\n\r\n* CR fixes\r\n\r\n* Add database router to fix false positives on relation mismatch. (#10524)\r\n\r\n* Add database router to fix false positives on relation mismatch.\r\n\r\n* The db router should have only 'allow_relation' implemented.\r\n\r\n* The 'db_for_write' part should stay.\r\n\r\n* Subscription for sync tax webooks (#10433)\r\n\r\n* Add proposed changes to schema\r\n\r\n* Add base implementation for sync tax subscription\r\n\r\n* Add test for empty order\r\n\r\n* Add clean up and missing part for tests\r\n\r\n* Use subscription for tax webhooks. 
Add more tests\r\n\r\n* Improve descriptions for tax objects\r\n\r\n* Adjust resolver to use new tax configuration (#10533)\r\n\r\n* Add taxCalculationStrategy field (#10532)\r\n\r\n* Add taxCalculationStrategy field\r\n\r\n* Add tests\r\n\r\n* CR fixes\r\n\r\n* CR fixes\r\n\r\n* Add datamigration to populate taxCalculationStrategy\r\n\r\n* Migrate Product.charge_taxes to new tax configuration (#10585)\r\n\r\n* Migrate Product.charge_taxes field to new tax configuration\r\n\r\n* Rename function\r\n\r\n* Fix tests\r\n\r\n* Change assign_tax_code_to_object_meta function to support tax classes\r\n\r\n* Update tax class fixtures\r\n\r\n* Improve dataloader\r\n\r\n* CR fixes\r\n\r\n* CR fixes\r\n\r\n* Add deprecation notice to dataloader\r\n\r\n* Allow removing country rates in the `taxCountryConfigurationUpdate` mutation. (#10647)\r\n\r\n* Allow deleting rates in taxCountryConfigurationUpdate mutation\r\n\r\n* Change tax rates ordering to keep default rates first (with null tax classes)\r\n\r\n* Update existing migration\r\n\r\n* Remove TaxClass.is_default field (#10660)\r\n\r\n* Change tax rates ordering to keep default rates first (with null tax classes)\r\n\r\n* Update existing migration\r\n\r\n* Drop is_default field from TaxClass model\r\n\r\n* Drop extra Avalara config (#10673)\r\n\r\n* Drop extra Avatax config options\r\n\r\n* Adjust tests\r\n\r\n* Use flat rates in tax calculations (#10747)\r\n\r\n* WIP Use new tax configuration in tax calculations\r\n\r\n* Use new tax calculations for checkout\r\n\r\n* Adjust tests\r\n\r\n* Add flat rates calculations for checkout and order\r\n\r\n* Calculate flat rates in product pricing objects\r\n\r\n* Adjust tests\r\n\r\n* Add tests for order calculations\r\n\r\n* Add tests for product queries tax calculations\r\n\r\n* Add tests for order calculations\r\n\r\n* Use base calculations to get default checkout shipping price\r\n\r\n* Add tests for using tax_class from product_type\r\n\r\n* Add tests for get_order_country\r\n\r\n* Adjust tests\r\n\r\n* Code review fixes\r\n\r\n* Drop update_taxes_for_order_lines (#11000)\r\n\r\n* Fix calls to Avalara not validating order (#11012)\r\n\r\n* Add validation to disallow creating negative rates (#11010)\r\n\r\n* Add missing recalculation of order.undiscounted_total (#11039)\r\n\r\n* Optimize getting tax class country rates (#11040)\r\n\r\n* Tax API adjustments for dashboard (#11042)\r\n\r\n* Ignore null rates in taxCountryConfigurationUpdate mutation\r\n\r\n* Allow to pass null rates in taxClassUpdate mutation\r\n\r\n* Improve tests\r\n\r\n* Update saleor/graphql/tax/mutations/tax_class_update.py\r\n\r\nCo-authored-by: Krzysztof Waliczek \r\n\r\n* Update schema\r\n\r\nCo-authored-by: Krzysztof Waliczek \r\n\r\n* Cleanup before release (#11049)\r\n\r\n* Update ADDED_IN labels\r\n\r\n* Fix skippeded test\r\n\r\n* Regenerate migrations\r\n\r\n* Deprecate CountryDisplay.vat field\r\n\r\n* Add changelog\r\n\r\n* Update order.undiscounted_total calculation to not include taxes (#11068)\r\n\r\n* Fix assigning rates to tax classes (#11105)\r\n\r\n* Allow all staff users and apps to query tax-related data (#11113)\r\n\r\n* Bump dependencies for origin/SALEOR-6391-simple-taxes (#11127)\r\n\r\nBumps:\r\n- cryptography to 38.0.3\r\n- pillow to 9.3.0\r\n\r\n* Fix using tax code from product and product type's tax class (#11111)\r\n\r\n* Fix using tax code from product and product type's tax class\r\n\r\n* Extract function\r\n\r\n* Replace synchronous load_site with promise (#11165)\r\n\r\n* Denormalize tax class for order 
lines and orders (#11172)\r\n\r\n* WIP Denormalize tax class for order lines and orders\r\n\r\n* Add denormalized fields in GraphQL types\r\n\r\n* Add tests for denormalized API fields\r\n\r\n* Return 0 rate in API when rate is null\r\n\r\n* Add preview/version notes in new field descriptions\r\n\r\n* Update changelog\r\n\r\nCo-authored-by: Dominik Kozaczko \r\nCo-authored-by: Maciej Korycinski \r\nCo-authored-by: Krzysztof Waliczek \r\nCo-authored-by: Mika <6186720+NyanKiyoshi@users.noreply.github.com>\r\nCo-authored-by: Krzysztof Kwaśniak ", "code": "def test_update_order_display_gross_prices_use_country_specific_tax_settings(order):\n # given\n country_code = \"PT\"\n tax_config = order.channel.tax_configuration\n tax_config.display_gross_prices = False\n tax_config.save()\n tax_config.country_exceptions.create(\n country=country_code, display_gross_prices=True\n )\n\n order.display_gross_prices = False\n order.save(update_fields=[\"display_gross_prices\"])\n order.shipping_address.country = country_code\n order.shipping_address.save()\n\n # when\n update_order_display_gross_prices(order)\n\n # then\n assert order.display_gross_prices\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 84, "n_words": 33, "vocab_size": 24, "complexity": 1, "nloc": 14, "token_counts": 76, "n_ast_nodes": 130, "n_identifiers": 14, "random_cut": "def test_update_order_display_gross_prices_use_country_specific_tax_settings(order):\n # given\n country_code = \"PT\"\n tax_config = order.channel.tax_configuration\n tax_config.display_gross_prices = False\n tax_config.save()\n tax_config.country_exceptions.create(\n country=country_code, display_gross_prices=True\n )\n\n order.display_gross_prices = False\n order.save(update_fields=[\"display_gross_prices\"])\n order.shipping_address.country = c" }, { "id": 154353, "commit_id": "c5107e5be29089720528c6c0ec4f96bc2a6a1eb3", "repo": "modin", "path": "modin/pandas/test/test_io.py", "file_name": "test_io.py", "fun_name": "test_read_csv_google_cloud_storage", "commit_message": "FEAT-#4766: Support fsspec URLs in `read_csv` and `read_csv_glob` (#4898)\n\nSigned-off-by: Karthik Velayutham ", "code": "def test_read_csv_google_cloud_storage(self):\n eval_io(\n fn_name=\"read_csv\",\n # read_csv kwargs\n filepath_or_buffer=\"gs://modin-testing/testing/multiple_csv/test_data0.csv\",\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 55, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 5, "token_counts": 16, "n_ast_nodes": 30, "n_identifiers": 5, "random_cut": "def test_read_csv_google_cloud_storage(self):\n eval_io(\n fn_name=\"read_csv\",\n # read_csv kwargs\n filepath_or_buffer=\"gs://modin-testing/testing/multiple_csv/tes" }, { "id": 155850, "commit_id": "e3b3259419c21d0d412b9d5f12531ebe5ad6967a", "repo": "dask", "path": "dask/array/tests/test_creation.py", "file_name": "test_creation.py", "fun_name": "test_diag_2d_array_creation", "commit_message": "increased performance of k-diagonal extraction in da.diag() and da.diagonal() (#8689)\n\n* added support for extracting k-diagonals from a 2d-array\r\n\r\n* included heterogeneous chunks in test_diag()\r\n\r\n* fixed linting errors in test_diag()\r\n\r\n* improved efficiency of diagonal extractor a bit\r\n\r\n* stole @TAdeJong's simple padding solution for diag(v, k) when v is 1d\r\n\r\n* reduced complexity of `diagonal()` 
from O(N**2) to O(N)\r\n\r\ndiag() now calls diagonal()\r\n\r\n* fixed linting errors in diagonal()\r\n\r\n* reorganized tests and ensured coverage of diag() & diagonal()\r\n\r\nas per @jcrist's advice\r\n\r\n* catered for cupy type input arrays to diagonal()", "code": "def test_diag_2d_array_creation(k):\n # when input 1d-array is a numpy array:\n v = np.arange(11)\n assert_eq(da.diag(v, k), np.diag(v, k))\n\n # when input 1d-array is a dask array:\n v = da.arange(11, chunks=3)\n darr = da.diag(v, k)\n nparr = np.diag(v, k)\n assert_eq(darr, nparr)\n assert sorted(da.diag(v, k).dask) == sorted(da.diag(v, k).dask)\n\n v = v + v + 3\n darr = da.diag(v, k)\n nparr = np.diag(v, k)\n assert_eq(darr, nparr)\n\n v = da.arange(11, chunks=11)\n darr = da.diag(v, k)\n nparr = np.diag(v, k)\n assert_eq(darr, nparr)\n assert sorted(da.diag(v, k).dask) == sorted(da.diag(v, k).dask)\n\n\n@pytest.mark.parametrize(\"k\", [0, 3, -3, 8])", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"k\", [0, 3, -3, 8])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 139, "n_words": 87, "vocab_size": 38, "complexity": 1, "nloc": 17, "token_counts": 198, "n_ast_nodes": 331, "n_identifiers": 16, "random_cut": "def test_diag_2d_array_creation(k):\n # when input 1d-array is a numpy array:\n v = np.arange(11)\n assert_eq(da.diag(v, k), np.diag(v, k))\n\n # when input 1d-array is a dask array:\n v = da.arange(11, chunks=3)\n darr = da.diag(v, k)\n nparr = np.diag(v, k)\n assert_eq(darr, nparr)\n assert sorted(da.diag(v, k).dask) == sorted(da.diag(v, k).dask)\n\n v = v + v + 3\n darr = da.diag(v, k)\n nparr = np.diag(v, k)\n assert_eq(darr, nparr)\n\n v = da.arange(11, chunks=11)\n darr = da.diag(v, k)\n nparr " }, { "id": 104925, "commit_id": "78941675d6f39c269f9d445121718c6c27c511dc", "repo": "datasets", "path": "datasets/imagenet_sketch/imagenet_sketch.py", "file_name": "imagenet_sketch.py", "fun_name": "_split_generators", "commit_message": "Add ImageNet-Sketch dataset (#4301)\n\n* :sparkles: Add ImageNet-Sketch dataset\r\n\r\n* :memo: add data splits to dataset card\r\n\r\n* Update datasets/imagenet_sketch/README.md\r\n\r\n* :sparkles: labels->label and use HF hosted download link\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Mario Šaško \r\n\r\n* :memo: update imagenet_sketch README.md\r\n\r\n* Use dataset repo data url\r\n\r\nCo-authored-by: Mario Šaško ", "code": "def _split_generators(self, dl_manager):\n data_files = dl_manager.download_and_extract(_URL)\n\n return [\n datasets.SplitGenerator(\n name=datasets.Split.TRAIN,\n gen_kwargs={\n \"files\": dl_manager.iter_files([data_files]),\n },\n ),\n ]\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 122, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 10, "token_counts": 48, "n_ast_nodes": 74, "n_identifiers": 13, "random_cut": "def _split_generators(self, dl_manager):\n data_files = dl_manager.download_and_extract(_URL)\n\n return [\n datasets.SplitGenerator(\n name=datasets.Split.TRAIN,\n gen_kwargs={\n \"files\": dl_manager.iter_files([data_files]),\n },\n ),\n ]\n" }, { "id": 192538, "commit_id": "96f2c0d47f00371dd066c84f69c34fde07e876c3", "repo": "vision", "path": "torchvision/datasets/utils.py", "file_name": "utils.py", "fun_name": "gen_bar_updater", "commit_message": "support confirming no virus scan on GDrive download (#5645)\n\n* support confirming no virus scan 
on GDrive download\r\n\r\n* put gen_bar_updater back\r\n\r\n* Update torchvision/datasets/utils.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\nCo-authored-by: Nicolas Hug ", "code": "def gen_bar_updater() -> Callable[[int, int, int], None]:\n warnings.warn(\"The function `gen_bar_update` is deprecated since 0.13 and will be removed in 0.15.\")\n pbar = tqdm(total=None)\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 28, "n_words": 23, "vocab_size": 23, "complexity": 1, "nloc": 5, "token_counts": 35, "n_ast_nodes": 49, "n_identifiers": 8, "random_cut": "def gen_bar_updater() -> Callable[[int, int, int], None]:\n warnings.warn(\"The function `gen_bar_update` " }, { "id": 71307, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/api/test_pages.py", "file_name": "test_pages.py", "fun_name": "test_revert_to_page_revision", "commit_message": "Reformat with black", "code": "def test_revert_to_page_revision(self):\n self.assertEqual(self.events_page.title, \"Evenements\")\n\n response = self.get_response(\n self.events_page.id, {\"revision_id\": self.first_revision.id}\n )\n self.assertEqual(response.status_code, 200)\n\n self.events_page.get_latest_revision().publish()\n self.events_page.refresh_from_db()\n self.assertEqual(self.events_page.title, \"Events\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 76, "n_words": 17, "vocab_size": 16, "complexity": 1, "nloc": 9, "token_counts": 79, "n_ast_nodes": 130, "n_identifiers": 13, "random_cut": "def test_revert_to_page_revision(self):\n self.assertEqual(self.events_page.title, \"Evenements\")\n\n response = self.get_response(\n self.events_page.id, {\"revision_id\": self.first_revision.id}\n )\n self.assertEqual(response.status_code, 200)\n\n self.events_page.get_latest_" }, { "id": 148554, "commit_id": "f26cd191466b792123f3d0b1a18b3b117a23a638", "repo": "freqtrade", "path": "freqtrade/optimize/backtesting.py", "file_name": "backtesting.py", "fun_name": "load_bt_data_detail", "commit_message": "Merge index and mark rates as part of dataload", "code": "def load_bt_data_detail(self) -> None:\n \n if self.timeframe_detail:\n self.detail_data = history.load_data(\n datadir=self.config['datadir'],\n pairs=self.pairlists.whitelist,\n timeframe=self.timeframe_detail,\n timerange=self.timerange,\n startup_candles=0,\n fail_without_data=True,\n data_format=self.config.get('dataformat_ohlcv', 'json'),\n candle_type=self.config.get('candle_type_def', CandleType.SPOT)\n )\n else:\n self.detail_data = {}\n if self.trading_mode == TradingMode.FUTURES:\n # Load additional futures data.\n funding_rates_dict = history.load_data(\n datadir=self.config['datadir'],\n pairs=self.pairlists.whitelist,\n timeframe=self.exchange._ft_has['mark_ohlcv_timeframe'],\n timerange=self.timerange,\n startup_candles=0,\n fail_without_data=True,\n data_format=self.config.get('dataformat_ohlcv', 'json'),\n candle_type=CandleType.FUNDING_RATE\n )\n\n # For simplicity, assign to CandleType.Mark (might contian index candles!)\n mark_rates_dict = history.load_data(\n datadir=self.config['datadir'],\n pairs=self.pairlists.whitelist,\n timeframe=self.exchange._ft_has['mark_ohlcv_timeframe'],\n timerange=self.timerange,\n startup_candles=0,\n fail_without_data=True,\n data_format=self.config.get('dataformat_ohlcv', 'json'),\n 
candle_type=CandleType.from_string(self.exchange._ft_has[\"mark_ohlcv_price\"])\n )\n # Combine data to avoid combining the data per trade.\n for pair in self.pairlists.whitelist:\n self.futures_data[pair] = funding_rates_dict[pair].merge(\n mark_rates_dict[pair], on='date', how=\"inner\", suffixes=[\"_fund\", \"_mark\"])\n\n else:\n self.futures_data = {}\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 656, "n_words": 95, "vocab_size": 63, "complexity": 4, "nloc": 43, "token_counts": 299, "n_ast_nodes": 467, "n_identifiers": 35, "random_cut": "def load_bt_data_detail(self) -> None:\n \n if self.timeframe_detail:\n self.detail_data = history.load_data(\n datadir=self.config['datadir'],\n pairs=self.pairlists.whitelist,\n timeframe=self.timeframe_detail,\n timerange=self.timerange,\n startup_candles=0,\n fail_without_data=True,\n data_format=self.config.get('dataformat_ohlcv', 'json'),\n candle_type=self.config.get('candle_type_def', CandleType.SPOT)\n )\n else:\n self.detail_data = {}\n if self.trading_mode == TradingMode.FUTURES:\n # Load additional futures data.\n funding_rates_dict = history.load_data(\n datadir=self.config['datadir'],\n pairs=self.pairlists.whitelist,\n timeframe=self.exchange._ft_has['mark_ohlcv_timeframe'],\n timerange=self.timerange,\n startup_candles=0,\n fail_without_data=True,\n data_format=self.config.get('dataformat_ohlcv', 'json'),\n candle_type=CandleType.FUNDING_RATE\n )\n\n # For simplicity, assign to CandleType.Mark (might contian index candles!)\n mark_rates_dict = history.load_data(\n datadir=self.config['datadir'],\n pairs=self.pairlists.whitelist,\n timeframe=self.exchange._ft_has['mark_ohlcv_timeframe'],\n timerange=self.timerange,\n startup_candles=0,\n fail_without_data=True,\n data_format=self.config.get('dataformat_ohlcv', 'json'),\n candle_type=CandleType.from_string(se" }, { "id": 297803, "commit_id": "b0cee0bc46cbd7efe0e6421da18d91595c7a25ad", "repo": "core", "path": "homeassistant/components/bond/light.py", "file_name": "light.py", "fun_name": "async_stop", "commit_message": "String formatting and max line length - Part 1 (#84390)\n\nCo-authored-by: Erik Montnemery ", "code": "async def async_stop(self) -> None:\n \n _LOGGER.warning(\n \"The bond.stop service is deprecated and has been replaced with a button;\"\n \" Call the button.press service instead\"\n )\n self._async_has_action_or_raise(Action.STOP)\n await self._hub.bond.action(self._device.device_id, Action(Action.STOP))\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 86, "n_words": 29, "vocab_size": 28, "complexity": 1, "nloc": 8, "token_counts": 45, "n_ast_nodes": 80, "n_identifiers": 12, "random_cut": "async def async_stop(self) -> None:\n \n _LOGGER.warning(\n \"The bond.stop service is deprecated and has been replaced with a button;\"\n \" Call the button.press service instead\"\n )\n self._async_has_a" }, { "id": 301321, "commit_id": "6cac1dadeba6cb81285960db1ab6ec6239547cd9", "repo": "core", "path": "homeassistant/components/zwave_js/sensor.py", "file_name": "sensor.py", "fun_name": "extra_state_attributes", "commit_message": "Clean zwave_js platform typing (#72439)\n\n* Fix binary sensor\r\n\r\n* Fix climate\r\n\r\n* Fix cover\r\n\r\n* Fix fan\r\n\r\n* Fix light\r\n\r\n* Fix lock\r\n\r\n* Fix number\r\n\r\n* Fix select\r\n\r\n* Fix sensor\r\n\r\n* Add back type ignore until 
library bump", "code": "def extra_state_attributes(self) -> dict[str, str] | None:\n \n if (value := self.info.primary_value.value) is None:\n return None\n # add the value's int value as property for multi-value (list) items\n return {ATTR_VALUE: value}\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 69, "n_words": 30, "vocab_size": 28, "complexity": 2, "nloc": 5, "token_counts": 38, "n_ast_nodes": 61, "n_identifiers": 8, "random_cut": "def extra_state_attributes(self) -> dict[str, str] | None:\n \n if (value := se" }, { "id": 314506, "commit_id": "3743d42ade80528325d36357ca6f9629d4970eaa", "repo": "core", "path": "homeassistant/components/smartthings/cover.py", "file_name": "cover.py", "fun_name": "current_cover_position", "commit_message": "Adjust smartthings cover type hints (#73948)", "code": "def current_cover_position(self) -> int | None:\n \n if not self._attr_supported_features & CoverEntityFeature.SET_POSITION:\n return None\n return self._device.status.level\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 47, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 5, "token_counts": 30, "n_ast_nodes": 50, "n_identifiers": 9, "random_cut": "def current_cover_position(self) -> int | None:\n \n " }, { "id": 213207, "commit_id": "b50046a631badcf15ee25b6355a2d2052f6f5bf9", "repo": "ivy", "path": "ivy_tests/test_nn/test_functional/test_activations.py", "file_name": "test_activations.py", "fun_name": "test_softmax", "commit_message": "created backends sub-folder for all backend implementations.", "code": "def test_softmax(x, dtype_str, tensor_fn, dev_str, call):\n # smoke test\n x = tensor_fn(x, dtype_str, dev_str)\n ret = ivy.softmax(x)\n # type test\n assert ivy.is_array(ret)\n # cardinality test\n assert ret.shape == x.shape\n # value test\n assert np.allclose(call(ivy.softmax, x), ivy.backends.numpy.softmax(ivy.to_numpy(x)))\n # compilation test\n if not ivy.wrapped_mode():\n helpers.assert_compilable(ivy.softmax)\n\n\n# softplus\n@pytest.mark.parametrize(\n \"x\", [[[-1., 1., 2.]]])\n@pytest.mark.parametrize(\n \"dtype_str\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"x\", [[[-1., 1., 2.]]])\n@pytest.mark.parametrize(\n \"dtype_str\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 101, "n_words": 57, "vocab_size": 42, "complexity": 2, "nloc": 8, "token_counts": 92, "n_ast_nodes": 231, "n_identifiers": 24, "random_cut": "def test_softmax(x, dtype_str, tensor_fn, dev_str, call):\n # smoke test\n x = tensor_fn(x, dtype_str, dev_str)\n ret = ivy.softmax(x)\n # type test\n assert ivy.is_array(ret)\n # cardinality test\n assert ret.shape == x.shape\n # value test\n assert np.allclose(call(ivy.softmax, x), ivy.backends.numpy.softmax(ivy.to_numpy(x)))\n # compilation test\n if not ivy.wrapped_mode():\n helpers.assert_compilable(ivy.soft" }, { "id": 120722, "commit_id": "ece9b999fb5f85eee6570e5f987ad6704c130503", "repo": "jax", "path": "jax/_src/lax/convolution.py", "file_name": "convolution.py", "fun_name": "_reshape_axis_into", "commit_message": "Fix batching rule for convolution for batch dimensions of size 0.", "code": "def 
_reshape_axis_into(src, dst, x):\n # NB: `dst` is the number of the dimension that we should reshape into\n # *after* `src` is removed from `x`'s list of dimensions. For example, if\n # `src` is an added batch dimension, `dst` might name a target dimension in\n # the unbatched list of dimensions.\n perm = [i for i in range(x.ndim) if i != src]\n perm.insert(dst, src)\n new_shape = list(np.delete(x.shape, src))\n new_shape[dst] *= x.shape[src]\n return lax.reshape(x, new_shape, perm)\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 83, "n_words": 75, "vocab_size": 57, "complexity": 3, "nloc": 6, "token_counts": 73, "n_ast_nodes": 110, "n_identifiers": 16, "random_cut": "def _reshape_axis_into(src, dst, x):\n # NB: `dst` is the number of the dimension that we should reshape into\n # *after* `src` is removed from `x`'s list of dimensions. For example, if\n # `src` is an added batch dimension, `dst` might name a target dimension in\n # the " }, { "id": 263221, "commit_id": "64ccb7aea824fbec57f7ed1bbe483ec486183c13", "repo": "pyinstaller", "path": "bootloader/waflib/TaskGen.py", "file_name": "TaskGen.py", "fun_name": "force_permissions", "commit_message": "Bootloader: Building: Unpack waf's lib archive.\n\nDoing so makes it easier to modify. This is a temporary measure until the next\nwaf version is released (although I'm tempted to keep it since it's much more\nIDE completion friendly).", "code": "def force_permissions(self):\n if getattr(self.generator, 'chmod', None):\n for x in self.outputs:\n os.chmod(x.abspath(), self.generator.chmod)\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 44, "n_words": 12, "vocab_size": 12, "complexity": 3, "nloc": 4, "token_counts": 40, "n_ast_nodes": 63, "n_identifiers": 9, "random_cut": "def force_permissions(self):\n if getattr(self.generator, 'chm" }, { "id": 317791, "commit_id": "7075032bf743f8702d942410c0c41214c90c212b", "repo": "core", "path": "tests/components/generic/test_diagnostics.py", "file_name": "test_diagnostics.py", "fun_name": "test_redact_url", "commit_message": "Fix diagnostics export for generic camera (#75665)\n\nFix url redaction and add tests\r\n\r\nCo-authored-by: Dave T ", "code": "def test_redact_url(url_in, url_out_expected):\n \n url_out = redact_url(url_in)\n assert url_out == url_out_expected\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 19, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 31, "n_identifiers": 5, "random_cut": "def test_redact_url(url_in, url_out_expected):\n \n url_out = redact_url(" }, { "id": 159774, "commit_id": "66a61b03658f3c9f312505dcf7eab07e4cf91ac6", "repo": "numpy", "path": "numpy/lib/tests/test_io.py", "file_name": "test_io.py", "fun_name": "test_loadtxt_converters_negative_indices", "commit_message": "Port over tests from npreadtext test suite\n\n- Add test for parsing scientific notation.\n- Add multiple-char comment test.\n- Port over tests for structured dtypes.\n- Add tests for exceptions on skiprows/max_rows.\n- port over ndmin tests.\n- Make structured data reusable, add unpack tests.\n- Port over delimiter tests.\n- Port over maxrows test w/ various dtypes.\n- Port over test of exception msg on parse failure.\n- Port over test for converters w/neg 
indices.\n- Port over usecols tests\n- Port over unicode tests.\n- Port over more converter tests.\n- Port over test for large rows.\n- Port over test for string-len discovery.\n- Port over float conversion accuracy test.\n- Port over bool test.\n- Add test for implicit float->int conversion.\n- Port over complex parsing tests.\n- Port over tests for reading from generator.\n- Port over object cleanup test.\n- Port over bytes incompat test.\n- Port over converters tests.\n\nCo-authored-by: Warren Weckesser \nCo-authored-by: Sebastian Berg ", "code": "def test_loadtxt_converters_negative_indices():\n txt = TextIO('1.5,2.5\\n3.0,XXX\\n5.5,6.0')\n conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)}\n expected = np.array([[1.5, 2.5], [3.0, np.nan], [5.5, 6.0]])\n res = np.loadtxt(\n txt, dtype=np.float64, delimiter=\",\", converters=conv, encoding=None\n )\n assert_equal(res, expected)\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 60, "n_words": 36, "vocab_size": 33, "complexity": 2, "nloc": 8, "token_counts": 102, "n_ast_nodes": 143, "n_identifiers": 18, "random_cut": "def test_loadtxt_converters_negative_indices():\n txt = TextIO('1.5,2.5\\n3.0,XXX\\n5.5,6.0')\n conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)}\n expected = np.array([[1.5, 2" }, { "id": 139341, "commit_id": "29eebdfef2acb7d278042f38247a7d82473c3fd6", "repo": "ray", "path": "python/ray/tests/test_autoscaler_yaml.py", "file_name": "test_autoscaler_yaml.py", "fun_name": "testValidateLocal", "commit_message": "[Autoscaler][Local Node Provider] Log a warning if max_workers < len(worker_ips) (#24635)\n\nLogs a warning when a user sets max_workers for local node provider less than the number of available ips.\r\n\r\nAlso removes defaults of 0 for min_workers and max_workers from example configs to help prevent users inadvertantly setting max_workers=0 again.", "code": "def testValidateLocal(self):\n \n local_config_path = os.path.join(\n RAY_PATH, \"autoscaler/local/example-minimal-manual.yaml\"\n )\n base_config = yaml.safe_load(open(local_config_path).read())\n base_config[\"provider\"][\"head_ip\"] = \"xxx.yyy\"\n base_config[\"provider\"][\"worker_ips\"] = [\"aaa.bbb\", \"ccc.ddd\", \"eee.fff\"]\n base_config[\"auth\"][\"ssh_user\"] = \"user\"\n base_config[\"auth\"][\"ssh_private_key\"] = \"~/.ssh/id_rsa\"\n\n test_prepare_config = copy.deepcopy(base_config)\n prepared_config = prepare_config(test_prepare_config)\n try:\n validate_config(prepared_config)\n except Exception:\n self.fail(\"Failed to validate local/example-minimal-manual.yaml\")\n expected_prepared = yaml.safe_load(EXPECTED_LOCAL_CONFIG_STR)\n assert prepared_config == expected_prepared\n\n no_worker_config = copy.deepcopy(base_config)\n del no_worker_config[\"provider\"][\"worker_ips\"]\n with pytest.raises(ClickException):\n prepare_config(no_worker_config)\n no_head_config = copy.deepcopy(base_config)\n del no_head_config[\"provider\"][\"head_ip\"]\n with pytest.raises(ClickException):\n prepare_config(no_head_config)\n for field in \"head_node\", \"worker_nodes\", \"available_node_types\":\n faulty_config = copy.deepcopy(base_config)\n faulty_config[field] = \"This field shouldn't be in here.\"\n with pytest.raises(ClickException):\n prepare_config(faulty_config)\n\n too_many_workers_config = copy.deepcopy(base_config)\n\n # More workers requested than the three available ips.\n too_many_workers_config[\"max_workers\"] = 10\n 
too_many_workers_config[\"min_workers\"] = 10\n prepared_config = prepare_config(too_many_workers_config)\n\n # Check that worker config numbers were clipped to 3.\n assert prepared_config == expected_prepared\n\n not_enough_workers_config = copy.deepcopy(base_config)\n\n # Max workers is less than than the three available ips.\n # The user is probably has probably made an error. Make sure we log a warning.\n not_enough_workers_config[\"max_workers\"] = 0\n not_enough_workers_config[\"min_workers\"] = 0\n with mock.patch(\n \"ray.autoscaler._private.local.config.cli_logger.warning\"\n ) as warning:\n prepared_config = prepare_config(not_enough_workers_config)\n warning.assert_called_with(\n \"The value of `max_workers` supplied (0) is less\"\n \" than the number of available worker ips (3).\"\n \" At most 0 Ray worker nodes will connect to the cluster.\"\n )\n expected_prepared = yaml.safe_load(EXPECTED_LOCAL_CONFIG_STR)\n # We logged a warning.\n # However, prepare_config does not repair the strange config setting:\n expected_prepared[\"max_workers\"] = 0\n expected_prepared[\"available_node_types\"][\"local.cluster.node\"][\n \"max_workers\"\n ] = 0\n expected_prepared[\"available_node_types\"][\"local.cluster.node\"][\n \"min_workers\"\n ] = 0\n assert prepared_config == expected_prepared\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 750, "n_words": 228, "vocab_size": 138, "complexity": 3, "nloc": 56, "token_counts": 323, "n_ast_nodes": 597, "n_identifiers": 35, "random_cut": "def testValidateLocal(self):\n \n local_config_path = os.path.join(\n RAY_PATH, \"autoscaler/local/example-minimal-manual.yaml\"\n )\n base_config = yaml.safe_load(open(local_config_path).read())\n base_config[\"provider\"][\"head_ip\"] = \"xxx.yyy\"\n base_config[\"provider\"][\"worker_ips\"] = [\"aaa.bbb\", \"ccc.ddd\", \"eee.fff\"]\n base_config[\"" }, { "id": 73564, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/sitemaps/tests.py", "file_name": "tests.py", "fun_name": "get_request_and_django_site", "commit_message": "Reformat with black", "code": "def get_request_and_django_site(self, url):\n request = RequestFactory().get(url)\n request.META[\"HTTP_HOST\"] = self.site.hostname\n request.META[\"SERVER_PORT\"] = self.site.port\n return request, get_current_site(request)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 42, "n_words": 15, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 48, "n_ast_nodes": 79, "n_identifiers": 11, "random_cut": "def get_request_and_django_site(self, url):\n request = RequestFactory().get(url)\n request.META[\"HTTP_HOST\"] = self.site.hostname\n request.META[\"SERVER_PORT\"] = self.site.port\n return request, get_current_site(request)\n" }, { "id": 122094, "commit_id": "1d895b2c85e17b9f563cd41d9a340528179d29aa", "repo": "jax", "path": "jax/_src/ad_checkpoint.py", "file_name": "ad_checkpoint.py", "fun_name": "dot_with_no_batch_dims", "commit_message": "Fix lax imports", "code": "def dot_with_no_batch_dims(prim, *_, **params) -> bool:\n # This is a useful heuristic for transformers.\n if prim is lax_internal.dot_general_p:\n (_, _), (lhs_b, rhs_b) = params['dimension_numbers']\n if not lhs_b and not rhs_b:\n return True\n return False\n\nname_p = core.Primitive('name')\n", "url": "https://github.com/google/jax.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 49, "n_words": 37, "vocab_size": 32, "complexity": 4, "nloc": 6, "token_counts": 47, "n_ast_nodes": 89, "n_identifiers": 12, "random_cut": "def dot_with_no_batch_dims(prim, *_, **params) -> bool:\n # This is a useful heuristic for transformers.\n if prim is lax_inter" }, { "id": 139015, "commit_id": "f72555262afbbfc1aabb87c9e40839aaaee3ba0b", "repo": "ray", "path": "python/ray/data/impl/block_list.py", "file_name": "block_list.py", "fun_name": "_check_if_cleared", "commit_message": "[Datasets] Provide more efficient + intuitive block clearing semantics for different execution modes (#24127)\n\n**TL;DR:** Don't clear for eager, clear all but non-lazy input blocks if lazy, clear everything if pipelining.\r\n \r\nThis PR provides more efficient and intuitive block clearing semantics for eager mode, lazy mode, and pipelining, while still supporting multiple operations applied to the same base dataset, i.e. fan-out. For example, two different map operations are applied to the same base `ds` in this example:\r\n\r\n```python\r\nds = ray.data.range(10).map(lambda x: x+1)\r\nds1 = ds.map(lambda x: 2*x)\r\nds2 = ds.map(lambda x: 3*x)\r\n```\r\n\r\nIf naively clear the blocks when executing the map to produce `ds1`, the map producing `ds2` will fail.\r\n\r\n### Desired Semantics\r\n\r\n- **Eager mode** - don’t clear input blocks, thereby supporting fan-out from cached data at any point in the stage chain without triggering unexpected recomputation.\r\n- **Lazy mode** - if lazy datasource, clear the input blocks for every stage, relying on recomputing via stage lineage if fan-out occurs; if non-lazy datasource, do not clear source blocks for execution plan when executing first stage, but do clear input blocks for every subsequent stage.\r\n- **Pipelines** - Same as lazy mode, although the only fan-out that can occur is from the pipeline source blocks when repeating a dataset/pipeline, so unintended intermediate recomputation will never happen.", "code": "def _check_if_cleared(self) -> None:\n \n if self.is_cleared():\n raise ValueError(\n \"This Dataset's blocks have been moved, which means that you \"\n \"can no longer use this Dataset.\"\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 92, "n_words": 26, "vocab_size": 26, "complexity": 2, "nloc": 7, "token_counts": 21, "n_ast_nodes": 42, "n_identifiers": 4, "random_cut": "def _check_if_cleared(self) -> None:\n \n if self.is_cleared():\n raise ValueError(\n " }, { "id": 156539, "commit_id": "5fbda77cfc5bc1b8f1453a2dbb034b048fc10726", "repo": "dask", "path": "dask/dataframe/tests/test_groupby.py", "file_name": "test_groupby.py", "fun_name": "test_bfill", "commit_message": "Implement {Series,DataFrame}GroupBy `fillna` methods (#8869)\n\nCo-authored-by: Ian Rose ", "code": "def test_bfill():\n df = pd.DataFrame(\n {\n \"A\": [1, 1, 2, 2],\n \"B\": [3, 4, 3, 4],\n \"C\": [np.nan, 3, np.nan, np.nan],\n \"D\": [np.nan, 4, np.nan, 5],\n \"E\": [np.nan, 6, np.nan, 7],\n }\n )\n ddf = dd.from_pandas(df, npartitions=2)\n assert_eq(\n df.groupby(\"A\").bfill(),\n ddf.groupby(\"A\").bfill(),\n )\n assert_eq(\n df.groupby(\"A\").B.bfill(),\n ddf.groupby(\"A\").B.bfill(),\n )\n assert_eq(\n df.groupby([\"A\", \"B\"]).bfill(),\n ddf.groupby([\"A\", \"B\"]).bfill(),\n )\n\n\n@pytest.mark.parametrize(\n \"grouper\",\n [\n lambda df: [\"a\"],\n lambda df: [\"a\", \"b\"],\n 
lambda df: df[\"a\"],\n lambda df: [df[\"a\"], df[\"b\"]],\n lambda df: [df[\"a\"] > 2, df[\"b\"] > 1],\n ],\n)", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"grouper\",\n [\n lambda df: [\"a\"],\n lambda df: [\"a\", \"b\"],\n lambda df: df[\"a\"],\n lambda df: [df[\"a\"], df[\"b\"]],\n lambda df: [df[\"a\"] > 2, df[\"b\"] > 1],\n ],\n)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 257, "n_words": 78, "vocab_size": 54, "complexity": 1, "nloc": 23, "token_counts": 186, "n_ast_nodes": 411, "n_identifiers": 17, "random_cut": "def test_bfill():\n df = pd.DataFrame(\n {\n \"A\": [1, 1, 2, 2],\n \"B\": [3, 4, 3, 4],\n \"C\": [np.nan, 3, np.nan, np.nan],\n \"D\": [np.nan, 4, np.nan, 5],\n \"E\": [np.nan, 6, np.nan, 7],\n }\n )\n ddf = dd.from_pandas(df, npartit" }, { "id": 106625, "commit_id": "b4115c0337b1bacc876bef1ece97e8fa8b3e2834", "repo": "visdom", "path": "example/components/plot_line.py", "file_name": "plot_line.py", "fun_name": "plot_line_stackedarea", "commit_message": "test: split demo.py into seperate files and functions", "code": "def plot_line_stackedarea(viz, env):\n Y = np.linspace(0, 4, 200)\n return viz.line(\n Y=np.column_stack((np.sqrt(Y), np.sqrt(Y) + 2)),\n X=np.column_stack((Y, Y)),\n opts=dict(\n fillarea=True,\n showlegend=False,\n width=800,\n height=800,\n xlabel='Time',\n ylabel='Volume',\n ytype='log',\n title='Stacked area plot',\n marginleft=30,\n marginright=30,\n marginbottom=80,\n margintop=30,\n ),\n )\n\n# Assure that the stacked area plot isn't giant", "url": "https://github.com/fossasia/visdom.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 209, "n_words": 42, "vocab_size": 41, "complexity": 1, "nloc": 20, "token_counts": 117, "n_ast_nodes": 171, "n_identifiers": 24, "random_cut": "def plot_line_stackedarea(viz, env):\n Y = np.linspace(0, 4, 200)\n " }, { "id": 61123, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/provider.py", "file_name": "provider.py", "fun_name": "identify", "commit_message": "upd; format", "code": "def identify(self, requirement_or_candidate):\n # type: (Union[Requirement, Candidate]) -> str\n return requirement_or_candidate.name\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 24, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 18, "n_identifiers": 4, "random_cut": "def identify(self, requirement_or_candidate):\n # type: (Union[Requirement, Candidate]) -> str\n return requirement_or_candidate.na" }, { "id": 60419, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/scripts/cpp_lint.py", "file_name": "cpp_lint.py", "fun_name": "CleanseComments", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def CleanseComments(line):\n \n commentpos = line.find('//')\n if commentpos != -1 and not IsCppString(line[:commentpos]):\n line = line[:commentpos].rstrip()\n # get rid of /* ... 
*/\n return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 33, "n_words": 25, "vocab_size": 23, "complexity": 3, "nloc": 5, "token_counts": 50, "n_ast_nodes": 88, "n_identifiers": 8, "random_cut": "def CleanseComments(line):\n \n comment" }, { "id": 60801, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/metadata/pkg_resources.py", "file_name": "pkg_resources.py", "fun_name": "from_wheel", "commit_message": "upd; format", "code": "def from_wheel(cls, path, name):\n # type: (str, str) -> Distribution\n with zipfile.ZipFile(path, allowZip64=True) as zf:\n dist = pkg_resources_distribution_for_wheel(zf, name, path)\n return cls(dist)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 53, "n_words": 22, "vocab_size": 22, "complexity": 1, "nloc": 4, "token_counts": 38, "n_ast_nodes": 60, "n_identifiers": 10, "random_cut": "def from_wheel(cls, path, name):\n # type: (str, str) -> Distribution\n with zipfile.ZipFile(path, allowZip64=True) as zf:\n d" }, { "id": 43497, "commit_id": "25537acfa28eebc82a90274840e0e6fb5c91e271", "repo": "airflow", "path": "airflow/utils/sqlalchemy.py", "file_name": "sqlalchemy.py", "fun_name": "load_dialect_impl", "commit_message": "Have consistent types between the ORM and the migration files (#24044)\n\nWe currently don't compare column types between ORM and the migration files. Some columns in the migration files have different types from the same columns in the ORM.\r\nHere, I made effort to match the types in migration files with the\r\n types in ORM, using the migration files as the source of truth in most cases.\r\n\r\nI couldn't convert the MySQL VARCHAR collation in db(utf8_bin) to use the one in ORM(utf8mb3_bin). It seems it's not possible to convert a collation of an already existing column in MySQL.", "code": "def load_dialect_impl(self, dialect):\n if dialect.name == 'mssql':\n return mssql.DATETIME2(precision=6)\n elif dialect.name == 'mysql':\n return mysql.TIMESTAMP(fsp=6)\n return super().load_dialect_impl(dialect)\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 59, "n_words": 17, "vocab_size": 13, "complexity": 3, "nloc": 6, "token_counts": 48, "n_ast_nodes": 80, "n_identifiers": 11, "random_cut": "def load_dialect_impl(self, dialect):\n if dialect.name == 'mssq" }, { "id": 90873, "commit_id": "b9f5a910dc841b85f58d46266ec049ae5a7fd305", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_release_details.py", "file_name": "test_organization_release_details.py", "fun_name": "test_activity_generation_long_release", "commit_message": "ref(models): `ActivityType` (#34978)\n\n## Objective:\r\nWe want to separate enum logic from Model logic. 
This breaks a lot of circular dependencies.", "code": "def test_activity_generation_long_release(self):\n user = self.create_user(is_staff=False, is_superuser=False)\n org = self.organization\n org.flags.allow_joinleave = False\n org.save()\n\n team = self.create_team(organization=org)\n\n project = self.create_project(teams=[team], organization=org)\n\n release = Release.objects.create(organization_id=org.id, version=\"x\" * 65)\n\n release.add_project(project)\n\n self.create_member(teams=[team], user=user, organization=org)\n\n self.login_as(user=user)\n\n url = reverse(\n \"sentry-api-0-organization-release-details\",\n kwargs={\"organization_slug\": org.slug, \"version\": release.version},\n )\n response = self.client.put(url, data={\"dateReleased\": datetime.utcnow().isoformat() + \"Z\"})\n\n assert response.status_code == 200, (response.status_code, response.content)\n\n release = Release.objects.get(id=release.id)\n assert release.date_released\n\n activity = Activity.objects.filter(\n type=ActivityType.RELEASE.value, project=project, ident=release.version[:64]\n )\n assert activity.exists()\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 232, "n_words": 67, "vocab_size": 53, "complexity": 1, "nloc": 23, "token_counts": 235, "n_ast_nodes": 370, "n_identifiers": 50, "random_cut": "def test_activity_generation_long_release(self):\n user = self.create_user(is_staff=False, is_superuser=False)\n org = self.organization\n org.flags.allow_joinleave = False\n org.save()\n\n team = self.create_team(organization=org)\n\n project = self.create_project(teams=[team], organization=org)\n\n release = Release.objects.create(organization_id=org.id, version=\"x\" * 65)\n\n release.add_project(project)\n\n self.create_member(teams=[team], user=user, organization=org)\n\n self.login_as(user=user)\n\n url = reverse(\n \"sentry-api-0-organization-release-details\",\n kwargs={\"organization_slug\": org.slug, \"version\": release.version},\n )\n response = self.client.put(url, data={\"dateRel" }, { "id": 190250, "commit_id": "4fc3616712edb19179b17dd270ad6cf63abf99c2", "repo": "DeOldify", "path": "fastai/data_block.py", "file_name": "data_block.py", "fun_name": "split_by_list", "commit_message": "Upgrading to support latest Pytorch version", "code": "def split_by_list(self, train, valid):\n \"Split the data between `train` and `valid`.\"\n return self._split(self.path, train, valid)\n", "url": "https://github.com/jantic/DeOldify.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 28, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 3, "token_counts": 23, "n_ast_nodes": 35, "n_identifiers": 6, "random_cut": "def split_by_list(self, train, valid):\n \"Split the data between `train` and `val" }, { "id": 12744, "commit_id": "6f5b3f2a9b13c2eae78b746531132cbfcdc8c2da", "repo": "jina", "path": "tests/integration/gateway_clients/test_clients_gateways.py", "file_name": "test_clients_gateways.py", "fun_name": "test_grpc_gateway_runtime_lazy_request_access", "commit_message": "fix: fix endpoint discovery tries (#5014)", "code": "def test_grpc_gateway_runtime_lazy_request_access(linear_graph_dict, monkeypatch):\n call_counts = multiprocessing.Queue()\n\n monkeypatch.setattr(\n networking.GrpcConnectionPool,\n 'send_requests_once',\n DummyNoDocAccessMockConnectionPool.send_requests_once,\n )\n monkeypatch.setattr(\n networking.GrpcConnectionPool,\n 
'send_discover_endpoint',\n DummyMockConnectionPool.send_discover_endpoint,\n )\n port = random_port()\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 78, "n_words": 19, "vocab_size": 15, "complexity": 4, "nloc": 40, "token_counts": 183, "n_ast_nodes": 78, "n_identifiers": 15, "random_cut": "def test_grpc_gateway_runtime_lazy_request_access(linear_graph_dict, monkeypatch):\n call_counts = multiprocessing.Queue()\n\n monkeypatch.setattr(\n networking.GrpcConnectionPool,\n 'send_requests_once',\n DummyNoDocAccessMockConnectionPool.send_requests_once,\n )\n monkeypatch.setattr(\n networking.GrpcConnectionPool,\n 'send" }, { "id": 6983, "commit_id": "ae25cc4c5a229bbc44339249e1f94bf256f18317", "repo": "ludwig", "path": "ludwig/utils/defaults.py", "file_name": "defaults.py", "fun_name": "_perform_sanity_checks", "commit_message": "Comprehensive configs for trainer and combiner. (#2118)", "code": "def _perform_sanity_checks(config):\n assert \"input_features\" in config, \"config does not define any input features\"\n\n assert \"output_features\" in config, \"config does not define any output features\"\n\n assert isinstance(config[\"input_features\"], list), (\n \"Ludwig expects input features in a list. Check your model \" \"config format\"\n )\n\n assert isinstance(config[\"output_features\"], list), (\n \"Ludwig expects output features in a list. Check your model \" \"config format\"\n )\n\n assert len(config[\"input_features\"]) > 0, \"config needs to have at least one input feature\"\n\n assert len(config[\"output_features\"]) > 0, \"config needs to have at least one output feature\"\n\n if TRAINER in config:\n assert isinstance(config[TRAINER], dict), (\n \"There is an issue while reading the training section of the \"\n \"config. The parameters are expected to be\"\n \"read as a dictionary. Please check your config format.\"\n )\n\n if \"preprocessing\" in config:\n assert isinstance(config[\"preprocessing\"], dict), (\n \"There is an issue while reading the preprocessing section of the \"\n \"config. The parameters are expected to be read\"\n \"as a dictionary. Please check your config format.\"\n )\n\n if COMBINER in config:\n assert isinstance(config[COMBINER], dict), (\n \"There is an issue while reading the combiner section of the \"\n \"config. The parameters are expected to be read\"\n \"as a dictionary. Please check your config format.\"\n )\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 384, "n_words": 197, "vocab_size": 78, "complexity": 4, "nloc": 29, "token_counts": 134, "n_ast_nodes": 241, "n_identifiers": 8, "random_cut": "def _perform_sanity_checks(config):\n assert \"input_features\" in config, \"config does not define any input features\"\n\n assert \"output_features\" in config, \"config does not define any output features\"\n\n assert isinstance(config[\"input_features\"], list), (\n \"Ludwig expects input features in a list. Check your model \" \"config format\"\n )\n\n assert isinstance(config[\"output_features\"], list), (\n \"Ludwig expects output features in a list. 
Check your model \" \"config format\"\n )\n\n assert len(config[\"input_fe" }, { "id": 266388, "commit_id": "97104f1221b64ef36cf42cb90c5a0eff263a2adb", "repo": "ansible", "path": "test/units/plugins/callback/test_callback.py", "file_name": "test_callback.py", "fun_name": "test_host_label", "commit_message": "Avoid deprecated TestCase functions in unit tests. (#76678)\n\n* Avoid deprecated TestCase functions in unit tests.\r\n* Add assertRaisesRegex for Python 2.7.\r\n* Fix indentation.", "code": "def test_host_label(self):\n result = TaskResult(host=Host('host1'), task=mock_task, return_data={})\n\n self.assertEqual(CallbackBase.host_label(result), 'host1')\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 22, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 38, "n_ast_nodes": 63, "n_identifiers": 12, "random_cut": "def test_host_label(self):\n " }, { "id": 156242, "commit_id": "0b36d7fcaf54ee9a78fff4b07f124cb0c8741cdf", "repo": "dask", "path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "fun_name": "test_writing_parquet_with_kwargs", "commit_message": "Remove pyarrow-legacy engine from parquet API (#8835)\n\n* remove pyarrow-legacy\r\n\r\n* Small fixup\r\n\r\n* Small fixup for pyarrow < 5\r\n\r\nCo-authored-by: Jim Crist-Harif ", "code": "def test_writing_parquet_with_kwargs(tmpdir, engine):\n fn = str(tmpdir)\n path1 = os.path.join(fn, \"normal\")\n path2 = os.path.join(fn, \"partitioned\")\n\n df = pd.DataFrame(\n {\n \"a\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"b\": np.random.random(size=100),\n \"c\": np.random.randint(1, 5, size=100),\n }\n )\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=3)\n\n engine_kwargs = {\n \"pyarrow\": {\n \"compression\": \"snappy\",\n \"coerce_timestamps\": None,\n \"use_dictionary\": True,\n },\n \"fastparquet\": {\"compression\": \"snappy\", \"times\": \"int64\", \"fixed_text\": None},\n }\n\n ddf.to_parquet(path1, engine=engine, **engine_kwargs[engine])\n out = dd.read_parquet(path1, engine=engine)\n assert_eq(out, ddf, check_index=(engine != \"fastparquet\"))\n\n # Avoid race condition in pyarrow 0.8.0 on writing partitioned datasets\n with dask.config.set(scheduler=\"sync\"):\n ddf.to_parquet(\n path2, engine=engine, partition_on=[\"a\"], **engine_kwargs[engine]\n )\n out = dd.read_parquet(path2, engine=engine).compute()\n for val in df.a.unique():\n assert set(df.b[df.a == val]) == set(out.b[out.a == val])\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 285, "n_words": 105, "vocab_size": 84, "complexity": 2, "nloc": 31, "token_counts": 284, "n_ast_nodes": 465, "n_identifiers": 40, "random_cut": "def test_writing_parquet_with_kwargs(tmpdir, engine):\n fn = str(tmpdir)\n path1 = os.path.join(fn, \"normal\")\n path2 = os.path.join(fn, \"partitioned\")\n\n df = pd.DataFrame(\n {\n \"a\": np.random.choice([\"A\", \"B\", \"C\"], size=100),\n \"b\": np.random.random(size=100),\n \"c\": np.random.randint(1, 5, size=100),\n }\n )\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=3)\n\n engine_kwargs = {\n \"pyarrow\": {\n \"compression\": \"snappy\",\n \"coerce_timestamps\": None,\n \"use_dictionary\": True,\n },\n \"fastparquet\": {\"compression\": \"snappy\", \"times\": \"int64\", \"fixed_text\": None},\n }\n\n ddf.to_parquet(path1, engine=engine, 
**engine_kwargs[engine" }, { "id": 28271, "commit_id": "8201efcde2d7aacccf3512c544cceea6780a0598", "repo": "saleor", "path": "saleor/plugins/webhook/tests/subscription_webhooks/fixtures.py", "file_name": "fixtures.py", "fun_name": "subscription_order_fulfilled_webhook", "commit_message": "GraphQL subscription support for synchronous webhook events (#9763)\n\n* WIP add sync webhooks subscription payload handling\r\n\r\n* add tests, fix minor things\r\n\r\n* update schema\r\n\r\n* remove unneeded code\r\n\r\n* add fix for circular field resolve\r\n\r\n* fix-filter-shipping-methods-payload\r\n\r\n* added_in added to desription\r\n\r\n* add missing types\r\n\r\n* revert refactor, precommit issues\r\n\r\n* fixes after review\r\n\r\n* cosmetix fixes post-review\r\n\r\n* subscription types description fixes\r\n\r\n* remove unneeded description from PaymentBase\r\n\r\n* add validation for creating webhook with two top level fields, add tests for shippingListMethodsForCheckout\r\n\r\n* add docstring, refactor prevent_sync_event_circular_wuery wrapper\r\n\r\n* fix docstring of revent_sync_event_circular_query\r\n\r\n* fix linters", "code": "def subscription_order_fulfilled_webhook(subscription_webhook):\n return subscription_webhook(\n queries.ORDER_FULFILLED, WebhookEventAsyncType.ORDER_FULFILLED\n )\n\n\n@pytest.fixture", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 19, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 16, "n_ast_nodes": 32, "n_identifiers": 7, "random_cut": "def subscription_order_fulfilled_webhook(subscription_webhook):\n return subscription_webhook(\n queries.ORDER_FULFILLED, WebhookEventAsyncType.ORDER_FULFILLED\n )\n\n\n@pytest.fixture" }, { "id": 1804, "commit_id": "a81b66ea18721dc36c77aefac733dd224f48cc87", "repo": "PySyft", "path": "packages/syft/src/syft/core/tensor/autodp/phi_tensor.py", "file_name": "phi_tensor.py", "fun_name": "_object2bytes", "commit_message": "add data subject and data subject shape serialization to GammaTensor", "code": "def _object2bytes(self) -> bytes:\n schema = get_capnp_schema(schema_file=\"phi_tensor.capnp\")\n\n pt_struct: CapnpModule = schema.PT # type: ignore\n pt_msg = pt_struct.new_message()\n # this is how we dispatch correct deserialization of bytes\n pt_msg.magicHeader = serde_magic_header(type(self))\n\n # We always have FPT as the child of an PT in the tensor chain.\n chunk_bytes(serialize(self.child, to_bytes=True), \"child\", pt_msg) # type: ignore\n\n pt_msg.minVals = serialize(self.min_vals, to_bytes=True)\n pt_msg.maxVals = serialize(self.max_vals, to_bytes=True)\n pt_msg.dataSubjects = serialize(\n dslarraytonumpyutf8(self.data_subjects), to_bytes=True\n )\n pt_msg.dataSubjectsShape = serialize(self.data_subjects.shape, to_bytes=True)\n # to pack or not to pack?\n # to_bytes = pt_msg.to_bytes()\n\n return pt_msg.to_bytes_packed()\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 200, "n_words": 83, "vocab_size": 63, "complexity": 1, "nloc": 13, "token_counts": 124, "n_ast_nodes": 202, "n_identifiers": 28, "random_cut": "def _object2bytes(self) -> bytes:\n schema = get_capnp_schema(schema_file=\"phi_tensor.capnp\")\n\n pt_struct: CapnpModule = schema.PT # type: ignore\n pt_msg = pt_struct.new_message()\n # this is how we dispatch correct deserialization of bytes\n pt_msg.magicHeader = 
serde_magic_header(type(self))\n\n # We always have FPT as the child of an PT in the tensor chain.\n chunk_bytes(serialize(self.child, to_bytes=True), \"child\", pt_msg) # type: ignore\n\n pt_msg.minVals = serialize(self.min_vals, to_bytes=True)\n pt_msg.maxVals = serialize(self.max_vals, to_bytes=True)\n pt_msg.dataSubjects = serialize(\n dslarraytonumpyutf8(self.da" }, { "id": 298501, "commit_id": "121d2008c2e98c94775f0379ccd4eedc15476d7d", "repo": "core", "path": "homeassistant/components/tplink/light.py", "file_name": "light.py", "fun_name": "supported_color_modes", "commit_message": "Use ColorMode enum in tplink (#70542)", "code": "def supported_color_modes(self) -> set[ColorMode | str] | None:\n \n modes: set[ColorMode | str] = set()\n if self.device.is_variable_color_temp:\n modes.add(ColorMode.COLOR_TEMP)\n if self.device.is_color:\n modes.add(ColorMode.HS)\n if self.device.is_dimmable:\n modes.add(ColorMode.BRIGHTNESS)\n\n if not modes:\n modes.add(ColorMode.ONOFF)\n\n return modes\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 122, "n_words": 29, "vocab_size": 21, "complexity": 5, "nloc": 12, "token_counts": 86, "n_ast_nodes": 140, "n_identifiers": 15, "random_cut": "def supported_color_modes(self) -> set[ColorMode | str] | None:\n \n modes: set[ColorMode | str] = set()\n if self.device.is_variable_color_temp:\n modes.add(ColorMode.COLOR_TEMP)\n if self.device.is_color:\n m" }, { "id": 105154, "commit_id": "ab7d3045ac9154e9c1c2602d0869130defdc6dc7", "repo": "datasets", "path": "tests/test_patching.py", "file_name": "test_patching.py", "fun_name": "test_patch_submodule_missing_builtin", "commit_message": "Support DataLoader with num_workers > 0 in streaming mode (#4375)\n\n* make TorchIterableDataset work in parallel\r\n- make it picklable\r\n- paralellize over the shards when num_workers is passed\r\n\r\n* start writing some tests\r\n\r\n* fix streaming extension and fsspec issues in subprocesses\r\n\r\n* fix some tests\r\n\r\n* fix more tests\r\n\r\n* fix import\r\n\r\n* fix and add tests\r\n\r\n* fix patch (handle successive patches and builtins)\r\n\r\n* revert unnecessary change to enriched_web_blg\r\n\r\n* style\r\n\r\n* use open locally to fix win permission errors\r\n\r\n* keep file opened in read_csv\r\n\r\n* fix compression for read_csv\r\n\r\n* consistency of read_csv: don't infer compression for file-like objects\r\n\r\n* stringify Path objects\r\n\r\n* comments + raise error if sharding is ambiguous\r\n\r\n* minor\r\n\r\n* Update src/datasets/iterable_dataset.py\r\n\r\nCo-authored-by: Mario Šaško \r\n\r\nCo-authored-by: Mario Šaško ", "code": "def test_patch_submodule_missing_builtin():\n # builtin should always be mocked even if they're not in the globals\n # in case they're loaded at one point\n mock = \"__test_patch_submodule_missing_builtin_mock__\"\n # _test_patching doesn't have \"len\" in its globals\n assert getattr(_test_patching, \"len\", None) is None\n with patch_submodule(_test_patching, \"len\", mock):\n assert _test_patching.len is mock\n assert _test_patching.len is len\n\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 79, "n_words": 52, "vocab_size": 39, "complexity": 1, "nloc": 6, "token_counts": 40, "n_ast_nodes": 71, "n_identifiers": 6, "random_cut": "def test_patch_submodule_missing_builtin():\n # builtin should always be mocked even if 
they're not in the globals\n # in case they're loaded at one point\n mock = \"__test" }, { "id": 283567, "commit_id": "b71abcfbf4d7e8ac1855522aff0378e13c8b5362", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/stocks/fundamental_analysis/test_market_watch_view.py", "file_name": "test_market_watch_view.py", "fun_name": "test_call_func_no_parser", "commit_message": "Updating some names (#1575)\n\n* quick econ fix\r\n\r\n* black\r\n\r\n* keys and feature flags\r\n\r\n* terminal name :eyes:\r\n\r\n* some more replacements\r\n\r\n* some more replacements\r\n\r\n* edit pyproject\r\n\r\n* gst -> openbb\r\n\r\n* add example portfolios back to git\r\n\r\n* Update api from gst\r\n\r\n* sorry. skipping some tests\r\n\r\n* another round of names\r\n\r\n* another round of test edits\r\n\r\n* Missed some .gst refs and update timezone\r\n\r\n* water mark stuff\r\n\r\n* Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS\r\n\r\n* fix more GST to OpenBB Terminal\r\n\r\n* Logging : merge conflicts with main\r\n\r\n* Revert wrong files\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA ", "code": "def test_call_func_no_parser(func, mocker):\n mocker.patch(\n \"openbb_terminal.stocks.fundamental_analysis.market_watch_view.parse_known_args_and_warn\",\n return_value=None,\n )\n\n func_result = getattr(market_watch_view, func)(other_args=list(), ticker=\"TSLA\")\n assert func_result is None\n getattr(market_watch_view, \"parse_known_args_and_warn\").assert_called_once()\n\n\n@pytest.mark.vcr\n@pytest.mark.record_stdout\n@pytest.mark.parametrize(\n \"func\",\n [\n \"income\",\n \"balance\",\n \"cash\",\n ],\n)\n@pytest.mark.parametrize(\n \"use_color\",\n [True, False],\n)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr\n@pytest.mark.record_stdout\n@pytest.mark.parametrize(\n \"func\",\n [\n \"income\",\n \"balance\",\n \"cash\",\n ],\n)\n@pytest.mark.parametrize(\n \"use_color\",\n [True, False],\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 91, "n_words": 33, "vocab_size": 28, "complexity": 1, "nloc": 8, "token_counts": 51, "n_ast_nodes": 160, "n_identifiers": 17, "random_cut": "def test_call_func_no_parser(func, mocker):\n mocker.patch(\n \"openbb_terminal.stocks.fundamental_analysis.market_watch_view.parse_known_args_and_warn\",\n return_value=None,\n )\n\n func_result = getattr(market_watch_view, func)(other_args=list(), ticker=\"TSLA\")\n assert func_result is None\n getattr(market_watch_view, \"parse_known_args_and_warn\").assert_called_once()\n\n\n@pytest.mark.vcr\n@pytest.mark.record_stdout\n@pytest.mark.parametrize(\n " }, { "id": 71347, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/api/test_pages.py", "file_name": "test_pages.py", "fun_name": "test_all_nested_fields", "commit_message": "Reformat with black", "code": "def test_all_nested_fields(self):\n response = self.get_response(\n type=\"demosite.BlogEntryPage\", fields=\"feed_image(*)\"\n )\n content = json.loads(response.content.decode(\"UTF-8\"))\n\n for page in content[\"items\"]:\n self.assertEqual(\n set(page[\"feed_image\"].keys()),\n {\"id\", \"meta\", \"title\", \"width\", \"height\", \"thumbnail\"},\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 114, "n_words": 24, "vocab_size": 22, "complexity": 2, "nloc": 10, 
"token_counts": 73, "n_ast_nodes": 129, "n_identifiers": 14, "random_cut": "def test_all_nested_fields(self):\n response = self.get_response(\n type=\"demosite.BlogEntryPage\", fields=\"feed_image(*)\"\n )\n content = json.loads(response.content.decode(\"UTF-8\"))\n\n for page in content[\"items\"]:\n self.assertEqual(\n set(page[\"feed_image\"].keys()),\n " }, { "id": 95790, "commit_id": "7fbf708470ba13992a5d53b088be2388a8ed93df", "repo": "sentry", "path": "tests/snuba/sessions/test_sessions_v2.py", "file_name": "test_sessions_v2.py", "fun_name": "test_massage_simple_timeseries", "commit_message": "fix(sessions): Order results by timestamp and log error if snuba limit exceeded (#31214)\n\nAs described in https://getsentry.atlassian.net/browse/ISSUE-1372, gaps\r\noccur in sessions_v2 time series when the number of releases is large.\r\nThis seems to be caused by the fact that snuba applies a default limit\r\nof 1000. The sessions API queries these series without an orderBy\r\nconstraint, so a random subset of entries default to zero.\r\n\r\nThis PR logs an error if this limit is actually reached. Furthermore, we\r\nadd an order by clause to the snuba query, such that at least the most\r\nrecent part of the time series is complete.", "code": "def test_massage_simple_timeseries():\n \n\n query = _make_query(\"statsPeriod=1d&interval=6h&field=sum(session)\")\n result_totals = [{\"sessions\": 4}]\n # snuba returns the datetimes as strings for now\n result_timeseries = [\n {\"sessions\": 2, \"bucketed_started\": \"2020-12-18T06:00:00+00:00\"},\n {\"sessions\": 2, \"bucketed_started\": \"2020-12-17T12:00:00+00:00\"},\n ]\n\n expected_result = {\n \"start\": \"2020-12-17T12:00:00Z\",\n \"end\": \"2020-12-18T11:15:00Z\",\n \"query\": \"\",\n \"intervals\": [\n \"2020-12-17T12:00:00Z\",\n \"2020-12-17T18:00:00Z\",\n \"2020-12-18T00:00:00Z\",\n \"2020-12-18T06:00:00Z\",\n ],\n \"groups\": [\n {\"by\": {}, \"series\": {\"sum(session)\": [2, 0, 0, 2]}, \"totals\": {\"sum(session)\": 4}}\n ],\n }\n\n actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))\n\n assert actual_result == expected_result\n\n\n@freeze_time(\"2020-12-18T11:14:17.105Z\")", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@freeze_time(\"2020-12-18T11:14:17.105Z\")", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 218, "n_words": 71, "vocab_size": 56, "complexity": 1, "nloc": 23, "token_counts": 125, "n_ast_nodes": 240, "n_identifiers": 10, "random_cut": "def test_massage_simple_timeseries():\n \n\n query = _make_query(\"statsPeriod=1d&interval=6h&field=sum(session)\")\n result_totals = [{\"sessions\": 4}]\n # snuba returns the datetimes as strings for now\n result_timeseries = [\n {\"sessions\": 2, \"bucketed_started\": \"2020-12-18T06:00:00+00:00\"},\n {\"sessions\": 2, \"bucketed_started\": \"2020-12-17T12:00:00+00:00\"},\n ]\n\n expected_result = {\n \"start\": \"2020-12-17T12:00:00Z\",\n \"end\": \"2020-12-18T11:15:00Z\",\n \"query\": \"\",\n \"intervals\": [\n \"2020-12-17T12:00:00Z\",\n \"2020-12-1" }, { "id": 260984, "commit_id": "60cc5b596f38d0d236dab34e02c05d98b5a72bad", "repo": "scikit-learn", "path": "sklearn/metrics/tests/test_pairwise_distances_reduction.py", "file_name": "test_pairwise_distances_reduction.py", "fun_name": "test_pairwise_distances_reduction_is_usable_for", "commit_message": "FEA Fused sparse-dense support for `PairwiseDistancesReduction` (#23585)\n\n\r\n\r\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Christian 
Lorentzen \r\nCo-authored-by: Jérémie du Boisberranger \r\nCo-authored-by: Thomas J. Fan \r\nCo-authored-by: Meekail Zain ", "code": "def test_pairwise_distances_reduction_is_usable_for():\n rng = np.random.RandomState(0)\n X = rng.rand(100, 10)\n Y = rng.rand(100, 10)\n X_csr = csr_matrix(X)\n Y_csr = csr_matrix(Y)\n metric = \"manhattan\"\n\n # Must be usable for all possible pair of {dense, sparse} datasets\n assert BaseDistanceReductionDispatcher.is_usable_for(X, Y, metric)\n assert BaseDistanceReductionDispatcher.is_usable_for(X_csr, Y_csr, metric)\n assert BaseDistanceReductionDispatcher.is_usable_for(X_csr, Y, metric)\n assert BaseDistanceReductionDispatcher.is_usable_for(X, Y_csr, metric)\n\n assert BaseDistanceReductionDispatcher.is_usable_for(\n X.astype(np.float64), Y.astype(np.float64), metric\n )\n\n assert BaseDistanceReductionDispatcher.is_usable_for(\n X.astype(np.float32), Y.astype(np.float32), metric\n )\n\n assert not BaseDistanceReductionDispatcher.is_usable_for(\n X.astype(np.int64), Y.astype(np.int64), metric\n )\n\n assert not BaseDistanceReductionDispatcher.is_usable_for(X, Y, metric=\"pyfunc\")\n assert not BaseDistanceReductionDispatcher.is_usable_for(\n X.astype(np.float32), Y, metric\n )\n assert not BaseDistanceReductionDispatcher.is_usable_for(\n X, Y.astype(np.int32), metric\n )\n\n # F-ordered arrays are not supported\n assert not BaseDistanceReductionDispatcher.is_usable_for(\n np.asfortranarray(X), Y, metric\n )\n\n # We prefer not to use those implementations for fused sparse-dense when\n # metric=\"(sq)euclidean\" because it's not yet the most efficient one on\n # all configurations of datasets.\n # See: https://github.com/scikit-learn/scikit-learn/pull/23585#issuecomment-1247996669 # noqa\n # TODO: implement specialisation for (sq)euclidean on fused sparse-dense\n # using sparse-dense routines for matrix-vector multiplications.\n assert not BaseDistanceReductionDispatcher.is_usable_for(\n X_csr, Y, metric=\"euclidean\"\n )\n assert not BaseDistanceReductionDispatcher.is_usable_for(\n X_csr, Y_csr, metric=\"sqeuclidean\"\n )\n\n # CSR matrices without non-zeros elements aren't currently supported\n # TODO: support CSR matrices without non-zeros elements\n X_csr_0_nnz = csr_matrix(X * 0)\n assert not BaseDistanceReductionDispatcher.is_usable_for(X_csr_0_nnz, Y, metric)\n\n # CSR matrices with int64 indices and indptr (e.g. 
large nnz, or large n_features)\n # aren't supported as of now.\n # See: https://github.com/scikit-learn/scikit-learn/issues/23653\n # TODO: support CSR matrices with int64 indices and indptr\n X_csr_int64 = csr_matrix(X)\n X_csr_int64.indices = X_csr_int64.indices.astype(np.int64)\n assert not BaseDistanceReductionDispatcher.is_usable_for(X_csr_int64, Y, metric)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 429, "n_words": 235, "vocab_size": 114, "complexity": 1, "nloc": 41, "token_counts": 318, "n_ast_nodes": 494, "n_identifiers": 23, "random_cut": "def test_pairwise_distances_reduction_is_usable_for():\n rng = np.random.RandomState(0)\n X = rng.rand(100, 10)\n Y = rng.rand(100, 10)\n X_csr = csr_matrix(X)\n Y_csr = csr_matrix(Y)\n metric = \"manhattan\"\n\n # Must be usable for all possible pair of {dense, sparse} datasets\n assert BaseDistanceReductionDispatcher.is_usable_for(X, Y, metric)\n assert BaseDistanceReductionDispatcher.is_usable_for(X_csr, Y_csr, metric)\n assert BaseDistanceReductionDispatcher.is_usable_for(X_csr, Y, metric)\n assert BaseDistanceReductionDispatcher.is_usable_for(X, Y_csr, metric)\n\n assert BaseDistanceReductionDispatcher.is_usable_for(\n X.astype(np.float64), Y.astype(np.float64), metric\n )\n\n assert BaseDistanceReductionDispatcher.is_usable_for(\n X.astype(np.float32), Y.astype(np.float32), metric\n )\n\n assert not BaseDistanceReductionDispatcher.is_usable_for(\n X.astype(np.int64), Y.astype(np.int64), metric\n )\n\n assert not BaseDistanceReductionDispatcher.is_usable_for(X, Y, metric=\"pyfunc\")\n assert not BaseDistanceReductionDispatcher.is_usable_for(\n X.astype(np.float32), Y, metric\n )\n assert not BaseDistanceReductionDispatcher.is_usable_for(\n X, Y.astype(np.int32), metric\n )\n\n # F-ordered arrays are not supported\n assert not BaseDistanceReductionDispatcher.is_usable_for(\n np.asfortranarray(X), Y, metric\n )\n\n # We prefer not to use those implementations for fused sparse-dense when\n # metric=\"(sq)euclidean\" because it's not yet the most efficient o" }, { "id": 46738, "commit_id": "c758c76ac336c054fd17d4b878378aa893b7a979", "repo": "airflow", "path": "airflow/providers/arangodb/hooks/arangodb.py", "file_name": "arangodb.py", "fun_name": "create_database", "commit_message": "Adding ArangoDB Provider (#22548)\n\n* Adding ArangoDB Provider", "code": "def create_database(self, name):\n if not self.db_conn.has_database(name):\n self.db_conn.create_database(name)\n return True\n else:\n self.log.info('Database already exists: %s', name)\n return False\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 74, "n_words": 17, "vocab_size": 16, "complexity": 2, "nloc": 7, "token_counts": 42, "n_ast_nodes": 69, "n_identifiers": 7, "random_cut": "def create_database(self, name):\n if not self.db_conn.has_database(name):\n self.db_conn.create_database(name)\n " }, { "id": 64563, "commit_id": "f57725f8fa016b9826e8fdf2f14dbf1a3d9991f7", "repo": "erpnext", "path": "erpnext/manufacturing/doctype/bom_update_tool/bom_update_tool.py", "file_name": "bom_update_tool.py", "fun_name": "replace_bom", "commit_message": "refactor: Add exception handling in background job within BOM Update Tool", "code": "def replace_bom(args):\n\ttry:\n\t\tfrappe.db.auto_commit_on_many_writes = 1\n\t\targs = frappe._dict(args)\n\t\tdoc = frappe.get_doc(\"BOM Update 
Tool\")\n\t\tdoc.current_bom = args.current_bom\n\t\tdoc.new_bom = args.new_bom\n\t\tdoc.replace_bom()\n\texcept Exception:\n\t\tfrappe.log_error(\n\t\t\tmsg=frappe.get_traceback(),\n\t\t\ttitle=_(\"BOM Update Tool Error\")\n\t\t)\n\tfinally:\n\t\tfrappe.db.auto_commit_on_many_writes = 0\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 19, "n_words": 34, "vocab_size": 27, "complexity": 3, "nloc": 15, "token_counts": 80, "n_ast_nodes": 135, "n_identifiers": 16, "random_cut": "def replace_bom(args):\n\ttry:\n\t\tfrappe.db.auto_commit_on_many_writes = 1\n\t\targs = frappe._dict(args)\n\t\tdoc = frappe.get_doc(\"BOM Update Tool\")\n\t\tdoc.current_bom " }, { "id": 244274, "commit_id": "151a803ed0119560f59dbe7b73824dbdcae08fc6", "repo": "mmdetection", "path": "mmdet/models/dense_heads/ddod_head.py", "file_name": "ddod_head.py", "fun_name": "forward_single", "commit_message": "[Feature] Support DDOD: Disentangle Your Dense Object Detector(ACM MM2021 oral) (#7279)\n\n* add ddod feature\r\n\r\n* add ddod feature\r\n\r\n* modify new\r\n\r\n* [Feature] modify ddod code0225\r\n\r\n* [Feature] modify ddod code0226\r\n\r\n* [Feature] modify ddod code0228\r\n\r\n* [Feature] modify ddod code0228#7279\r\n\r\n* [Feature] modify ddod code0301\r\n\r\n* [Feature] modify ddod code0301 test draft\r\n\r\n* [Feature] modify ddod code0301 test\r\n\r\n* [Feature] modify ddod code0301 extra\r\n\r\n* [Feature] modify ddod code0301 delete src/mmtrack\r\n\r\n* [Feature] modify ddod code0302\r\n\r\n* [Feature] modify ddod code0302(2)\r\n\r\n* [Feature] modify ddod code0303\r\n\r\n* [Feature] modify ddod code0303(2)\r\n\r\n* [Feature] modify ddod code0303(3)\r\n\r\n* [Feature] modify ddod code0305\r\n\r\n* [Feature] modify ddod code0305(2) delete diou\r\n\r\n* [Feature] modify ddod code0305(3)\r\n\r\n* modify ddod code0306\r\n\r\n* [Feature] modify ddod code0307\r\n\r\n* [Feature] modify ddod code0311\r\n\r\n* [Feature] modify ddod code0311(2)\r\n\r\n* [Feature] modify ddod code0313\r\n\r\n* update\r\n\r\n* [Feature] modify ddod code0319\r\n\r\n* fix\r\n\r\n* fix lint\r\n\r\n* [Feature] modify ddod code0321\r\n\r\n* update readme\r\n\r\n* [0502] compute common vars at once for get_target\r\n\r\n* [0504] update ddod conflicts\r\n\r\n* [0518] seperate reg and cls loss and get_target compute\r\n\r\n* [0518] merge ATSSCostAssigner to ATSSAssigner\r\n\r\n* [0518] refine ATSSAssigner\r\n\r\n* [0518] refine ATSSAssigner 2\r\n\r\n* [0518] refine ATSSAssigner 2\r\n\r\n* [0518] refine ATSSAssigner 3\r\n\r\n* [0519] fix bugs\r\n\r\n* update\r\n\r\n* fix lr\r\n\r\n* update weight\r\n\r\nCo-authored-by: hha <1286304229@qq.com>", "code": "def forward_single(self, x, scale):\n \n cls_feat = x\n reg_feat = x\n for cls_conv in self.cls_convs:\n cls_feat = cls_conv(cls_feat)\n for reg_conv in self.reg_convs:\n reg_feat = reg_conv(reg_feat)\n cls_score = self.atss_cls(cls_feat)\n # we just follow atss, not apply exp in bbox_pred\n bbox_pred = scale(self.atss_reg(reg_feat)).float()\n iou_pred = self.atss_iou(reg_feat)\n return cls_score, bbox_pred, iou_pred\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 139, "n_words": 47, "vocab_size": 33, "complexity": 3, "nloc": 11, "token_counts": 79, "n_ast_nodes": 130, "n_identifiers": 17, "random_cut": "def forward_single(self, x, scale):\n \n cls_feat = x\n reg_feat = x\n for cls_conv in 
self.cls_convs:\n cls_feat = cls_conv(cls_feat)\n for reg_conv in self.reg_convs:\n reg_feat = reg_conv(reg_feat)\n cls_score = self.atss_cls(cls_feat)\n # we just follow atss, not apply exp in bbox_pred\n bbox_pred = scale(self.atss_reg(reg_feat)).float()\n iou_pred = self.atss_iou(reg_f" }, { "id": 64440, "commit_id": "312db429e4605d6d0ce47d1034662fdf0ec053b7", "repo": "erpnext", "path": "erpnext/patches/v14_0/update_batch_valuation_flag.py", "file_name": "update_batch_valuation_flag.py", "fun_name": "execute", "commit_message": "refactor: use qb for patching flag", "code": "def execute():\n\t\n\n\tbatch = frappe.qb.DocType(\"Batch\")\n\tfrappe.qb.update(batch).set(batch.use_batchwise_valuation, 0).run()\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 4, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 36, "n_ast_nodes": 63, "n_identifiers": 9, "random_cut": "def execute():\n\t\n\n\tbatch = frappe.qb.D" }, { "id": 264062, "commit_id": "2b2559af1c7790596e7b2040f48e56baef608f9d", "repo": "pyinstaller", "path": "PyInstaller/utils/hooks/tcl_tk.py", "file_name": "tcl_tk.py", "fun_name": "_get_tcl_tk_info", "commit_message": "hookutils: tcl/tk: port to PyInstaller.isolated framework", "code": "def _get_tcl_tk_info():\n \n try:\n import tkinter\n from _tkinter import TCL_VERSION, TK_VERSION\n except ImportError:\n # tkinter unavailable\n return None, None, None, False\n\n tcl = tkinter.Tcl()\n\n # Query the location of Tcl library/data directory.\n tcl_dir = tcl.eval(\"info library\")\n\n # Check if Tcl/Tk is built with multi-threaded support (built with --enable-threads), as indicated by the presence\n # of optional `threaded` member in `tcl_platform` array.\n try:\n tcl.getvar(\"tcl_platform(threaded)\") # Ignore the actual value.\n tcl_threaded = True\n except tkinter.TclError:\n tcl_threaded = False\n\n return tcl_dir, TCL_VERSION, TK_VERSION, tcl_threaded\n\n\n# Populate the variables. 
If `tkinter` is unavailable, the values are set to `None` or `False`.\n(\n tcl_dir,\n tcl_version,\n tk_version,\n tcl_threaded,\n) = _get_tcl_tk_info()\n\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 196, "n_words": 104, "vocab_size": 76, "complexity": 3, "nloc": 14, "token_counts": 68, "n_ast_nodes": 141, "n_identifiers": 15, "random_cut": "def _get_tcl_tk_info():\n \n try:\n import tkinter\n from _tkinter import TCL_VERSION, TK_VERSION\n except ImportError:\n # tkinter unavailable\n return None, None, None, False\n\n tcl = tkinter.Tcl()\n\n # Query the location of Tcl library/data directory.\n t" }, { "id": 92287, "commit_id": "5cf12753665512f60b32a99dd8fd9aa27d0a4a3a", "repo": "sentry", "path": "tests/sentry/utils/locking/backends/test_redis.py", "file_name": "test_redis.py", "fun_name": "test_cluster_as_str", "commit_message": "ref(locks): Make the post_process locks backend configurable (#36328)", "code": "def test_cluster_as_str(self):\n assert RedisLockBackend(cluster=\"default\").cluster == self.cluster\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 31, "n_identifiers": 4, "random_cut": "def test_cluster_as_str(self):\n assert RedisLockBackend(cluster=\"defau" }, { "id": 104781, "commit_id": "23efe55f5547c640f9efdcb2bc678fb7b76e663e", "repo": "datasets", "path": "datasets/crd3/crd3.py", "file_name": "crd3.py", "fun_name": "_info", "commit_message": "Fix yield for crd3 (#4240)\n\n* yielding example per chunk id\r\n\r\n* changing data type for turns\r\n\r\n* removing unused variable\r\n\r\n* Update crd3.py\r\n\r\nCo-authored-by: Shanya Sharma - s0s0cr3 ", "code": "def _info(self):\n return datasets.DatasetInfo(\n description=_DESCRIPTION,\n features=datasets.Features(\n {\n \"chunk\": datasets.Value(\"string\"),\n \"chunk_id\": datasets.Value(\"int32\"),\n \"turn_start\": datasets.Value(\"int32\"),\n \"turn_end\": datasets.Value(\"int32\"),\n \"alignment_score\": datasets.Value(\"float32\"),\n \"turns\": [\n {\n \"names\": datasets.features.Sequence(datasets.Value(\"string\")),\n \"utterances\": datasets.features.Sequence(datasets.Value(\"string\")),\n \"number\": datasets.Value(\"int32\"),\n }\n ],\n }\n ),\n homepage=\"https://github.com/RevanthRameshkumar/CRD3\",\n citation=_CITATION,\n )\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 391, "n_words": 33, "vocab_size": 27, "complexity": 1, "nloc": 22, "token_counts": 126, "n_ast_nodes": 221, "n_identifiers": 13, "random_cut": "def _info(self):\n return datasets.DatasetInfo(\n description=_DESCRIPTION,\n features=datasets.Features(\n {\n \"chunk\": datasets.Value(\"string\"),\n \"chunk_id\": datasets.Value(\"int32\"),\n \"turn_start\": datasets.Value(\"int32\"),\n \"turn_end\": datasets.Value(\"int32\"),\n \"alignment_score\": datasets.Value(\"float32\"),\n \"turns\": [\n {\n \"names\": datasets.features.Sequence(datas" }, { "id": 262872, "commit_id": "1a7d704ffbabb433007e3ba04750c2f13ade48e5", "repo": "pyinstaller", "path": "tests/unit/test_pyimodulegraph.py", "file_name": "test_pyimodulegraph.py", "fun_name": "test_graph_collects_script_dependencies", "commit_message": "Fix typos (#6782) [skip ci]", "code": "def 
test_graph_collects_script_dependencies(fresh_pyi_modgraph, tmpdir):\n mg = fresh_pyi_modgraph\n # self-test 1: uuid is not included in the graph by default\n src1 = gen_sourcefile(tmpdir, , test_id=\"1\")\n node = mg.add_script(str(src1))\n assert node is not None\n assert not mg.find_node(\"uuid\") # self-test\n\n # Add script importing uuid\n src2 = gen_sourcefile(tmpdir, , test_id=\"2\")\n mg.add_script(str(src2))\n assert mg.find_node(\"uuid\") # self-test\n\n # The actual test: uuid is (indirectly) linked to the first script\n names = [n.identifier for n in mg.iter_graph(start=node)]\n assert str(src2) in names\n assert \"uuid\" in names\n\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 121, "n_words": 78, "vocab_size": 47, "complexity": 2, "nloc": 12, "token_counts": 103, "n_ast_nodes": 177, "n_identifiers": 17, "random_cut": "def test_graph_collects_script_dependencies(fresh_pyi_modgraph, tmpdir):\n mg = fresh_pyi_modgraph\n # self-test 1: uuid is not included in the graph by default\n src1 = gen_sourcefile(tmpdir, , test_id=\"1\")\n node = mg.add_script(str(src1))\n assert node is not None\n assert not mg.find_node(\"uu" }, { "id": 162995, "commit_id": "3dfed3fcd552dcbf4daf7f78c82a87638f896512", "repo": "pandas", "path": "pandas/tests/io/test_sql.py", "file_name": "test_sql.py", "fun_name": "test_nan_string", "commit_message": "ENH: to_sql returns rowcount (#45137)", "code": "def test_nan_string(self):\n # NaNs in string column\n df = DataFrame({\"A\": [0, 1, 2], \"B\": [\"a\", \"b\", np.nan]})\n assert df.to_sql(\"test_nan\", self.conn, index=False) == 3\n\n # NaNs are coming back as None\n df.loc[2, \"B\"] = None\n\n # with read_table\n result = sql.read_sql_table(\"test_nan\", self.conn)\n tm.assert_frame_equal(result, df)\n\n # with read_sql\n result = sql.read_sql_query(\"SELECT * FROM test_nan\", self.conn)\n tm.assert_frame_equal(result, df)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 131, "n_words": 55, "vocab_size": 42, "complexity": 1, "nloc": 8, "token_counts": 100, "n_ast_nodes": 165, "n_identifiers": 16, "random_cut": "def test_nan_string(self):\n # NaNs in string c" }, { "id": 56574, "commit_id": "1e29ed45c704bb4b652e15134e95bcbdb77e73a5", "repo": "prefect", "path": "src/prefect/blocks/core.py", "file_name": "core.py", "fun_name": "get_block_capabilities", "commit_message": "Adds capability filtering to block schema filter route", "code": "def get_block_capabilities(cls):\n base_block_capabilities = [\n getattr(base, \"_block_schema_capabilities\", []) or []\n for base in cls.__bases__\n ]\n\n return list(\n {c for capabilities in base_block_capabilities for c in capabilities}\n )\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 87, "n_words": 27, "vocab_size": 22, "complexity": 5, "nloc": 8, "token_counts": 42, "n_ast_nodes": 64, "n_identifiers": 9, "random_cut": "def get_block_capabilities(cls):\n base_block_capabilities = [\n " }, { "id": 172325, "commit_id": "35a7f807ac9f02128333c1b5df0f03c897d13445", "repo": "pandas", "path": "pandas/tests/arithmetic/test_period.py", "file_name": "test_period.py", "fun_name": "test_parr_add_sub_object_array", "commit_message": "API: dont do type inference on arithmetic results (#49714)\n\n* API: dont do type inference on 
arithmetic results\r\n\r\n* mypy fixup\r\n\r\n* use concat_compat\r\n\r\n* dont infer in TimedeltaArray\r\n\r\n* update addsub\r\n\r\n* avoid messing with box_expected", "code": "def test_parr_add_sub_object_array(self):\n pi = period_range(\"2000-12-31\", periods=3, freq=\"D\")\n parr = pi.array\n\n other = np.array([Timedelta(days=1), pd.offsets.Day(2), 3])\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = parr + other\n\n expected = PeriodIndex(\n [\"2001-01-01\", \"2001-01-03\", \"2001-01-05\"], freq=\"D\"\n )._data.astype(object)\n tm.assert_equal(result, expected)\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = parr - other\n\n expected = PeriodIndex([\"2000-12-30\"] * 3, freq=\"D\")._data.astype(object)\n tm.assert_equal(result, expected)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 149, "n_words": 47, "vocab_size": 31, "complexity": 1, "nloc": 14, "token_counts": 136, "n_ast_nodes": 227, "n_identifiers": 25, "random_cut": "def test_parr_add_sub_object_array(self):\n pi = period_range(\"2000-12-31\", periods=3, freq=\"D\")\n parr = pi.array\n\n other = np.array([Timedelta(days=1), pd.offsets.Day(2), 3])\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = parr + other\n\n expected = PeriodIndex(\n [\"2001-01-01\", \"2001-01-03\", \"2001-01-05\"], freq=\"D\"\n )._data.astype(object)\n tm.assert_equal(result, expected)\n\n with tm.a" }, { "id": 307870, "commit_id": "ca5a9c945649f7de9cf58a09f41a33f5ba89b037", "repo": "core", "path": "homeassistant/components/openuv/__init__.py", "file_name": "__init__.py", "fun_name": "async_update_state", "commit_message": "Allow multiple instances of OpenUV via the `homeassistant.update_entity` service (#76878)\n\n* Allow for multiple instances of the OpenUV integration\r\n\r\n* Docstring\r\n\r\n* Remove Repairs\r\n\r\n* Fix tests\r\n\r\n* Slightly faster OpenUV object lookup\r\n\r\n* Entity update service\r\n\r\n* Remove service descriptions\r\n\r\n* hassfest\r\n\r\n* Simplify strings\r\n\r\n* Don't add UI instructions to Repairs item\r\n\r\n* Add a throttle to entity update\r\n\r\n* Update homeassistant/components/openuv/__init__.py\r\n\r\nCo-authored-by: Paulus Schoutsen \r\n\r\n* Switch from Throttle to Debouncer(s)\r\n\r\n* Keep dispatcher for services\r\n\r\n* Reduce change surface area\r\n\r\n* Duplicate method\r\n\r\n* Add issue registry through helper\r\n\r\n* Update deprecation version\r\n\r\n* Use config entry selector\r\n\r\n* Remove device/service info\r\n\r\n* Remove commented out method\r\n\r\n* Correct entity IDs and better verbiage\r\n\r\n* Fix tests\r\n\r\n* Handle missing config entry ID in service calls\r\n\r\n* Remove unhelpful comment\r\n\r\n* Remove unused constants\r\n\r\nCo-authored-by: Paulus Schoutsen \r\nCo-authored-by: J. 
Nick Koston ", "code": "def async_update_state(self) -> None:\n \n self.update_from_latest_data()\n self.async_write_ha_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 27, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 4, "token_counts": 18, "n_ast_nodes": 34, "n_identifiers": 4, "random_cut": "def async_update_state(self) -> None:\n \n self.update_from_latest_data()\n self.async_write_ha_state()\n" }, { "id": 253148, "commit_id": "6cf2a1202aaa24156b471e6f0a4c1fd58ad57602", "repo": "mitmproxy", "path": "test/mitmproxy/addons/test_next_layer.py", "file_name": "test_next_layer.py", "fun_name": "test_next_layer_reverse_udp_mode", "commit_message": "[quic] full-stack test", "code": "def test_next_layer_reverse_udp_mode(self):\n nl = NextLayer()\n ctx = MagicMock()\n ctx.client.alpn = None\n ctx.server.address = (\"example.com\", 443)\n ctx.client.transport_protocol = \"udp\"\n ctx.client.proxy_mode.scheme = \"udp\"\n ctx.layers = [layers.modes.ReverseProxy(ctx)]\n assert isinstance(nl._next_layer(ctx, b\"\", b\"\"), layers.UDPLayer)\n ctx.layers = [layers.modes.ReverseProxy(ctx)]\n assert isinstance(nl._next_layer(ctx, dtls_client_hello_with_extensions, b\"\"), layers.ClientTLSLayer)\n assert len(ctx.layers) == 2\n assert isinstance(nl._next_layer(ctx, b\"\", b\"\"), layers.UDPLayer)\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 129, "n_words": 46, "vocab_size": 27, "complexity": 1, "nloc": 13, "token_counts": 145, "n_ast_nodes": 228, "n_identifiers": 22, "random_cut": "def test_next_layer_reverse_udp_mode(self):\n nl = NextLayer()\n ctx = MagicMock()\n ctx.client.alpn = None\n ctx.server.address = (\"example.com\", 443)\n ctx.cli" }, { "id": 288947, "commit_id": "fc32071562de406c32e75410cd87920f82153856", "repo": "core", "path": "homeassistant/components/alert/__init__.py", "file_name": "__init__.py", "fun_name": "state", "commit_message": "Remove ToggleEntity inheritance from Alert (#80185)", "code": "def state(self) -> str:\n \n if self._firing:\n if self._ack:\n return STATE_OFF\n return STATE_ON\n return STATE_IDLE\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 72, "n_words": 14, "vocab_size": 11, "complexity": 3, "nloc": 7, "token_counts": 24, "n_ast_nodes": 41, "n_identifiers": 8, "random_cut": "def state(self) -> str:\n \n if self._firing:\n if s" }, { "id": 187578, "commit_id": "4088bcdf6685ddca7f1400767266d0665a727455", "repo": "streamlink", "path": "src/streamlink/plugins/youtube.py", "file_name": "youtube.py", "fun_name": "_schema_videodetails", "commit_message": "plugins: call schema.validate(value)\n\ninstead of validate.validate(schema, value) in various plugins, so that\na proper PluginError gets raised on failure instead of a ValidationError", "code": "def _schema_videodetails(cls, data):\n schema = validate.Schema(\n {\n \"videoDetails\": {\n \"videoId\": str,\n \"author\": str,\n \"title\": str,\n validate.optional(\"isLive\"): validate.transform(bool),\n validate.optional(\"isLiveContent\"): validate.transform(bool),\n validate.optional(\"isLiveDvrEnabled\"): validate.transform(bool),\n validate.optional(\"isLowLatencyLiveStream\"): validate.transform(bool),\n validate.optional(\"isPrivate\"): validate.transform(bool),\n },\n \"microformat\": 
validate.all(\n validate.any(\n validate.all(\n {\"playerMicroformatRenderer\": dict},\n validate.get(\"playerMicroformatRenderer\")\n ),\n validate.all(\n {\"microformatDataRenderer\": dict},\n validate.get(\"microformatDataRenderer\")\n )\n ),\n {\n \"category\": str\n }\n )\n },\n validate.union_get(\n (\"videoDetails\", \"videoId\"),\n (\"videoDetails\", \"author\"),\n (\"microformat\", \"category\"),\n (\"videoDetails\", \"title\"),\n (\"videoDetails\", \"isLive\")\n )\n )\n videoDetails = schema.validate(data)\n log.trace(f\"videoDetails = {videoDetails!r}\")\n return videoDetails\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 731, "n_words": 67, "vocab_size": 45, "complexity": 1, "nloc": 40, "token_counts": 208, "n_ast_nodes": 357, "n_identifiers": 18, "random_cut": "def _schema_videodetails(cls, data):\n schema = validate.Schema(\n {\n \"videoDetails\": {\n \"videoId\": str,\n \"author\": str,\n \"title\": str,\n validate.optional(\"isLive\"): validate.transform(bool),\n validate.optional(\"isLiveContent\"): validate.transform(bool),\n validate.optional(\"isLi" }, { "id": 133883, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/contrib/alpha_zero/core/mcts.py", "file_name": "mcts.py", "fun_name": "compute_action", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def compute_action(self, node):\n for _ in range(self.num_sims):\n leaf = node.select()\n if leaf.done:\n value = leaf.reward\n else:\n child_priors, value = self.model.compute_priors_and_value(leaf.obs)\n if self.add_dirichlet_noise:\n child_priors = (1 - self.dir_epsilon) * child_priors\n child_priors += self.dir_epsilon * np.random.dirichlet(\n [self.dir_noise] * child_priors.size\n )\n\n leaf.expand(child_priors)\n leaf.backup(value)\n\n # Tree policy target (TPT)\n tree_policy = node.child_number_visits / node.number_visits\n tree_policy = tree_policy / np.max(\n tree_policy\n ) # to avoid overflows when computing softmax\n tree_policy = np.power(tree_policy, self.temperature)\n tree_policy = tree_policy / np.sum(tree_policy)\n if self.exploit:\n # if exploit then choose action that has the maximum\n # tree policy probability\n action = np.argmax(tree_policy)\n else:\n # otherwise sample an action according to tree policy probabilities\n action = np.random.choice(np.arange(node.action_space_size), p=tree_policy)\n return tree_policy, action, node.children[action]\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 431, "n_words": 111, "vocab_size": 73, "complexity": 5, "nloc": 25, "token_counts": 188, "n_ast_nodes": 300, "n_identifiers": 39, "random_cut": "def compute_action(self, node):\n for _ in range(self.num_sims):\n leaf = node.select()\n if leaf.done:\n value = leaf.reward\n else:\n child_priors, value = self.model.compute_priors_and_value(leaf.obs)\n if self.add_dirichlet_noise:\n child_priors = (1 - self.dir_epsilon) * child_priors\n child_priors += self.dir_epsilon * np.random.dirichlet(\n [self.dir_noise] * child_priors.size\n )\n\n leaf.expand(child_priors)\n leaf.backup(value)\n\n # Tree policy target (TPT)\n " }, { "id": 44750, "commit_id": "0cd3b11f3a5c406fbbd4433d8e44d326086db634", "repo": "airflow", "path": "tests/models/test_baseoperator.py", "file_name": 
"test_baseoperator.py", "fun_name": "test_expand_mapped_task_instance_skipped_on_zero", "commit_message": "Straighten up MappedOperator hierarchy and typing (#21505)", "code": "def test_expand_mapped_task_instance_skipped_on_zero(dag_maker, session):\n with dag_maker(session=session):\n task1 = BaseOperator(task_id=\"op1\")\n xcomarg = XComArg(task1, \"test_key\")\n mapped = MockOperator.partial(task_id='task_2').map(arg2=xcomarg)\n\n dr = dag_maker.create_dagrun()\n\n session.add(\n TaskMap(dag_id=dr.dag_id, task_id=task1.task_id, run_id=dr.run_id, map_index=-1, length=0, keys=None)\n )\n\n mapped.expand_mapped_task(upstream_ti=dr.get_task_instance(task1.task_id), session=session)\n\n indices = (\n session.query(TaskInstance.map_index, TaskInstance.state)\n .filter_by(task_id=mapped.task_id, dag_id=mapped.dag_id, run_id=dr.run_id)\n .order_by(TaskInstance.map_index)\n .all()\n )\n\n assert indices == [(-1, TaskInstanceState.SKIPPED)]\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 123, "n_words": 44, "vocab_size": 38, "complexity": 1, "nloc": 17, "token_counts": 173, "n_ast_nodes": 270, "n_identifiers": 34, "random_cut": "def test_expand_mapped_task_instance_skipped_on_zero(dag_maker, session):\n with dag_maker(session=session):\n task1 = BaseOperator(task_id=\"op1\")\n xcomarg = XComArg(task1, \"test_key\")\n mapped = MockOperator.partial(task_id='task_2').map(arg2=xcomarg)\n\n dr = dag_maker.create_dagrun()\n\n session.add(\n TaskMap(dag_id=dr.dag_id, task_id=task1.task_id, run_id=dr.run_id, map_index=-1, length=0, keys=None)\n )\n\n mapped.expand_mapped_task(upstream_ti=dr.get_task_instance(task1.task_id), session=session)\n\n indices = (\n session.query(TaskInstance.map_index, T" }, { "id": 151853, "commit_id": "a8c9aa01fb3c11330618f26efa822bfe9394124e", "repo": "freqtrade", "path": "tests/freqai/test_freqai_interface.py", "file_name": "test_freqai_interface.py", "fun_name": "test_extract_data_and_train_model_Standard", "commit_message": "Add 3ac test", "code": "def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, dbscan, float32):\n if is_arm() and model == 'CatboostRegressor':\n pytest.skip(\"CatBoost is not supported on ARM\")\n\n if is_mac() and 'Reinforcement' in model:\n pytest.skip(\"Reinforcement learning module not available on intel based Mac OS\")\n\n model_save_ext = 'joblib'\n freqai_conf.update({\"freqaimodel\": model})\n freqai_conf.update({\"timerange\": \"20180110-20180130\"})\n freqai_conf.update({\"strategy\": \"freqai_test_strat\"})\n freqai_conf['freqai']['feature_parameters'].update({\"principal_component_analysis\": pca})\n freqai_conf['freqai']['feature_parameters'].update({\"use_DBSCAN_to_remove_outliers\": dbscan})\n freqai_conf.update({\"reduce_df_footprint\": float32})\n\n if 'ReinforcementLearner' in model:\n model_save_ext = 'zip'\n freqai_conf = make_rl_config(freqai_conf)\n # test the RL guardrails\n freqai_conf['freqai']['feature_parameters'].update({\"use_SVM_to_remove_outliers\": True})\n freqai_conf['freqai']['data_split_parameters'].update({'shuffle': True})\n\n if 'test_3ac' in model or 'test_4ac' in model:\n freqai_conf[\"freqaimodel_path\"] = str(Path(__file__).parents[1] / \"freqai\" / \"test_models\")\n\n if 'ReinforcementLearner' in model:\n model_save_ext = 'zip'\n freqai_conf = make_rl_config(freqai_conf)\n # test the RL guardrails\n freqai_conf['freqai']['feature_parameters'].update({\"use_SVM_to_remove_outliers\": 
True})\n freqai_conf['freqai']['data_split_parameters'].update({'shuffle': True})\n\n if 'test_3ac' in model or 'test_4ac' in model:\n freqai_conf[\"freqaimodel_path\"] = str(Path(__file__).parents[1] / \"freqai\" / \"test_models\")\n\n strategy = get_patched_freqai_strategy(mocker, freqai_conf)\n exchange = get_patched_exchange(mocker, freqai_conf)\n strategy.dp = DataProvider(freqai_conf, exchange)\n strategy.freqai_info = freqai_conf.get(\"freqai\", {})\n freqai = strategy.freqai\n freqai.live = True\n freqai.dk = FreqaiDataKitchen(freqai_conf)\n freqai.dk.set_paths('ADA/BTC', 10000)\n timerange = TimeRange.parse_timerange(\"20180110-20180130\")\n freqai.dd.load_all_pair_histories(timerange, freqai.dk)\n\n freqai.dd.pair_dict = MagicMock()\n\n data_load_timerange = TimeRange.parse_timerange(\"20180125-20180130\")\n new_timerange = TimeRange.parse_timerange(\"20180127-20180130\")\n freqai.dk.set_paths('ADA/BTC', None)\n\n freqai.train_timer(\"start\", \"ADA/BTC\")\n freqai.extract_data_and_train_model(\n new_timerange, \"ADA/BTC\", strategy, freqai.dk, data_load_timerange)\n freqai.train_timer(\"stop\", \"ADA/BTC\")\n freqai.dd.save_metric_tracker_to_disk()\n freqai.dd.save_drawer_to_disk()\n\n assert Path(freqai.dk.full_path / \"metric_tracker.json\").is_file()\n assert Path(freqai.dk.full_path / \"pair_dictionary.json\").is_file()\n assert Path(freqai.dk.data_path /\n f\"{freqai.dk.model_filename}_model.{model_save_ext}\").is_file()\n assert Path(freqai.dk.data_path / f\"{freqai.dk.model_filename}_metadata.json\").is_file()\n assert Path(freqai.dk.data_path / f\"{freqai.dk.model_filename}_trained_df.pkl\").is_file()\n\n shutil.rmtree(Path(freqai.dk.full_path))\n\n\n@pytest.mark.parametrize('model, strat', [\n ('LightGBMRegressorMultiTarget', \"freqai_test_multimodel_strat\"),\n ('XGBoostRegressorMultiTarget', \"freqai_test_multimodel_strat\"),\n ('CatboostRegressorMultiTarget', \"freqai_test_multimodel_strat\"),\n ('LightGBMClassifierMultiTarget', \"freqai_test_multimodel_classifier_strat\"),\n ('CatboostClassifierMultiTarget', \"freqai_test_multimodel_classifier_strat\")\n ])", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize('model, strat', [\n ('LightGBMRegressorMultiTarget', \"freqai_test_multimodel_strat\"),\n ('XGBoostRegressorMultiTarget', \"freqai_test_multimodel_strat\"),\n ('CatboostRegressorMultiTarget', \"freqai_test_multimodel_strat\"),\n ('LightGBMClassifierMultiTarget', \"freqai_test_multimodel_classifier_strat\"),\n ('CatboostClassifierMultiTarget', \"freqai_test_multimodel_classifier_strat\")\n ])", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 458, "n_words": 208, "vocab_size": 128, "complexity": 11, "nloc": 53, "token_counts": 531, "n_ast_nodes": 1049, "n_identifiers": 52, "random_cut": "def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, dbscan, float32):\n if is" }, { "id": 270931, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_layer_v1.py", "file_name": "base_layer_v1.py", "fun_name": "_maybe_cast_inputs", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _maybe_cast_inputs(self, inputs):\n \n compute_dtype = self._compute_dtype\n if (\n self._autocast\n and compute_dtype\n and tf.as_dtype(compute_dtype).is_floating\n ):\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, 
"n_whitespaces": 75, "n_words": 14, "vocab_size": 12, "complexity": 4, "nloc": 11, "token_counts": 48, "n_ast_nodes": 51, "n_identifiers": 9, "random_cut": "def _maybe_cast_inputs(self, inputs):\n \n compute_dtype = self._compute_dtype\n if (\n self._autocast\n and compute_dtype\n and tf.as_dtype(compute_dtype).is_floating\n ):\n" }, { "id": 186660, "commit_id": "7d9e9a49005de7961e84d2a7c608db57dbab3046", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/override_centos.py", "file_name": "override_centos.py", "fun_name": "_try_restart_fedora", "commit_message": "Add typing to certbot.apache (#9071)\n\n* Add typing to certbot.apache\r\n\r\nCo-authored-by: Adrien Ferrand ", "code": "def _try_restart_fedora(self) -> None:\n \n\n try:\n util.run_script(['systemctl', 'restart', 'httpd'])\n except errors.SubprocessError as err:\n raise errors.MisconfigurationError(str(err))\n\n # Finish with actual config check to see if systemctl restart helped\n super().config_test()\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 84, "n_words": 27, "vocab_size": 27, "complexity": 2, "nloc": 9, "token_counts": 46, "n_ast_nodes": 85, "n_identifiers": 11, "random_cut": "def _try_restart_fedora(self) -> None:\n \n\n try:\n util.run_script(['systemctl', 'restart', 'httpd'])\n except errors.SubprocessError as err:\n raise errors.MisconfigurationError(str(err))\n\n # Finish with actual config check to see if systemctl restart helped\n " }, { "id": 28593, "commit_id": "34511f97738853af9e7332b89787202ecaa5eb4a", "repo": "saleor", "path": "saleor/graphql/discount/tests/test_bulk_delete.py", "file_name": "test_bulk_delete.py", "fun_name": "test_delete_sales", "commit_message": "fix bulk delete mutation for sales (#10553)", "code": "def test_delete_sales(staff_api_client, sale_list, permission_manage_discounts):\n\n variables = {\n \"ids\": [graphene.Node.to_global_id(\"Sale\", sale.id) for sale in sale_list]\n }\n response = staff_api_client.post_graphql(\n SALE_BULK_DELETE_MUTATION, variables, permissions=[permission_manage_discounts]\n )\n content = get_graphql_content(response)\n\n assert content[\"data\"][\"saleBulkDelete\"][\"count\"] == 3\n assert not Sale.objects.filter(id__in=[sale.id for sale in sale_list]).exists()\n\n\n@mock.patch(\"saleor.plugins.webhook.plugin.get_webhooks_for_event\")\n@mock.patch(\"saleor.plugins.webhook.plugin.trigger_webhooks_async\")", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@mock.patch(\"saleor.plugins.webhook.plugin.get_webhooks_for_event\")\n@mock.patch(\"saleor.plugins.webhook.plugin.trigger_webhooks_async\")", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 70, "n_words": 38, "vocab_size": 32, "complexity": 3, "nloc": 10, "token_counts": 92, "n_ast_nodes": 173, "n_identifiers": 23, "random_cut": "def test_delete_sales(staff_api_client, sale_list, permission_manage_discounts):\n\n variables = {\n \"ids\": [graphene.Nod" }, { "id": 47196, "commit_id": "6933022e94acf139b2dea9a589bb8b25c62a5d20", "repo": "airflow", "path": "tests/providers/google/cloud/hooks/vertex_ai/test_endpoint_service.py", "file_name": "test_endpoint_service.py", "fun_name": "test_delete_endpoint", "commit_message": "Fix new MyPy errors in main (#22884)\n\nThose MyPe errors are side effect of some new dependencies.", "code": "def test_delete_endpoint(self, mock_client) -> None:\n self.hook.delete_endpoint(\n project_id=TEST_PROJECT_ID,\n 
region=TEST_REGION,\n endpoint=TEST_ENDPOINT_NAME,\n )\n mock_client.assert_called_once_with(TEST_REGION)\n mock_client.return_value.delete_endpoint.assert_called_once_with(\n request=dict(\n name=mock_client.return_value.endpoint_path.return_value,\n ),\n metadata=(),\n retry=DEFAULT,\n timeout=None,\n )\n mock_client.return_value.endpoint_path.assert_called_once_with(\n TEST_PROJECT_ID,\n TEST_REGION,\n TEST_ENDPOINT_NAME,\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 208, "n_words": 24, "vocab_size": 22, "complexity": 1, "nloc": 20, "token_counts": 87, "n_ast_nodes": 127, "n_identifiers": 21, "random_cut": "def test_delete_endpoint(self, mock_client) -> None:\n self.hook.delete_endpoint(\n project_id=TEST_PROJECT_ID,\n region=TEST_REGION,\n endpoint=TEST_ENDPOINT_NAME,\n )\n mock_client.assert_called_once_with(TEST_REGION)\n mock_client.return_value.delete_endpoint.assert_called_once_with(\n request=dict(\n name=mock_client.return_value.endpoint_path.return_value,\n ),\n metadata=(),\n " }, { "id": 106161, "commit_id": "232a43943e87dfedcc328a9a3d3b4d89ea5c6627", "repo": "datasets", "path": "src/datasets/arrow_dataset.py", "file_name": "arrow_dataset.py", "fun_name": "_estimate_nbytes", "commit_message": "Sharded save_to_disk + multiprocessing (#5268)\n\n* add num_shards, num_proc, storage_options to save_to_disk\r\n\r\n* minor\r\n\r\n* add tests\r\n\r\n* remove old s3fs integreation tests\r\n\r\n* style\r\n\r\n* style\r\n\r\n* Update DatasetDict.save_to_disk\r\n\r\n* test dataset dict\r\n\r\n* update dataset dict load_from_disk\r\n\r\n* minor\r\n\r\n* update test\r\n\r\n* update docs\r\n\r\n* backport to_reader to pyarrow < 8\r\n\r\n* typo\r\n\r\n* support both max_shard_size and num_shards\r\n\r\n* style\r\n\r\n* docstrings\r\n\r\n* test _estimate_nbytes\r\n\r\n* add test for num_shards\r\n\r\n* style\r\n\r\n* mario's comment\r\n\r\n* add config.PBAR_REFRESH_TIME_INTERVAL\r\n\r\n* fix docstrings\r\n\r\n* use kwargs_iterable in iflatmap_unordered\r\n\r\n* fix tests", "code": "def _estimate_nbytes(self) -> int:\n dataset_nbytes = self.data.nbytes\n\n # Find decodable columns, because if there are any, we need to\n # adjust the dataset size computation (needed for sharding) to account for possible external files\n decodable_columns = [k for k, v in self.features.items() if require_decoding(v, ignore_decode_attribute=True)]\n\n if decodable_columns:\n # Approximate the space needed to store the bytes from the external files by analyzing the first 1000 examples\n extra_nbytes = 0\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 125, "n_words": 69, "vocab_size": 53, "complexity": 5, "nloc": 13, "token_counts": 113, "n_ast_nodes": 75, "n_identifiers": 14, "random_cut": "def _estimate_nbytes(self) -> int:\n dataset_nbytes = self.data.nbytes\n\n # Find decodable columns, because if there are any, we need to\n # adjust the dataset size computation (needed for sharding) to account for possible external files\n decodable_columns = [k for k, v in self.features.items() if require_decoding(v, ignore_decode_attribute=True)]\n\n if decodable_columns:\n # Approximate the space needed to store the bytes from the exte" }, { "id": 245538, "commit_id": "d0695e68654ca242be54e655491aef8c959ac345", "repo": "mmdetection", "path": "mmdet/engine/hooks/set_epoch_info_hook.py", "file_name": 
"set_epoch_info_hook.py", "fun_name": "before_train_epoch", "commit_message": "[Fix] replace mmcv's function and modules imported with mmengine's (#8594)\n\n* use mmengine's load_state_dict and load_checkpoint\r\n\r\n* from mmengine import dump\r\n\r\n* from mmengine import FileClient dump list_from_file\r\n\r\n* remove redundant registry\r\n\r\n* update\r\n\r\n* update\r\n\r\n* update\r\n\r\n* replace _load_checkpoint with CheckpointLoad.load_checkpoint\r\n\r\n* changes according to mmcv #2216\r\n\r\n* changes due to mmengine #447\r\n\r\n* changes due mmengine #447 and mmcv #2217\r\n\r\n* changes due mmengine #447 and mmcv #2217\r\n\r\n* update\r\n\r\n* update\r\n\r\n* update", "code": "def before_train_epoch(self, runner):\n epoch = runner.epoch\n model = runner.model\n if is_model_wrapper(model):\n model = model.module\n model.set_epoch(epoch)\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 53, "n_words": 15, "vocab_size": 12, "complexity": 2, "nloc": 6, "token_counts": 34, "n_ast_nodes": 55, "n_identifiers": 8, "random_cut": "def before_train_epoch(self, runner):\n epoch = runner.epoch\n model = runner.model\n if is_mo" }, { "id": 104416, "commit_id": "e35be138148333078284b942ccc9ed7b1d826f97", "repo": "datasets", "path": "src/datasets/table.py", "file_name": "table.py", "fun_name": "remove_column", "commit_message": "Update docs to new frontend/UI (#3690)\n\n* WIP: update docs to new UI\r\n\r\n* make style\r\n\r\n* Rm unused\r\n\r\n* inject_arrow_table_documentation __annotations__\r\n\r\n* hasattr(arrow_table_method, \"__annotations__\")\r\n\r\n* Update task_template.rst\r\n\r\n* Codeblock PT-TF-SPLIT\r\n\r\n* Convert loading scripts\r\n\r\n* Convert docs to mdx\r\n\r\n* Fix mdx\r\n\r\n* Add \r\n\r\n* Convert mdx tables\r\n\r\n* Fix codeblock\r\n\r\n* Rm unneded hashlinks\r\n\r\n* Update index.mdx\r\n\r\n* Redo dev change\r\n\r\n* Rm circle ci `build_doc` & `deploy_doc`\r\n\r\n* Rm unneeded files\r\n\r\n* Update docs reamde\r\n\r\n* Standardize to `Example::`\r\n\r\n* mdx logging levels doc\r\n\r\n* Table properties inject_arrow_table_documentation\r\n\r\n* ``` to ```py mdx\r\n\r\n* Add Tips mdx\r\n\r\n* important,None -> \r\n\r\n* More misc\r\n\r\n* Center imgs\r\n\r\n* Update instllation page\r\n\r\n* `setup.py` docs section\r\n\r\n* Rm imgs since they are in hf.co\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* Update index mdx\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* just `Dataset` obj\r\n\r\n* Addedversion just italics\r\n\r\n* Update ReadInstruction doc example syntax\r\n\r\n* Change docstring for `prepare_for_task`\r\n\r\n* Chore\r\n\r\n* Remove `code` syntax from headings\r\n\r\n* Rm `code` syntax from headings\r\n\r\n* Hashlink backward compatability\r\n\r\n* S3FileSystem doc\r\n\r\n* S3FileSystem doc updates\r\n\r\n* index.mdx updates\r\n\r\n* Add darkmode gifs\r\n\r\n* Index logo img css classes\r\n\r\n* Index mdx dataset logo img size\r\n\r\n* Docs for DownloadMode class\r\n\r\n* Doc DownloadMode table\r\n\r\n* format docstrings\r\n\r\n* style\r\n\r\n* Add doc builder scripts (#3790)\r\n\r\n* add doc builder scripts\r\n\r\n* fix docker image\r\n\r\n* Docs new UI actions no self hosted (#3793)\r\n\r\n* No self hosted\r\n\r\n* replace doc injection by actual docstrings\r\n\r\n* Docstring 
formatted\r\n\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Mishig Davaadorj \r\n\r\nCo-authored-by: Lysandre Debut \r\nCo-authored-by: Mishig Davaadorj \r\n\r\n* Rm notebooks from docs actions since they dont exi\r\n\r\n* Update tsting branch\r\n\r\n* More docstring\r\n\r\n* Chore\r\n\r\n* bump up node version\r\n\r\n* bump up node\r\n\r\n* ``` -> ```py for audio_process.mdx\r\n\r\n* Update .github/workflows/build_documentation.yml\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Uodate dev doc build\r\n\r\n* remove run on PR\r\n\r\n* fix action\r\n\r\n* Fix gh doc workflow\r\n\r\n* forgot this change when merging master\r\n\r\n* Update build doc\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: Lysandre Debut ", "code": "def remove_column(self, i, *args, **kwargs):\n \n table = self.table.remove_column(i, *args, **kwargs)\n name = self.table.column_names[i]\n blocks = []\n for tables in self.blocks:\n blocks.append(\n [\n t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t\n for t in tables\n ]\n )\n return ConcatenationTable(table, blocks)\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 172, "n_words": 40, "vocab_size": 29, "complexity": 4, "nloc": 12, "token_counts": 96, "n_ast_nodes": 145, "n_identifiers": 14, "random_cut": "def remove_column(self, i, *args, **kwargs):\n \n table = self.table.remove_column(i, *args, **kwargs)\n name = self.table.column_names[i]\n blocks = []\n for tables in self.blocks:\n blocks.append(\n [\n t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t\n " }, { "id": 63772, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/tenacity/__init__.py", "file_name": "__init__.py", "fun_name": "__call__", "commit_message": "upd; format", "code": "def __call__(self, fn, *args, **kwargs):\n self.begin(fn)\n\n retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)\n while True:\n do = self.iter(retry_state=retry_state)\n if isinstance(do, DoAttempt):\n try:\n result = fn(*args, **kwargs)\n except BaseException: # noqa: B902\n retry_state.set_exception(sys.exc_info())\n else:\n retry_state.set_result(result)\n elif isinstance(do, DoSleep):\n retry_state.prepare_for_next_attempt()\n self.sleep(do)\n else:\n return do\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 253, "n_words": 41, "vocab_size": 36, "complexity": 5, "nloc": 17, "token_counts": 116, "n_ast_nodes": 185, "n_identifiers": 22, "random_cut": "def __call__(self, fn, *args, **kwargs):\n self.begin(fn)\n\n retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)\n while True:\n do = self.iter(retry_state=retry_state)\n if isinstance(do, DoAttempt):\n " }, { "id": 75502, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/search/backends/database/postgres/postgres.py", "file_name": "postgres.py", "fun_name": "add_items", "commit_message": "Reformat with black", "code": "def add_items(self, model, objs):\n search_fields = 
model.get_search_fields()\n if not search_fields:\n return\n\n indexers = [ObjectIndexer(obj, self.backend) for obj in objs]\n\n # TODO: Delete unindexed objects while dealing with proxy models.\n if indexers:\n content_type_pk = get_content_type_pk(model)\n\n update_method = (\n self.add_items_upsert\n if self._enable_upsert\n else self.add_items_update_then_create\n )\n update_method(content_type_pk, indexers)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 179, "n_words": 45, "vocab_size": 40, "complexity": 5, "nloc": 13, "token_counts": 67, "n_ast_nodes": 104, "n_identifiers": 16, "random_cut": "def add_items(self, model, objs):\n search_fields = model.get_search_fields()\n if not search_fields:\n return\n\n indexers = [ObjectIndexer(obj, self.backend) for obj in objs]\n\n # TODO: Delete unindexed objects while dealing with prox" }, { "id": 60829, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/models/link.py", "file_name": "link.py", "fun_name": "egg_fragment", "commit_message": "upd; format", "code": "def egg_fragment(self):\n # type: () -> Optional[str]\n match = self._egg_fragment_re.search(self._url)\n if not match:\n return None\n return match.group(1)\n\n _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)')\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 61, "n_words": 20, "vocab_size": 18, "complexity": 2, "nloc": 5, "token_counts": 30, "n_ast_nodes": 65, "n_identifiers": 10, "random_cut": "def egg_fragment(self):\n # type: () -> Optional[str]\n match = self._egg_fragment_re.search(self._url" }, { "id": 247047, "commit_id": "1901cb1d4a8b7d9af64493fbd336e9aa2561c20c", "repo": "synapse", "path": "tests/rest/client/test_relations.py", "file_name": "test_relations.py", "fun_name": "test_aggregation_redactions", "commit_message": "Add type hints to `tests/rest/client` (#12084)", "code": "def test_aggregation_redactions(self) -> None:\n \n\n channel = self._send_relation(RelationTypes.ANNOTATION, \"m.reaction\", \"a\")\n self.assertEqual(200, channel.code, channel.json_body)\n to_redact_event_id = channel.json_body[\"event_id\"]\n\n channel = self._send_relation(\n RelationTypes.ANNOTATION, \"m.reaction\", \"a\", access_token=self.user2_token\n )\n self.assertEqual(200, channel.code, channel.json_body)\n\n # Now lets redact one of the 'a' reactions\n channel = self.make_request(\n \"POST\",\n \"/_matrix/client/r0/rooms/%s/redact/%s\" % (self.room, to_redact_event_id),\n access_token=self.user_token,\n content={},\n )\n self.assertEqual(200, channel.code, channel.json_body)\n\n channel = self.make_request(\n \"GET\",\n \"/_matrix/client/unstable/rooms/%s/aggregations/%s\"\n % (self.room, self.parent_id),\n access_token=self.user_token,\n )\n self.assertEqual(200, channel.code, channel.json_body)\n\n self.assertEqual(\n channel.json_body,\n {\"chunk\": [{\"type\": \"m.reaction\", \"key\": \"a\", \"count\": 1}]},\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 305, "n_words": 72, "vocab_size": 46, "complexity": 1, "nloc": 27, "token_counts": 192, "n_ast_nodes": 308, "n_identifiers": 17, "random_cut": "def test_aggregation_redactions(self) -> None:\n \n\n channel = 
self._send_relation(RelationTypes.ANNOTATION, \"m.reaction\", \"a\")\n self.assertEqual(200, channel.code, channel.json_body)\n to_redact_event_id = channel.json_body[\"event_id\"]\n\n channel = self._send_relation(\n RelationTypes.ANNOTATION, \"m.reaction\", \"a\", access_token=self.user2_token\n )\n self.assertEqual(200, channel.code, channel.json_body)\n\n # Now lets redact one of the 'a' reactions\n channel = self.make_request(\n \"POST\",\n \"/_matrix/client/r0/rooms/%s/redact/%s\" % (self.room, to_redact_event_id),\n access_token=self.user_token,\n content={},\n )\n self.assertEqual(200, channel.code, channel.json_body)\n\n channel = self.make_request(\n \"GET\",\n \"/_matrix/client/unstable/rooms/%s/aggregations/%s\"\n % (self.room, self.parent_id),\n access_token=self.user_token,\n )\n self.assertEqual(200, channel" }, { "id": 125629, "commit_id": "aaab4abad5f8549cfdadbebf7819c8f046bcdffb", "repo": "ray", "path": "python/ray/data/tests/test_split.py", "file_name": "test_split.py", "fun_name": "_test_equal_split_balanced", "commit_message": "[Data][Split] stable version of split with hints (#26778)\n\nWhy are these changes needed?\r\nIntroduce a stable version of split with hints with a stable equalizing algorithm:\r\n\r\nuse the greedy algorithm to generate the initial unbalanced splits.\r\nfor each splits, first shave them so the number for rows are below the target_size\r\nbased on how many rows needed for each split, do a one time split_at_index to the left over blocks.\r\nmerge the shaved splits with the leftover splits.\r\nThe guarantee of this algorithm is we at most need to split O(split) number of blocks.", "code": "def _test_equal_split_balanced(block_sizes, num_splits):\n blocks = []\n metadata = []\n total_rows = 0\n for block_size in block_sizes:\n block = list(range(total_rows, total_rows + block_size))\n blocks.append(ray.put(block))\n metadata.append(BlockAccessor.for_block(block).get_metadata(None, None))\n total_rows += block_size\n block_list = BlockList(blocks, metadata, owned_by_consumer=True)\n ds = Dataset(\n ExecutionPlan(block_list, DatasetStats.TODO(), run_by_consumer=True),\n 0,\n False,\n )\n\n splits = ds.split(num_splits, equal=True)\n split_counts = [split.count() for split in splits]\n assert len(split_counts) == num_splits\n expected_block_size = total_rows // num_splits\n # Check that all splits are the expected size.\n assert all([count == expected_block_size for count in split_counts])\n expected_total_rows = sum(split_counts)\n # Check that the expected number of rows were dropped.\n assert total_rows - expected_total_rows == total_rows % num_splits\n # Check that all rows are unique (content check).\n split_rows = [row for split in splits for row in split.take(total_rows)]\n assert len(set(split_rows)) == len(split_rows)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 229, "n_words": 124, "vocab_size": 75, "complexity": 6, "nloc": 24, "token_counts": 198, "n_ast_nodes": 307, "n_identifiers": 39, "random_cut": "def _test_equal_split_balanced(block_sizes, num_splits):\n blocks = []\n metadata = []\n total_rows = 0\n for block_size in block_sizes:\n block = list(range(total_rows, total_rows + block_size))\n blocks.append(ray.put(block))\n metadata.append(BlockAcces" }, { "id": 285655, "commit_id": "3d0190e35bae4092f52025377d8604b3a6a17bfa", "repo": "OpenBBTerminal", "path": "openbb_terminal/featflags_controller.py", "file_name": 
"featflags_controller.py", "fun_name": "call_tbhint", "commit_message": "New path for .env (#2508)\n\n* add log path\r\n\r\n* add test to check if log file is in correct dir\r\n\r\n* env path\r\n\r\n* black\r\n\r\n* mypy fix\r\n\r\n* linting\r\n\r\n* add make_paths and change references\r\n\r\n* terminal change\r\n\r\n* change constants to paths\r\n\r\n* change names\r\n\r\n* black\r\n\r\n* mypy\r\n\r\n* mypy\r\n\r\n* pylint else\r\n\r\n* add make paths\r\n\r\n* remove custom user dir name\r\n\r\nCo-authored-by: Chavithra ", "code": "def call_tbhint(self, _):\n \n if obbff.TOOLBAR_HINT:\n console.print(\"Will take effect when running terminal next.\")\n obbff.TOOLBAR_HINT = not obbff.TOOLBAR_HINT\n set_key(\n obbff.USER_ENV_FILE,\n \"OPENBB_TOOLBAR_HINT\",\n str(obbff.TOOLBAR_HINT),\n )\n console.print(\"\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 108, "n_words": 22, "vocab_size": 21, "complexity": 2, "nloc": 10, "token_counts": 49, "n_ast_nodes": 85, "n_identifiers": 10, "random_cut": "def call_tbhint(self, _):\n \n if obbff.TOOLBAR_HINT:\n console.print(\"Will take effect when running terminal next.\")\n obbff.TOOLBAR_HINT = not obbff.TOOLBAR_HINT\n set_key(\n obbff.USER_ENV_FILE,\n \"OPENBB_TOOLBAR_HINT\",\n str(obbff.TOOLB" }, { "id": 133758, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/agents/maml/maml_tf_policy.py", "file_name": "maml_tf_policy.py", "fun_name": "feed_forward", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def feed_forward(self, obs, policy_vars, policy_config):\n # Hacky for now, reconstruct FC network with adapted weights\n # @mluo: TODO for any network", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 4, "n_whitespaces": 34, "n_words": 21, "vocab_size": 18, "complexity": 5, "nloc": 23, "token_counts": 144, "n_ast_nodes": 17, "n_identifiers": 5, "random_cut": "def feed_forward(self, obs, policy_vars, policy_config):\n # Hacky for now, reconstruct FC network with adapted weights\n # @mluo: TODO for any netwo" }, { "id": 248545, "commit_id": "97053c94060ea31d3b9d41a129221ad4b2a76865", "repo": "synapse", "path": "tests/state/test_v2.py", "file_name": "test_v2.py", "fun_name": "test_ban_vs_pl", "commit_message": "Type annotations for `test_v2` (#12985)", "code": "def test_ban_vs_pl(self) -> None:\n events = [\n FakeEvent(\n id=\"PA\",\n sender=ALICE,\n type=EventTypes.PowerLevels,\n state_key=\"\",\n content={\"users\": {ALICE: 100, BOB: 50}},\n ),\n FakeEvent(\n id=\"MA\",\n sender=ALICE,\n type=EventTypes.Member,\n state_key=ALICE,\n content={\"membership\": Membership.JOIN},\n ),\n FakeEvent(\n id=\"MB\",\n sender=ALICE,\n type=EventTypes.Member,\n state_key=BOB,\n content={\"membership\": Membership.BAN},\n ),\n FakeEvent(\n id=\"PB\",\n sender=BOB,\n type=EventTypes.PowerLevels,\n state_key=\"\",\n content={\"users\": {ALICE: 100, BOB: 50}},\n ),\n ]\n\n edges = [[\"END\", \"MB\", \"MA\", \"PA\", \"START\"], [\"END\", \"PB\", \"PA\"]]\n\n expected_state_ids = [\"PA\", \"MA\", \"MB\"]\n\n self.do_check(events, edges, expected_state_ids)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 486, "n_words": 64, "vocab_size": 44, "complexity": 1, 
"nloc": 34, "token_counts": 193, "n_ast_nodes": 303, "n_identifiers": 20, "random_cut": "def test_ban_vs_pl(self) -> None:\n events = [\n FakeEvent(\n id=\"PA\",\n sender=ALICE,\n type=EventTypes.PowerLevels,\n state_key" }, { "id": 156908, "commit_id": "81f771e05f57cab2838534c319a9b81f6e5a00cd", "repo": "dask", "path": "dask/tests/test_layers.py", "file_name": "test_layers.py", "fun_name": "test_dataframe_cull_key_dependencies_materialized", "commit_message": "Fix caching-related MaterializedLayer.cull performance regression (#9413)\n\n* allow MaterializedLayer to cache culled_deps\r\n\r\n* format\r\n\r\n* make test more thorough\r\n\r\n* fix import mistake\r\n\r\n* add link to issue in comment\r\n\r\n* improve test", "code": "def test_dataframe_cull_key_dependencies_materialized():\n # Test that caching of MaterializedLayer\n # dependencies during culling doesn't break\n # the result of ``get_all_dependencies``\n\n datasets = pytest.importorskip(\"dask.datasets\")\n dd = pytest.importorskip(\"dask.dataframe\")\n\n ddf = datasets.timeseries(end=\"2000-01-15\")\n\n # Build a custom layer to ensure\n # MaterializedLayer is used\n name = \"custom_graph_test\"\n name_0 = \"custom_graph_test_0\"\n dsk = {}\n for i in range(ddf.npartitions):\n dsk[(name_0, i)] = (lambda x: x, (ddf._name, i))\n dsk[(name, i)] = (lambda x: x, (name_0, i))\n dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])\n result = dd.core.new_dd_object(dsk, name, ddf._meta, ddf.divisions)\n graph = result.dask\n\n # HLG cull\n culled_keys = [k for k in result.__dask_keys__() if k != (name, 0)]\n culled_graph = graph.cull(culled_keys)\n\n # Check that culled_deps are cached\n # See: https://github.com/dask/dask/issues/9389\n cached_deps = culled_graph.key_dependencies.copy()\n deps = culled_graph.get_all_dependencies()\n assert cached_deps == deps\n\n # Manual cull\n deps0 = graph.get_all_dependencies()\n deps0.pop((name, 0))\n deps0.pop((name_0, 0))\n deps0.pop((ddf._name, 0))\n\n # Check that get_all_dependencies results match\n assert deps0 == deps\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 244, "n_words": 141, "vocab_size": 93, "complexity": 4, "nloc": 23, "token_counts": 227, "n_ast_nodes": 365, "n_identifiers": 38, "random_cut": "def test_dataframe_cull_key_dependencies_materialized():\n # Test that caching of MaterializedLayer\n # dependencies during culling doesn't break\n # the result of ``get_all_dependencies``\n\n datasets = pytest.importorskip(\"dask.datasets\")\n dd = pytest.importorskip(\"dask.dataframe\")\n\n ddf = datas" }, { "id": 274189, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/tensorflow_op_layer_test.py", "file_name": "tensorflow_op_layer_test.py", "fun_name": "build", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def build(self, input_shape):\n self.bias = self.add_weight(name=\"bias\", dtype=\"float32\")\n self.layer = keras.layers.Dense(10)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 23, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 35, "n_ast_nodes": 58, "n_identifiers": 11, "random_cut": "def build(self, input_shape):\n self.bias = self.add_weight(name=\"bia" }, { "id": 206214, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": 
"django", "path": "django/template/context.py", "file_name": "context.py", "fun_name": "update", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def update(self, other_dict):\n \"Push other_dict to the stack of dictionaries in the Context\"\n if not hasattr(other_dict, \"__getitem__\"):\n raise TypeError(\"other_dict must be a mapping (dictionary-like) object.\")\n if isinstance(other_dict, BaseContext):\n other_dict = other_dict.dicts[1:].pop()\n return ContextDict(self, other_dict)\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 83, "n_words": 34, "vocab_size": 31, "complexity": 3, "nloc": 7, "token_counts": 50, "n_ast_nodes": 84, "n_identifiers": 10, "random_cut": "def update(self, other_dict):\n \"Push other_dict to the stack of dictionaries in the Context\"\n if not hasattr(other_dict, \"__getitem__\"):\n raise TypeError(\"other_dict must be a mapping (dictionary-like) object.\")\n if isinstance(other_dict, BaseContext):\n " }, { "id": 161217, "commit_id": "c5d03fb3cbf5105aa45dc131474260cf140b748b", "repo": "MockingBird", "path": "ppg2mel/preprocess.py", "file_name": "preprocess.py", "fun_name": "preprocess_dataset", "commit_message": "Upgrade to new web service (#529)\n\n* Init new GUI\r\n\r\n* Remove unused codes\r\n\r\n* Reset layout\r\n\r\n* Add samples\r\n\r\n* Make framework to support multiple pages\r\n\r\n* Add vc mode\r\n\r\n* Add preprocessing mode\r\n\r\n* Add training mode\r\n\r\n* Remove text input in vc mode\r\n\r\n* Add entry for GUI and revise readme\r\n\r\n* Move requirement together\r\n\r\n* Add error raise when no model folder found\r\n\r\n* Add readme", "code": "def preprocess_dataset(datasets_root, dataset, out_dir, n_processes, ppg_encoder_model_fpath, speaker_encoder_model):\n # Glob wav files\n wav_file_list = sorted(Path(f\"{datasets_root}/{dataset}\").glob(\"**/*.wav\"))\n print(f\"Globbed {len(wav_file_list)} wav files.\")\n\n out_dir.joinpath(\"bnf\").mkdir(exist_ok=True, parents=True)\n out_dir.joinpath(\"f0\").mkdir(exist_ok=True, parents=True)\n out_dir.joinpath(\"embed\").mkdir(exist_ok=True, parents=True)\n ppg_model_local = load_model(ppg_encoder_model_fpath, \"cpu\")\n encoder_model_local = Encoder.load_model(speaker_encoder_model, \"cpu\")\n if n_processes is None:\n n_processes = cpu_count()\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n func = partial(preprocess_one, out_dir=out_dir, ppg_model_local=ppg_model_local, encoder_model_local=encoder_model_local, device=device)\n job = Pool(n_processes).imap(func, wav_file_list)\n list(tqdm(job, \"Preprocessing\", len(wav_file_list), unit=\"wav\"))\n\n # finish processing and mark\n t_fid_file = out_dir.joinpath(\"train_fidlist.txt\").open(\"w\", encoding=\"utf-8\")\n d_fid_file = out_dir.joinpath(\"dev_fidlist.txt\").open(\"w\", encoding=\"utf-8\")\n e_fid_file = out_dir.joinpath(\"eval_fidlist.txt\").open(\"w\", encoding=\"utf-8\")\n for file in sorted(out_dir.joinpath(\"f0\").glob(\"*.npy\")):\n id = os.path.basename(file).split(\".f0.npy\")[0]\n if id.endswith(\"01\"):\n d_fid_file.write(id + \"\\n\")\n elif id.endswith(\"09\"):\n e_fid_file.write(id + \"\\n\")\n else:\n t_fid_file.write(id + \"\\n\")\n t_fid_file.close()\n d_fid_file.close()\n e_fid_file.close()\n return len(wav_file_list)\n", "url": "https://github.com/babysor/MockingBird.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 241, 
"n_words": 104, "vocab_size": 79, "complexity": 6, "nloc": 29, "token_counts": 334, "n_ast_nodes": 593, "n_identifiers": 49, "random_cut": "def preprocess_dataset(datasets_root, dataset, out_dir, n_processes, ppg_encoder_model_fpath, speaker_encoder_model):\n # Glob wav files\n wav_file_list = sorted(Path(f\"{datasets_root}/{dataset}\").glob(\"**" }, { "id": 106530, "commit_id": "d25cf62086443d86a633b8176b5c7e79f4cc569e", "repo": "youtube-dl", "path": "youtube_dl/extractor/neteasemusic.py", "file_name": "neteasemusic.py", "fun_name": "_call_player_api", "commit_message": "[netease] Impove error handling (#31303)\n\n* add warnings for users outside of China\r\n* skip empty song urls\r\n\r\nCo-authored-by: dirkf ", "code": "def _call_player_api(self, song_id, bitrate):\n url = 'https://interface3.music.163.com/eapi/song/enhance/player/url'\n data, headers = self.make_player_api_request_data_and_headers(song_id, bitrate)\n try:\n msg = 'empty result'\n result = self._download_json(\n url, song_id, data=data.encode('ascii'), headers=headers)\n if result:\n return result\n except ExtractorError as e:\n if type(e.cause) in (ValueError, TypeError):\n # JSON load failure\n raise\n except Exception as e:\n msg = error_to_compat_str(e)\n self.report_warning('%s API call (%s) failed: %s' % (\n song_id, bitrate, msg))\n return {}\n", "url": "https://github.com/ytdl-org/youtube-dl.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 244, "n_words": 62, "vocab_size": 49, "complexity": 5, "nloc": 17, "token_counts": 105, "n_ast_nodes": 171, "n_identifiers": 21, "random_cut": "def _call_player_api(self, song_id, bitrate):\n url = 'https://interface3.music.163.com/eapi/son" }, { "id": 122518, "commit_id": "c42bad85ef427b2555464901f2edf2a19ad1564a", "repo": "jax", "path": "jax/_src/sharding.py", "file_name": "sharding.py", "fun_name": "_enable_cpp_named_sharding", "commit_message": "Make `MeshPspecSharding` an alias for `NamedSharding` (it was the other way around before this CL).\n\nPiperOrigin-RevId: 488473538", "code": "def _enable_cpp_named_sharding():\n if xc._version >= 107:\n return xc.NamedSharding\n elif xc._version >= 95:\n return xc.MeshPspecSharding # type: ignore\n else:\n return None\n\n\n@pxla.use_cpp_class(_enable_cpp_named_sharding())", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@pxla.use_cpp_class(_enable_cpp_named_sharding())", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 32, "n_words": 21, "vocab_size": 17, "complexity": 3, "nloc": 7, "token_counts": 30, "n_ast_nodes": 66, "n_identifiers": 7, "random_cut": "def _enable_cpp_named_sharding():\n if xc._version >= 107:\n return xc.NamedSharding\n elif xc._version >= 95:\n return xc.MeshPspecSharding # type: ignore\n else:\n return None\n\n\n@pxla.use_cpp_class(_enable_cpp_named_sharding())" }, { "id": 320601, "commit_id": "b7fd265179f1f646b51430d02ce3495920b7d2dd", "repo": "urh", "path": "src/urh/dev/PCAPNG.py", "file_name": "PCAPNG.py", "fun_name": "_build_pcapng_idb", "commit_message": "Adding Save As pcapng for ProtocolAnalyzer (#970)", "code": "def _build_pcapng_idb(link_type) -> bytes:\n BLOCKTYPE = 0x00000001\n BLOCKLENGTH = 20\n SNAP_LEN = 0\n\n return struct.pack(\">IIHHII\",\n BLOCKTYPE,\n BLOCKLENGTH,\n link_type, 0,\n SNAP_LEN,\n BLOCKLENGTH)\n", "url": "https://github.com/jopohl/urh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 147, "n_words": 21, "vocab_size": 19, "complexity": 1, 
"nloc": 10, "token_counts": 35, "n_ast_nodes": 52, "n_identifiers": 8, "random_cut": "def _build_pcapng_idb(link_type) -> bytes:\n BLOCKTYPE = 0x00" }, { "id": 86451, "commit_id": "9399434e0a45da2c82209b38f1f214688e1ae4f3", "repo": "sentry", "path": "tests/acceptance/test_account_settings.py", "file_name": "test_account_settings.py", "fun_name": "test_account_subscriptions_settings", "commit_message": "test: Use new settings routes in account settings test (#39587)", "code": "def test_account_subscriptions_settings(self):\n with self.feature(\"organizations:onboarding\"):\n self.browser.get(\"/settings/account/subscriptions/\")\n self.browser.wait_until_not('[data-test-id=\"loading-indicator\"]')\n self.browser.snapshot(\"account subscriptions settings\")\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 48, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 5, "token_counts": 37, "n_ast_nodes": 71, "n_identifiers": 7, "random_cut": "def test_account_subscriptions_settings(self):\n with self.feature(\"organizations:onboarding\"):\n self.browser.get(\"/settings/account/s" }, { "id": 116735, "commit_id": "2970711efb4c713b604eb3bac840ef9d21f18273", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/hana_handler/hana_handler.py", "file_name": "hana_handler.py", "fun_name": "check_connection", "commit_message": "minor: use dummy table for sap hana conn check", "code": "def check_connection(self) -> StatusResponse:\n \n\n response = StatusResponse(False)\n need_to_close = self.is_connected is False\n\n try:\n connection = self.connect()\n with connection.cursor() as cur:\n cur.execute('SELECT 1 FROM SYS.DUMMY')\n response.success = True\n except dbapi.Error as e:\n log.error(f'Error connecting to SAP HANA {self.address}, {e}!')\n response.error_message = e\n\n if response.success is True and need_to_close:\n self.disconnect()\n if response.success is False and self.is_connected is True:\n self.is_connected = False\n\n return response\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 209, "n_words": 61, "vocab_size": 42, "complexity": 6, "nloc": 20, "token_counts": 103, "n_ast_nodes": 188, "n_identifiers": 20, "random_cut": "def check_connection(self) -> StatusResponse:\n \n\n response = StatusResponse(False)\n need_to_close = self.is_connected is False\n\n try:\n connection = self.connect" }, { "id": 95889, "commit_id": "78fd2058f3db3ddeea4ac1d370656db6ad192a99", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events_spans_performance.py", "file_name": "test_organization_events_spans_performance.py", "fun_name": "suspect_span_examples_snuba_results", "commit_message": "fix(suspect-spans): Use non aggregate variant of span examples query (#31295)\n\nThe original span examples query uses an array join with an group by on the\r\nevent id. This creates too many groupings, 1 per event id, which leads to slow\r\nquery performance. 
This changes the query to do the same without using array\r\njoin or any aggregates.", "code": "def suspect_span_examples_snuba_results(self, op, event):\n results = {\n \"project.id\": self.project.id,\n \"id\": event.event_id,\n }\n\n if op == \"http.server\":\n results.update(\n {\n \"count_span_time\": 1,\n \"sum_span_time\": 4.0,\n \"max_span_time\": 4.0,\n }\n )\n elif op == \"django.middleware\":\n results.update(\n {\n \"count_span_time\": 2,\n \"sum_span_time\": 6.0,\n \"max_span_time\": 3.0,\n }\n )\n elif op == \"django.view\":\n results.update(\n {\n \"count_span_time\": 3,\n \"sum_span_time\": 3.0,\n \"max_span_time\": 1.0,\n }\n )\n else:\n assert False, f\"Unexpected Op: {op}\"\n\n return results\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 470, "n_words": 62, "vocab_size": 38, "complexity": 4, "nloc": 32, "token_counts": 120, "n_ast_nodes": 190, "n_identifiers": 9, "random_cut": "def suspect_span_examples_snuba_results(self, op, event):\n results = {\n \"project.id\": self.project.id,\n \"id\": event.event_id," }, { "id": 22294, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "Add_two_Linked_List.py", "file_name": "Add_two_Linked_List.py", "fun_name": "Add_two_no", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def Add_two_no(self, First, Second):\r\n prev = None\r\n temp = None\r\n carry = 0\r\n while First is not None or Second is not None:\r\n first_data = 0 if First is None else First.data\r\n second_data = 0 if Second is None else Second.data\r\n Sum = carry + first_data + second_data\r\n carry = 1 if Sum >= 10 else 0\r\n Sum = Sum if Sum < 10 else Sum % 10\r\n temp = Node(Sum)\r\n if self.head is None:\r\n self.head = temp\r\n else:\r\n prev.next = temp\r\n prev = temp\r\n if First is not None:\r\n First = First.next\r\n if Second is not None:\r\n Second = Second.next\r\n if carry > 0:\r\n temp.next = Node(carry)\r\n\r", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 334, "n_words": 108, "vocab_size": 40, "complexity": 11, "nloc": 22, "token_counts": 141, "n_ast_nodes": 217, "n_identifiers": 14, "random_cut": "def Add_two_no(self, First, Second):\r\n prev = None\r\n temp = None\r\n carry = 0\r\n while First is not None or Second is not None:\r\n first_data = 0 if First is None else First.data\r\n second_data = 0 if Second is None" }, { "id": 256211, "commit_id": "a59bca366174d9c692fa19750c24d65f47660ef7", "repo": "haystack", "path": "haystack/modeling/data_handler/samples.py", "file_name": "samples.py", "fun_name": "offset_to_token_idx_vecorized", "commit_message": "Apply black formatting (#2115)\n\n* Testing black on ui/\r\n\r\n* Applying black on docstores\r\n\r\n* Add latest docstring and tutorial changes\r\n\r\n* Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too\r\n\r\n* Remove comments\r\n\r\n* Relax constraints on pydoc-markdown\r\n\r\n* Split temporary black from the docs. 
Pydoc-markdown was obsolete and needs a separate PR to upgrade\r\n\r\n* Fix a couple of bugs\r\n\r\n* Add a type: ignore that was missing somehow\r\n\r\n* Give path to black\r\n\r\n* Apply Black\r\n\r\n* Apply Black\r\n\r\n* Relocate a couple of type: ignore\r\n\r\n* Update documentation\r\n\r\n* Make Linux CI run after applying Black\r\n\r\n* Triggering Black\r\n\r\n* Apply Black\r\n\r\n* Remove dependency, does not work well\r\n\r\n* Remove manually double trailing commas\r\n\r\n* Update documentation\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def offset_to_token_idx_vecorized(token_offsets, ch_idx):\n \n # case ch_idx is at end of tokens\n if ch_idx >= np.max(token_offsets):\n # TODO check \"+ 1\" (it is needed for making end indices compliant with old offset_to_token_idx() function)\n # check whether end token is incluse or exclusive\n idx = np.argmax(token_offsets) + 1\n # looking for the first occurence of token_offsets larger than ch_idx and taking one position to the left.\n # This is needed to overcome n special_tokens at start of sequence\n # and failsafe matching (the character start might not always coincide with a token offset, e.g. when starting at whitespace)\n else:\n idx = np.argmax(token_offsets > ch_idx) - 1\n return idx\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 158, "n_words": 106, "vocab_size": 77, "complexity": 2, "nloc": 6, "token_counts": 44, "n_ast_nodes": 79, "n_identifiers": 7, "random_cut": "def offset_to_token_idx_vecorized(token_offsets, ch_idx):\n \n # case ch_idx is at end of tokens\n if ch_idx >= np.max(token_offsets):\n # TODO check \"+ 1\" (it is needed for making end indices compliant with old offset_to_token_idx() function)\n # check whether end token is incluse or exclusive\n idx = np.argmax(token_offsets) + 1\n # looking for the first occurence of token_offsets larger than ch_idx and taking one position to the left.\n # This is needed to overcome n special_tokens at start of sequence\n # and failsafe matching (the character start might" }, { "id": 131138, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/ludwig/test_ludwig.py", "file_name": "test_ludwig.py", "fun_name": "run_api_experiment", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def run_api_experiment(config, data_parquet):\n # Sanity check that we get 4 slots over 1 host\n kwargs = get_horovod_kwargs()\n assert kwargs.get(\"num_hosts\") == 1\n assert kwargs.get(\"num_slots\") == 2\n\n # Train on Parquet\n dask_backend = RayBackend()\n train_with_backend(dask_backend, config, dataset=data_parquet, evaluate=False)\n\n\n@spawn", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@spawn", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 56, "n_words": 37, "vocab_size": 32, "complexity": 1, "nloc": 6, "token_counts": 49, "n_ast_nodes": 86, "n_identifiers": 12, "random_cut": "def run_api_experiment(config, data_parquet):\n # Sanity check that we get 4 slots over 1 host\n kwargs = get_horovod_kwargs()\n assert kwargs.get(\"num_hosts\") == 1\n assert kwargs.get(\"num_slots\") == 2\n" }, { "id": 173518, "commit_id": "7eef44f73ccd19762ae3356d6c0ac70228ff3302", "repo": "calibre-web", "path": "cps/admin.py", "file_name": "admin.py", "fun_name": 
"list_restriction", "commit_message": "Make drive letters available in file picker", "code": "def list_restriction(res_type, user_id):\n if res_type == 0: # Tags as template\n restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)}\n for i, x in enumerate(config.list_denied_tags()) if x != '']\n allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)}\n for i, x in enumerate(config.list_allowed_tags()) if x != '']\n json_dumps = restrict + allow\n elif res_type == 1: # CustomC as template\n restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)}\n for i, x in enumerate(config.list_denied_column_values()) if x != '']\n allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)}\n for i, x in enumerate(config.list_allowed_column_values()) if x != '']\n json_dumps = restrict + allow\n elif res_type == 2: # Tags per user\n if isinstance(user_id, int):\n usr = ub.session.query(ub.User).filter(ub.User.id == user_id).first()\n else:\n usr = current_user\n restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)}\n for i, x in enumerate(usr.list_denied_tags()) if x != '']\n allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)}\n for i, x in enumerate(usr.list_allowed_tags()) if x != '']\n json_dumps = restrict + allow\n elif res_type == 3: # CustomC per user\n if isinstance(user_id, int):\n usr = ub.session.query(ub.User).filter(ub.User.id == user_id).first()\n else:\n usr = current_user\n restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)}\n for i, x in enumerate(usr.list_denied_column_values()) if x != '']\n allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)}\n for i, x in enumerate(usr.list_allowed_column_values()) if x != '']\n json_dumps = restrict + allow\n else:\n json_dumps = \"\"\n js = json.dumps(json_dumps)\n response = make_response(js)\n response.headers[\"Content-Type\"] = \"application/json; charset=utf-8\"\n return response\n\n\n@admi.route(\"/ajax/fullsync\", methods=[\"POST\"])\n@login_required", "url": "https://github.com/janeczku/calibre-web.git", "language": "Python", "ast_errors": "@admi.route(\"/ajax/fullsync\", methods=[\"POST\"])\n@login_required", "n_ast_errors": 1, "ast_levels": 18, "n_whitespaces": 581, "n_words": 250, "vocab_size": 65, "complexity": 23, "nloc": 39, "token_counts": 492, "n_ast_nodes": 887, "n_identifiers": 37, "random_cut": "def list_restriction(res_type, user_id):\n if res_type == 0: # Tags as template\n restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)}\n for i, x in enumerate(config.list_denied_tags()) if x != '']\n allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)}\n for i, x in enumerate(config.list_allowed_tags()) if x != '']\n json_dumps = restrict + allow\n elif res_type == 1: # CustomC as template\n restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)}\n for i, x in enumerate(config.list_denied_column_values()) if x != '']\n allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)}\n for i, x in enumerate(config.list_allowed_column_values()) if x != '']\n json_dumps = restrict + allow\n elif res_type == 2: # Tags per user\n if isinstance(user_id, int):\n usr = ub.session.query(ub.User).filter(ub.User.id == user_id).first()\n else:\n usr = current_user\n restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)}\n for i, x in enumerate(usr.list_denied_tags()) if x != '']\n allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)}\n for i, x in enumerate(usr.list_allowed_tags()) if x != '']\n json_dumps = restrict 
+ allow\n elif res_type == 3: # CustomC per user\n if isinstance(user_id, int):\n usr = ub.session.query(ub.User).filter(ub.User.id == user_id).first()\n else:\n usr = current_user\n restrict = [{'Element': x, 'type': _('Deny'), 'id': 'd' + str(i)}\n for i, x in enumerate(usr.list_denied_column_values()) if x != '']\n allow = [{'Element': x, 'type': _('Allow'), 'id': 'a' + str(i)}\n for i, x in enumerate(usr.list_allowed_column_values()) if x != '']\n json_dumps = restrict + allow\n else:\n json_dumps = \"\"\n js = json.dumps(json_dumps)\n response = make_response(js)\n response.headers[\"Content-Type\"] = \"application/json; charset=utf-8\"\n return response\n\n\n@admi.route(\"/ajax/fullsync\", methods=[\"POST\"])\n@login_required" }, { "id": 130761, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/experimental/tf_utils.py", "file_name": "tf_utils.py", "fun_name": "get_flat", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_flat(self):\n \n # Eager mode.\n if not self.sess:\n return np.concatenate(\n [v.numpy().flatten() for v in self.variables.values()]\n )\n # Graph mode.\n return np.concatenate(\n [v.eval(session=self.sess).flatten() for v in self.variables.values()]\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 117, "n_words": 27, "vocab_size": 18, "complexity": 4, "nloc": 8, "token_counts": 71, "n_ast_nodes": 118, "n_identifiers": 12, "random_cut": "def get_flat(self):\n " }, { "id": 303635, "commit_id": "f90d007e73b52cd06b2a450b2f9a215b4b0b384d", "repo": "core", "path": "tests/components/android_ip_webcam/conftest.py", "file_name": "conftest.py", "fun_name": "aioclient_mock_fixture", "commit_message": "Add config flow to `android_ip_webcam` (#76222)\n\nCo-authored-by: Martin Hjelmare ", "code": "def aioclient_mock_fixture(aioclient_mock) -> None:\n \n aioclient_mock.get(\n \"http://1.1.1.1:8080/status.json?show_avail=1\",\n text=load_fixture(\"android_ip_webcam/status_data.json\"),\n status=HTTPStatus.OK,\n headers={\"Content-Type\": CONTENT_TYPE_JSON},\n )\n aioclient_mock.get(\n \"http://1.1.1.1:8080/sensors.json\",\n text=load_fixture(\"android_ip_webcam/sensor_data.json\"),\n status=HTTPStatus.OK,\n headers={\"Content-Type\": CONTENT_TYPE_JSON},\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 89, "n_words": 18, "vocab_size": 13, "complexity": 1, "nloc": 14, "token_counts": 64, "n_ast_nodes": 108, "n_identifiers": 10, "random_cut": "def aioclient_mock_fixture(aioclient_mock) -> None:\n \n aioclient_mock.get(\n \"http://1.1.1.1:8080/status.json?show_avail=1\",\n text=load_fixture(\"android_ip_webcam/status_data.json\"),\n status=HTTPStatus.OK,\n headers={\"Content-Type\": CONTENT_TYPE_JSON},\n )\n aioclient_mock.get(\n \"http://1.1.1.1:8080/sensors.json\",\n text=load_fixture(\"android_ip_webcam/sensor_data.j" }, { "id": 200201, "commit_id": "0ea6acbf9547be893df8b1df918b712b6ad9ba21", "repo": "sympy", "path": "sympy/physics/mechanics/joint.py", "file_name": "joint.py", "fun_name": "_set_angular_velocity", "commit_message": "Implement WeldJoint", "code": "def _set_angular_velocity(self):\n self.child_interframe.set_ang_vel(self.parent_interframe, 0)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": 
"", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 10, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 27, "n_identifiers": 5, "random_cut": "def _set_angular_velocity(self):\n self.child_interframe.set_" }, { "id": 193371, "commit_id": "1ea73f5832f6b7ccf7c74dacb38a63b7ea2dd720", "repo": "vision", "path": "test/test_prototype_transforms.py", "file_name": "test_prototype_transforms.py", "fun_name": "test__extract_image_targets_assertion", "commit_message": "Rename features.SegmentationMask to features.Mask (#6579)\n\n* rename features.SegmentationMask -> features.Mask\r\n\r\n* rename kernels *_segmentation_mask -> *_mask and cleanup input name\r\n\r\n* cleanup\r\n\r\n* rename module _segmentation_mask.py -> _mask.py\r\n\r\n* fix test", "code": "def test__extract_image_targets_assertion(self, mocker):\n transform = transforms.SimpleCopyPaste()\n\n flat_sample = [\n # images, batch size = 2\n self.create_fake_image(mocker, features.Image),\n # labels, bboxes, masks\n mocker.MagicMock(spec=features.Label),\n mocker.MagicMock(spec=features.BoundingBox),\n mocker.MagicMock(spec=features.Mask),\n # labels, bboxes, masks\n mocker.MagicMock(spec=features.BoundingBox),\n mocker.MagicMock(spec=features.Mask),\n ]\n\n with pytest.raises(TypeError, match=\"requires input sample to contain equal sized list of Images\"):\n transform._extract_image_targets(flat_sample)\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 181, "n_words": 44, "vocab_size": 35, "complexity": 1, "nloc": 12, "token_counts": 102, "n_ast_nodes": 160, "n_identifiers": 20, "random_cut": "def test__extract_image_targets_assertion(self, mocker):\n transform = transforms.SimpleCopyPaste()\n\n flat_sample = [\n # images, batch size = 2\n self.create_fake_image(mocker, features.Image),\n # labels, bboxes, masks\n mocker.MagicMock(spec=features.Label),\n mocker.MagicMock(spec=features.BoundingBox),\n mocker.MagicMock(spec=features.Mask),\n # labels, bboxes, masks\n mocker.MagicMock(spec=features.BoundingBox),\n mocker.MagicMock(spec=features.Mas" }, { "id": 321204, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "qutebrowser/browser/webkit/webkittab.py", "file_name": "webkittab.py", "fun_name": "down", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def down(self, count=1):\n self._key_press(Qt.Key.Key_Down, count, 'scrollBarMaximum', Qt.Orientation.Vertical)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 29, "n_ast_nodes": 44, "n_identifiers": 9, "random_cut": "def down(self, count=1):\n self._key_press(Qt.Key.Key_Down, count, 'scrollBarMaximum', Qt.Orient" }, { "id": 198366, "commit_id": "7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c", "repo": "sympy", "path": "sympy/combinatorics/tensor_can.py", "file_name": "tensor_can.py", "fun_name": "canonicalize", "commit_message": "Cleanup loops and ranges", "code": "def canonicalize(g, dummies, msym, *v):\n \n from sympy.combinatorics.testutil import canonicalize_naive\n if not isinstance(msym, list):\n if msym not in (0, 1, None):\n raise ValueError('msym must be 0, 1 or None')\n num_types = 1\n else:\n num_types = len(msym)\n if not all(msymx in (0, 1, None) for msymx in msym):\n raise ValueError('msym entries must be 
0, 1 or None')\n if len(dummies) != num_types:\n raise ValueError(\n 'dummies and msym must have the same number of elements')\n size = g.size\n num_tensors = 0\n v1 = []\n for base_i, gens_i, n_i, sym_i in v:\n # check that the BSGS is minimal;\n # this property is used in double_coset_can_rep;\n # if it is not minimal use canonicalize_naive\n if not _is_minimal_bsgs(base_i, gens_i):\n mbsgs = get_minimal_bsgs(base_i, gens_i)\n if not mbsgs:\n can = canonicalize_naive(g, dummies, msym, *v)\n return can\n base_i, gens_i = mbsgs\n v1.append((base_i, gens_i, [[]] * n_i, sym_i))\n num_tensors += n_i\n\n if num_types == 1 and not isinstance(msym, list):\n dummies = [dummies]\n msym = [msym]\n flat_dummies = []\n for dumx in dummies:\n flat_dummies.extend(dumx)\n\n if flat_dummies and flat_dummies != list(range(flat_dummies[0], flat_dummies[-1] + 1)):\n raise ValueError('dummies is not valid')\n\n # slot symmetry of the tensor\n size1, sbase, sgens = gens_products(*v1)\n if size != size1:\n raise ValueError(\n 'g has size %d, generators have size %d' % (size, size1))\n free = [i for i in range(size - 2) if i not in flat_dummies]\n num_free = len(free)\n\n # g1 minimal tensor under slot symmetry\n g1 = canonical_free(sbase, sgens, g, num_free)\n if not flat_dummies:\n return g1\n # save the sign of g1\n sign = 0 if g1[-1] == size - 1 else 1\n\n # the free indices are kept fixed.\n # Determine free_i, the list of slots of tensors which are fixed\n # since they are occupied by free indices, which are fixed.\n start = 0\n for i, (base_i, gens_i, n_i, sym_i) in enumerate(v):\n free_i = []\n len_tens = gens_i[0].size - 2\n # for each component tensor get a list od fixed islots\n for j in range(n_i):\n # get the elements corresponding to the component tensor\n h = g1[start:(start + len_tens)]\n fr = []\n # get the positions of the fixed elements in h\n for k in free:\n if k in h:\n fr.append(h.index(k))\n free_i.append(fr)\n start += len_tens\n v1[i] = (base_i, gens_i, free_i, sym_i)\n # BSGS of the tensor with fixed free indices\n # if tensor_gens fails in gens_product, use canonicalize_naive\n size, sbase, sgens = gens_products(*v1)\n\n # reduce the permutations getting rid of the free indices\n pos_free = [g1.index(x) for x in range(num_free)]\n size_red = size - num_free\n g1_red = [x - num_free for x in g1 if x in flat_dummies]\n if sign:\n g1_red.extend([size_red - 1, size_red - 2])\n else:\n g1_red.extend([size_red - 2, size_red - 1])\n map_slots = _get_map_slots(size, pos_free)\n sbase_red = [map_slots[i] for i in sbase if i not in pos_free]\n sgens_red = [_af_new([map_slots[i] for i in y._array_form if i not in pos_free]) for y in sgens]\n dummies_red = [[x - num_free for x in y] for y in dummies]\n transv_red = get_transversals(sbase_red, sgens_red)\n g1_red = _af_new(g1_red)\n g2 = double_coset_can_rep(\n dummies_red, msym, sbase_red, sgens_red, transv_red, g1_red)\n if g2 == 0:\n return 0\n # lift to the case with the free indices\n g3 = _lift_sgens(size, pos_free, free, g2)\n return g3\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1079, "n_words": 523, "vocab_size": 243, "complexity": 35, "nloc": 76, "token_counts": 638, "n_ast_nodes": 980, "n_identifiers": 69, "random_cut": "def canonicalize(g, dummies, msym, *v):\n \n from sympy.combinatorics.testutil import canonicalize_naive\n if not isinstance(msym, list):\n if msym not in (0, 1, None):\n raise ValueError('msym must be 0, 1 or None')\n 
num_types = 1\n else:\n num_types = len(msym)\n if not all(msymx in (0, 1, None) for msymx in msym):\n raise ValueError('msym entries must be 0, 1 or None')\n if len(dummies) != num_types:\n raise ValueError(\n 'dummies and msym must have the same number of elements')\n size = g.size\n num_tensors = 0\n v1 = []\n for base_i, gens_i, n_i, sym_i in v:\n # check that the BSGS is minimal;\n # this property is used in double_coset_can_rep;\n # if it is not minimal use canonicalize_naive\n if not _is_minimal_bsgs(base_i, gens_i):\n mbsgs = get_minimal_bsgs(base_i, gens_i)\n if not mbsgs:\n can = canonicalize_naive(g, dummies, msym, *v)\n return can\n base_i, gens_i = mbsgs\n v1.append((base_i, gens_i, [[]] * n_i, sym_i))\n num_tensors += n_i\n\n if num_types == 1 and not isinstance(msym, list):\n dummies = [dummies]\n msym = [msym]\n flat_dummies = []\n for dumx in dummies" }, { "id": 80402, "commit_id": "a4a3ba65d736045733cb49430d7076b73aec23bb", "repo": "awx", "path": "awx/main/tests/unit/test_tasks.py", "file_name": "test_tasks.py", "fun_name": "test_custom_environment_injectors_with_reserved_env_var", "commit_message": "Refactored tasks.py to a package\n--- Added 3 new sub-package : awx.main.tasks.system , awx.main.tasks.jobs , awx.main.tasks.receptor\n--- Modified the functional tests and unit tests accordingly", "code": "def test_custom_environment_injectors_with_reserved_env_var(self, private_data_dir, job):\n task = tasks.jobs.RunJob()\n task.instance = job\n some_cloud = CredentialType(\n kind='cloud',\n name='SomeCloud',\n managed=False,\n inputs={'fields': [{'id': 'api_token', 'label': 'API Token', 'type': 'string'}]},\n injectors={'env': {'JOB_ID': 'reserved'}},\n )\n credential = Credential(pk=1, credential_type=some_cloud, inputs={'api_token': 'ABC123'})\n job.credentials.add(credential)\n\n env = task.build_env(job, private_data_dir)\n\n assert env['JOB_ID'] == str(job.pk)\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 153, "n_words": 43, "vocab_size": 39, "complexity": 1, "nloc": 14, "token_counts": 124, "n_ast_nodes": 212, "n_identifiers": 25, "random_cut": "def test_custom_environment_injectors_with_reserved_env_var(self, private_data_dir, job):\n task = tasks.jobs.RunJob()\n task.instance = job\n some_cloud = CredentialType(\n kind='cloud',\n name='SomeCloud',\n managed=False,\n inputs={'fields': [{'id': 'api_token', 'label': 'API Token', 'type': 'string'}]},\n injectors={'env': {'JOB_ID': 'reserved'}},\n )\n credential = Credential(pk=1, credential_type=some_cloud, inputs={'api_token': 'ABC123'})\n job.credentials.add(credential)\n\n env = task.b" }, { "id": 94083, "commit_id": "2e24063442134bf50a485b69698b154d0090a361", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_auditlogs.py", "file_name": "test_organization_auditlogs.py", "fun_name": "test_simple", "commit_message": "feat(auditlog): Remove versioning from endpoint (#37301)\n\n* feat(auditlog): Remove versioning from endpoint\r\n\r\n* Update tests", "code": "def test_simple(self):\n now = timezone.now()\n\n org2 = self.create_organization(owner=self.user)\n\n entry1 = AuditLogEntry.objects.create(\n organization=self.organization,\n event=audit_log.get_event_id(\"ORG_EDIT\"),\n actor=self.user,\n datetime=now,\n )\n entry2 = AuditLogEntry.objects.create(\n organization=self.organization,\n event=audit_log.get_event_id(\"ORG_EDIT\"),\n actor=self.user,\n datetime=now + timedelta(seconds=1),\n )\n 
AuditLogEntry.objects.create(\n organization=org2,\n event=audit_log.get_event_id(\"ORG_EDIT\"),\n actor=self.user,\n datetime=now,\n )\n\n response = self.get_success_response(self.organization.slug)\n assert len(response.data[\"rows\"]) == 2\n assert response.data[\"rows\"][0][\"id\"] == str(entry2.id)\n assert response.data[\"rows\"][1][\"id\"] == str(entry1.id)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 262, "n_words": 47, "vocab_size": 29, "complexity": 1, "nloc": 25, "token_counts": 193, "n_ast_nodes": 303, "n_identifiers": 28, "random_cut": "def test_simple(self):\n now = timezone.now()\n\n org2 = self.create_organization(owner=self.user)\n\n entry1 = AuditLogEntry.objects.create(\n organization=self.organization,\n event=audit_log.get_event_id(\"ORG_EDIT\"),\n actor=self.user,\n datetime=now,\n )\n entry2 = AuditLogEnt" }, { "id": 85106, "commit_id": "4e4689949438735622bdf669f05d218c671e7e01", "repo": "zulip", "path": "zerver/webhooks/github/tests.py", "file_name": "tests.py", "fun_name": "test_push_50_commits_filtered_by_branches", "commit_message": "webhooks: Pick a more reasonable length for short sha.\n\n7 characters are not enough for large projects, so we change\nit to reasonably longer. As an example, The Linux kernel needs\nat least 11 characters of sha in its shortened form to identify\na revision. We pick 11 so it should work for most of the projects.\n\nSigned-off-by: Zixuan James Li ", "code": "def test_push_50_commits_filtered_by_branches(self) -> None:\n self.url = self.build_webhook_url(branches=\"master,changes\")\n commit_info = \"* Update README.md ([0d1a26e67d8](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\\n\"\n expected_message = f\"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 50 commits to branch changes.\\n\\n{commit_info * COMMITS_LIMIT}[and 30 more commit(s)]\"\n self.check_webhook(\"push__50_commits\", TOPIC_BRANCH, expected_message)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 57, "n_words": 30, "vocab_size": 28, "complexity": 1, "nloc": 5, "token_counts": 36, "n_ast_nodes": 73, "n_identifiers": 10, "random_cut": "def test_push_50_commits_filtered_by_branches(self) -> None:\n self.url = self.build_webhook_url(branches=\"master,changes\")\n commit_info = \"* Update README.md ([0d1a26e67d8](https://github.com/baxterthehacker/public-repo/commit/0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c))\\n\"\n expected_message = f\"baxterthehacker [pushed](https://github.com/baxterthehacker/public-repo/compare/9049f1265b7d...0d1a26e67d8f) 50 commits to branch changes.\\n\\n{commit_info * COMMITS_LIMIT" }, { "id": 51945, "commit_id": "2e373966a7fd3119c205350fb14d0b7bfe74185d", "repo": "PaddleHub", "path": "modules/image/Image_editing/super_resolution/swinir_l_real_sr_x4/swinir.py", "file_name": "swinir.py", "fun_name": "extra_repr", "commit_message": "add swinir_l_real_sr_x4 (#2076)\n\n* git add swinir_l_real_sr_x4\r\n\r\n* fix typo\r\n\r\n* fix typo\r\n\r\nCo-authored-by: chenjian ", "code": "def extra_repr(self) -> str:\n return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 14, 
"n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 38, "n_identifiers": 6, "random_cut": "def extra_repr(self) -> str:\n return " }, { "id": 22048, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/adapters.py", "file_name": "adapters.py", "fun_name": "cert_verify", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def cert_verify(self, conn, url, verify, cert):\n \n if url.lower().startswith(\"https\") and verify:\n\n cert_loc = None\n\n # Allow self-specified cert location.\n if verify is not True:\n cert_loc = verify\n\n if not cert_loc:\n cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)\n\n if not cert_loc or not os.path.exists(cert_loc):\n raise OSError(\n f\"Could not find a suitable TLS CA certificate bundle, \"\n f\"invalid path: {cert_loc}\"\n )\n\n conn.cert_reqs = \"CERT_REQUIRED\"\n\n if not os.path.isdir(cert_loc):\n conn.ca_certs = cert_loc\n else:\n conn.ca_cert_dir = cert_loc\n else:\n conn.cert_reqs = \"CERT_NONE\"\n conn.ca_certs = None\n conn.ca_cert_dir = None\n\n if cert:\n if not isinstance(cert, basestring):\n conn.cert_file = cert[0]\n conn.key_file = cert[1]\n else:\n conn.cert_file = cert\n conn.key_file = None\n if conn.cert_file and not os.path.exists(conn.cert_file):\n raise OSError(\n f\"Could not find the TLS certificate file, \"\n f\"invalid path: {conn.cert_file}\"\n )\n if conn.key_file and not os.path.exists(conn.key_file):\n raise OSError(\n f\"Could not find the TLS key file, invalid path: {conn.key_file}\"\n )\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 631, "n_words": 133, "vocab_size": 63, "complexity": 14, "nloc": 37, "token_counts": 205, "n_ast_nodes": 357, "n_identifiers": 23, "random_cut": "def cert_verify(self, conn, url, verify, cert):\n \n if url.lower().startswith(\"https\") and verify:\n\n cert_loc = None\n\n # Allow self-specified cert location.\n if verify is not True:\n cert_loc = verify\n\n if not cert_loc:\n cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)\n\n if not cert_loc or not os.path.exists(cert_loc):\n raise OSError(\n f\"Could not find a suitable TLS CA certificate bundle, \"\n f\"invalid path: {cert_loc}\"\n )\n\n conn.cert_reqs = \"CERT_REQUIRED\"\n\n if not os.path.isdir(cert_loc):\n conn.ca_certs = cert_loc\n else:\n conn.ca_cert_dir = cert_loc\n else:\n conn.cert_reqs = \"CERT_NONE\"\n conn.ca_certs = None\n conn.ca_cert_dir = None\n\n if cert:\n if not isinstance(cert, basestring):\n conn.cert_file = cert[0]\n conn.key_file = cert[1]\n else:\n conn.cert_file = cert\n conn.key_file = None\n if conn.cert_file and not os.path.exists(conn.cert_file):\n raise OSError(\n f\"Could not find the TLS certificate file, \"\n f\"invalid path: {conn.cert_file}\"\n )\n if conn.key_file and not os.path.exists(conn.key_file):\n raise OSError(\n f\"Could not find the TLS key file, invalid path: " }, { "id": 171266, "commit_id": "289f32df5a565848adbc0adc8949fa4066542316", "repo": "pandas", "path": "pandas/plotting/_matplotlib/core.py", "file_name": "core.py", "fun_name": "_make_plot", "commit_message": "STYLE: fix pylint reimported warnings (#49645)\n\n* STYLE: fix pylint reimported warnings\r\n\r\n* fixup! 
STYLE: fix pylint reimported warnings", "code": "def _make_plot(self) -> None:\n colors = self._get_colors()\n ncolors = len(colors)\n\n pos_prior = neg_prior = np.zeros(len(self.data))\n K = self.nseries\n\n for i, (label, y) in enumerate(self._iter_data(fillna=0)):\n ax = self._get_ax(i)\n kwds = self.kwds.copy()\n if self._is_series:\n kwds[\"color\"] = colors\n elif isinstance(colors, dict):\n kwds[\"color\"] = colors[label]\n else:\n kwds[\"color\"] = colors[i % ncolors]\n\n errors = self._get_errorbars(label=label, index=i)\n kwds = dict(kwds, **errors)\n\n label = pprint_thing(label)\n label = self._mark_right_label(label, index=i)\n\n if ((\"yerr\" in kwds) or (\"xerr\" in kwds)) and (kwds.get(\"ecolor\") is None):\n kwds[\"ecolor\"] = mpl.rcParams[\"xtick.color\"]\n\n start = 0\n if self.log and (y >= 1).all():\n start = 1\n start = start + self._start_base\n\n if self.subplots:\n w = self.bar_width / 2\n rect = self._plot(\n ax,\n self.ax_pos + w,\n y,\n self.bar_width,\n start=start,\n label=label,\n log=self.log,\n **kwds,\n )\n ax.set_title(label)\n elif self.stacked:\n mask = y > 0\n start = np.where(mask, pos_prior, neg_prior) + self._start_base\n w = self.bar_width / 2\n rect = self._plot(\n ax,\n self.ax_pos + w,\n y,\n self.bar_width,\n start=start,\n label=label,\n log=self.log,\n **kwds,\n )\n pos_prior = pos_prior + np.where(mask, y, 0)\n neg_prior = neg_prior + np.where(mask, 0, y)\n else:\n w = self.bar_width / K\n rect = self._plot(\n ax,\n self.ax_pos + (i + 0.5) * w,\n y,\n w,\n start=start,\n label=label,\n log=self.log,\n **kwds,\n )\n self._append_legend_handles_labels(rect, label)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1151, "n_words": 189, "vocab_size": 95, "complexity": 11, "nloc": 67, "token_counts": 445, "n_ast_nodes": 672, "n_identifiers": 49, "random_cut": "def _make_plot(self) -> None:\n colors = self._get_colors()\n ncolors = len(colors)\n\n pos_prior = neg_prior = np.zeros(len(self.data))\n K = self.nseries\n\n for i, (label, y) in enumerate(self._iter_data(fillna=0)):\n ax = self._get_ax(i)\n kwds = self.kwds.copy()\n if self._is_series:\n kwds[\"color\"] = colors\n elif isinstance(colors, dict):\n kwds[\"color\"] = colors[label]\n else:\n kwds[\"color\"] = colors[i % ncolors]\n\n errors = self._get_errorbars(label=label, index=i)\n kwds = dict(kwds, **errors)\n\n label = pprint_thing(label)\n label = self._mark_right_label(label, index=i)\n\n if ((\"yerr\" in kwds) or (\"xerr\" in kwds)) and (kwds.get(\"ecolor\") is None):\n kwds[\"ecolor\"] = mpl.rcParams[\"xtick.color\"]\n\n start = 0\n if self.log and (y >= 1).all():\n start = 1\n start = start + self._start_base\n\n if self.subplots:\n w = self.bar_width / 2\n rect = self._plot(\n ax,\n self.ax_pos + w,\n y,\n self.bar_width,\n start=start,\n label=label,\n log=self.log,\n **kwds,\n )\n ax.set_title(label)\n elif self.stacked:\n mask = y > 0\n start = np.where(mask, pos_prior, neg_prior) + self._start_base\n w = self.bar_width / 2\n rect = self._plot(\n ax,\n self.ax_pos + w,\n y,\n self.bar_width,\n start=start,\n label=label,\n log=self.log,\n **kwds,\n )\n pos_prior = pos_prior + np.where(mask, y, 0)\n neg_prior = neg_prior + np.where(mask, 0, y)\n else:\n w = self.bar" }, { "id": 83055, "commit_id": "dd1c9c45c778dc5280c2b02c3b9fb327d2507cc1", "repo": "zulip", "path": "zerver/tests/test_subs.py", "file_name": "test_subs.py", "fun_name": "test_pick_colors", "commit_message": "stream 
colors: Try harder to avoid collisions.\n\nWe now use recipient_id % 24 for new stream colors\nwhen users have already used all 24 of our canned\ncolors.\n\nThis fix doesn't address the scenario that somebody\ndislikes one of our current canned colors, so if a\nuser continually changes canned color N to some other\ncolor for new streams, their new streams will continue\nto include color N (and the user will still need to\nchange them).\n\nThis fix doesn't address the fact that it can be expensive\nduring bulk-add situations to query for all the colors\nthat users have already used up.\n\nSee https://chat.zulip.org/#narrow/stream/3-backend/topic/assigning.20stream.20colors\nfor more discussion.", "code": "def test_pick_colors(self) -> None:\n used_colors: Set[str] = set()\n color_map: Dict[int, str] = {}\n recipient_ids = list(range(30))\n user_color_map = pick_colors(used_colors, color_map, recipient_ids)\n self.assertEqual(\n user_color_map,\n {\n 0: \"#76ce90\",\n 1: \"#fae589\",\n 2: \"#a6c7e5\",\n 3: \"#e79ab5\",\n 4: \"#bfd56f\",\n 5: \"#f4ae55\",\n 6: \"#b0a5fd\",\n 7: \"#addfe5\",\n 8: \"#f5ce6e\",\n 9: \"#c2726a\",\n 10: \"#94c849\",\n 11: \"#bd86e5\",\n 12: \"#ee7e4a\",\n 13: \"#a6dcbf\",\n 14: \"#95a5fd\",\n 15: \"#53a063\",\n 16: \"#9987e1\",\n 17: \"#e4523d\",\n 18: \"#c2c2c2\",\n 19: \"#4f8de4\",\n 20: \"#c6a8ad\",\n 21: \"#e7cc4d\",\n 22: \"#c8bebf\",\n 23: \"#a47462\",\n # start repeating\n 24: \"#76ce90\",\n 25: \"#fae589\",\n 26: \"#a6c7e5\",\n 27: \"#e79ab5\",\n 28: \"#bfd56f\",\n 29: \"#f4ae55\",\n },\n )\n\n color_map = {98: \"color98\", 99: \"color99\"}\n used_colors = set(STREAM_ASSIGNMENT_COLORS) - {\"#c6a8ad\", \"#9987e1\"}\n recipient_ids = [99, 98, 1, 2, 3, 4]\n user_color_map = pick_colors(used_colors, color_map, recipient_ids)\n self.assertEqual(\n user_color_map,\n {98: \"color98\", 99: \"color99\", 1: \"#9987e1\", 2: \"#c6a8ad\", 3: \"#e79ab5\", 4: \"#bfd56f\"},\n )\n\n \n used_colors = set(STREAM_ASSIGNMENT_COLORS)\n color_map = {}\n recipient_ids = [2, 26, 50, 74]\n user_color_map = pick_colors(used_colors, color_map, recipient_ids)\n self.assertEqual(\n user_color_map,\n {2: \"#a6c7e5\", 26: \"#a6c7e5\", 50: \"#a6c7e5\", 74: \"#a6c7e5\"},\n )\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 832, "n_words": 157, "vocab_size": 106, "complexity": 1, "nloc": 70, "token_counts": 315, "n_ast_nodes": 520, "n_identifiers": 16, "random_cut": "def test_pick_colors(self) -> None:\n used_colors: Set[str] = set()\n color_map: Dict[int, str] = {}\n recipient_ids = list(range(30))\n user_color_map = pick_colors(used_colors, color_map, recipient_ids)\n self.assertEqual(\n user_color_map,\n {\n 0: \"#76ce90\",\n 1: \"#fae589\",\n 2: \"#a6c7e5\",\n 3: \"#e79ab5\",\n 4: \"#bfd56f\",\n 5: \"#f4ae55\",\n 6: \"#b0a5fd\",\n 7: \"#addfe5\",\n 8: \"#f5ce6e\",\n 9: \"#c2726a\",\n 10: \"#94c849\",\n 11: \"#bd86e5\",\n 12: \"#ee7e4a\",\n 13: \"#a6dcbf\",\n 14: \"#95a5fd\",\n 15: \"#53a063\",\n 16: \"#9987e1\",\n 17: \"#e4523d\",\n 18: \"#c2c2c2\",\n 19: \"#4f8de4\",\n 20: \"#c6a8ad\",\n 21: \"#e7cc4d\",\n 22: \"#c8bebf\",\n 23: \"#a47462\",\n # start repeating\n 24: \"#76ce90\",\n 25: \"#fae589\",\n 26: \"#a6c7e5\",\n 27: \"#e79ab5\",\n 28: \"#bfd56f\",\n 29: \"#f4ae55\",\n " }, { "id": 270852, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_layer_test.py", "file_name": "base_layer_test.py", "fun_name": 
"test_dynamic_layer_error_running_in_graph_mode", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_dynamic_layer_error_running_in_graph_mode(self):\n with tf.compat.v1.get_default_graph().as_default():\n model = test_utils.get_model_from_layers(\n [DynamicLayer(dynamic=True)], input_shape=(3,)\n )\n self.assertEqual(model.dynamic, True)\n # But then you cannot run the model since you're in a graph scope.\n with self.assertRaisesRegex(\n ValueError, \"You must enable eager execution\"\n ):\n model.compile(rmsprop.RMSprop(0.001), loss=\"mse\")\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 154, "n_words": 37, "vocab_size": 35, "complexity": 1, "nloc": 10, "token_counts": 79, "n_ast_nodes": 129, "n_identifiers": 20, "random_cut": "def test_dynamic_layer_error_running_in_graph_mode(self):\n with tf.compat.v1.get_default_graph().as_default():\n model = test_utils.get_model_fr" }, { "id": 206340, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/templatetags/tz.py", "file_name": "tz.py", "fun_name": "timezone_constructor", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def timezone_constructor(tzname):\n if settings.USE_DEPRECATED_PYTZ:\n import pytz\n\n try:\n return pytz.timezone(tzname)\n except pytz.UnknownTimeZoneError:\n raise UnknownTimezoneException\n try:\n return zoneinfo.ZoneInfo(tzname)\n except zoneinfo.ZoneInfoNotFoundError:\n raise UnknownTimezoneException\n\n\n# HACK: datetime instances cannot be assigned new attributes. Define a subclass\n# in order to define new attributes in do_timezone().", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 104, "n_words": 41, "vocab_size": 33, "complexity": 4, "nloc": 11, "token_counts": 44, "n_ast_nodes": 75, "n_identifiers": 11, "random_cut": "def timezone_constructor(tzname):\n if settings.USE_DEPRECATED_PYTZ:\n import pytz\n\n try:\n return pytz.timezone(tzname)\n except pytz.UnknownTimeZoneError:\n raise UnknownTimezone" }, { "id": 96286, "commit_id": "1bd1c98d520fb44c5e69f3159102cd8f07b84911", "repo": "sentry", "path": "tests/sentry/api/serializers/test_organization.py", "file_name": "test_organization.py", "fun_name": "test_trusted_relay_serializer", "commit_message": "feat(apidocs): type organization serializer (#31787)", "code": "def test_trusted_relay_serializer(self):\n completion_seen = timezone.now()\n serializer = OnboardingTasksSerializer()\n task = OrganizationOnboardingTask.objects.create(\n organization=self.organization,\n task=OnboardingTask.FIRST_PROJECT,\n status=OnboardingTaskStatus.PENDING,\n user=self.user,\n completion_seen=completion_seen,\n )\n\n result = serialize(task, self.user, serializer)\n assert result[\"task\"] == \"create_project\"\n assert result[\"status\"] == \"pending\"\n assert result[\"completionSeen\"] == completion_seen\n assert result[\"data\"] == {}\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 155, "n_words": 38, "vocab_size": 28, "complexity": 1, "nloc": 15, "token_counts": 95, "n_ast_nodes": 153, "n_identifiers": 20, "random_cut": "def test_trusted_relay_serializer(self):\n completion_seen = timezone.now()\n serializer = OnboardingTasksSerializer()\n task = 
OrganizationOnboardingTask.objects.create(\n organization=self.organization,\n task" }, { "id": 189163, "commit_id": "1a6b498657ec5dd29ddf4f6b240c6fc0c5d88f7a", "repo": "aws-cli", "path": "tests/functional/eks/test_kubeconfig.py", "file_name": "test_kubeconfig.py", "fun_name": "test_load_noexist", "commit_message": "Deprecate Kubernetes client API version v1alpha1\n\nKubernetes has deprecated v1alpha1, v1beta1 has been available since Kubernetes\nv1.11 (kubernetes/kubernetes#64482), and EKS currently supports Kubernetes\nversions v1.16 through v1.21. This is a breaking change for clients running\nversions v1.10 and older, which haven't been supported by EKS since September\n2019.\n\n\"aws eks get-token\" now respects the KUBERNETES_EXEC_INFO environment\nvariable and conservatively falls back to v1alpha1, which is supported\nby Kubernetes versions 1.10 through 1.22 (released upstream August 2021, to be\nreleased by EKS in Q4 2021). It also now supports \"v1beta1\" and \"v1\".\n\n\"aws eks update-kubeconfig\" now writes \"v1beta1\" in the kubeconfig which\nwill be supported by Kubernetes until 1.29 (aproximately December 2023).\nAt or around that date, we can change the default version written to\nkubeconfigs to \"v1\"\n\nSigned-off-by: Micah Hausler ", "code": "def test_load_noexist(self):\n no_exist_path = os.path.join(self._temp_directory,\n \"this_does_not_exist\")\n loaded_config = self._loader.load_kubeconfig(no_exist_path)\n self.assertEqual(loaded_config.content,\n _get_new_kubeconfig_content())\n self._validator.validate_config.called_with(\n Kubeconfig(no_exist_path, _get_new_kubeconfig_content()))\n", "url": "https://github.com/aws/aws-cli.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 112, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 8, "token_counts": 58, "n_ast_nodes": 94, "n_identifiers": 17, "random_cut": "def test_load_noexist(self):\n no_exist_path = os.path.join(self._temp_directory,\n \"this_does_not_exist\")\n loaded_config = self._loader.load_kubeconfig(no_exist_" }, { "id": 90358, "commit_id": "34bb5f4c3909e671b6502eceeb849f9693d9794e", "repo": "sentry", "path": "tests/sentry/eventstream/kafka/test_consumer.py", "file_name": "test_consumer.py", "fun_name": "test_consumer_start_from_committed_offset", "commit_message": "ci: compile devservices args (#34891)", "code": "def test_consumer_start_from_committed_offset(requires_kafka):\n consumer_group = f\"consumer-{uuid.uuid1().hex}\"\n synchronize_commit_group = f\"consumer-{uuid.uuid1().hex}\"\n\n messages_delivered = defaultdict(list)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 19, "n_words": 11, "vocab_size": 8, "complexity": 7, "nloc": 58, "token_counts": 386, "n_ast_nodes": 61, "n_identifiers": 10, "random_cut": "def test_consumer_start_from_committed_offset(requires_kafka):\n consumer_group = f\"consumer-{uuid.uuid1().hex}\"\n synchronize_commit_group = f\"consumer-{uuid.uuid1().hex}\"\n\n messages_delivered = defaultdict(list)\n" }, { "id": 205486, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/expressions.py", "file_name": "expressions.py", "fun_name": "window_frame_start_end", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def window_frame_start_end(self, connection, start, end):\n raise NotImplementedError(\"Subclasses must implement 
window_frame_start_end().\")\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 16, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 25, "n_identifiers": 6, "random_cut": "def window_frame_start_end(self, connection, start, end):\n raise" }, { "id": 23359, "commit_id": "49ecf9c3bc9e6154360a84f402d8b669580b6dd3", "repo": "PaddleOCR", "path": "tools/program.py", "file_name": "program.py", "fun_name": "preprocess", "commit_message": "add use_xpu config for det_mv3_db.yml", "code": "def preprocess(is_train=False):\n FLAGS = ArgsParser().parse_args()\n profiler_options = FLAGS.profiler_options\n config = load_config(FLAGS.config)\n config = merge_config(config, FLAGS.opt)\n profile_dic = {\"profiler_options\": FLAGS.profiler_options}\n config = merge_config(config, profile_dic)\n\n if is_train:\n # save_config\n save_model_dir = config['Global']['save_model_dir']\n os.makedirs(save_model_dir, exist_ok=True)\n with open(os.path.join(save_model_dir, 'config.yml'), 'w') as f:\n yaml.dump(\n dict(config), f, default_flow_style=False, sort_keys=False)\n log_file = '{}/train.log'.format(save_model_dir)\n else:\n log_file = None\n logger = get_logger(name='root', log_file=log_file)\n\n # check if set use_gpu=True in paddlepaddle cpu version\n use_gpu = config['Global']['use_gpu']\n check_gpu(use_gpu)\n\n # check if set use_xpu=True in paddlepaddle cpu/gpu version\n use_xpu = False\n if 'use_xpu' in config['Global']:\n use_xpu = config['Global']['use_xpu']\n check_xpu(use_xpu)\n\n alg = config['Architecture']['algorithm']\n assert alg in [\n 'EAST', 'DB', 'SAST', 'Rosetta', 'CRNN', 'STARNet', 'RARE', 'SRN',\n 'CLS', 'PGNet', 'Distillation', 'NRTR', 'TableAttn', 'SAR', 'PSE',\n 'SEED', 'SDMGR', 'LayoutXLM', 'LayoutLM'\n ]\n\n device = 'cpu'\n if use_gpu:\n device = 'gpu:{}'.format(dist.ParallelEnv().dev_id)\n if use_xpu:\n device = 'xpu'\n device = paddle.set_device(device)\n\n config['Global']['distributed'] = dist.get_world_size() != 1\n\n if config['Global']['use_visualdl'] and dist.get_rank() == 0:\n from visualdl import LogWriter\n save_model_dir = config['Global']['save_model_dir']\n vdl_writer_path = '{}/vdl/'.format(save_model_dir)\n os.makedirs(vdl_writer_path, exist_ok=True)\n vdl_writer = LogWriter(logdir=vdl_writer_path)\n else:\n vdl_writer = None\n print_dict(config, logger)\n logger.info('train with paddle {} and device {}'.format(paddle.__version__,\n device))\n return config, device, logger, vdl_writer\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 472, "n_words": 175, "vocab_size": 119, "complexity": 7, "nloc": 48, "token_counts": 368, "n_ast_nodes": 646, "n_identifiers": 50, "random_cut": "def preprocess(is_train=False):\n FLAGS = ArgsParser().parse_args()\n profiler_options = FLAGS.profiler_options\n config = load_config(FLAGS.config)\n config = merge_config(config, FLAGS.opt)\n profile_dic = {\"profiler_options\": FLAGS.profiler_options}\n config = merge_config(config, profile_dic)\n\n if is_train:\n # save_config\n save_model_dir = config['Global']['save_model_dir']\n os.makedirs(save_model_dir, exist_ok=True)\n with open(os.path.join(save_model_dir, 'config.yml'), 'w') as f:\n yaml.dump(\n dict(config), f, default_flow_style=False, sort_keys=False)\n log_file = '{}/train.log'.format(save_model_dir)\n else:\n log_file = None\n logger = 
get_logger(name='root', log_file=log_file)\n\n # check if set use_gpu=True in paddlepaddle cpu version\n use_gpu = config['Global']['use_gpu']\n check_gpu(use_gpu)\n\n # check if set use_xpu=True in paddlepaddle cpu/gpu version\n use_xpu = False\n if 'use_xpu' in config['Global']:\n use_xpu = config['Global']['use_xpu']\n check_xpu(use_xpu)\n\n alg = config['Architecture']['algorithm']\n assert alg in [\n 'EAST', 'DB', 'SAST', 'Rosetta', 'CRNN', 'STARNet', 'RARE', 'SRN',\n 'CLS', 'PGNet', 'Distillation', 'NRTR', 'TableAttn', 'SAR', 'PSE',\n 'SEED', 'SDMGR', 'LayoutXLM', 'LayoutLM'\n ]\n\n device = 'cpu'\n if use_gpu:\n device = 'gpu:{}'.format(dist.ParallelEnv().dev_id)\n if use_xpu:\n device = 'xpu'\n device = paddle.set_device(device)\n\n config['Global']['distributed'] = dist.get_world_size() != 1\n\n if " }, { "id": 318241, "commit_id": "20fec104e2a11b1a5164d7fe779eb0d894e098cf", "repo": "core", "path": "homeassistant/components/iglo/light.py", "file_name": "light.py", "fun_name": "max_mireds", "commit_message": "Improve type hints in light [a-i] (#75936)\n\n* Improve type hints in ads light\r\n\r\n* Improve type hints in avea light\r\n\r\n* Improve type hints in avion light\r\n\r\n* Improve type hints in broadlink light\r\n\r\n* More type hints\r\n\r\n* One more", "code": "def max_mireds(self) -> int:\n \n return math.ceil(\n color_util.color_temperature_kelvin_to_mired(self._lamp.min_kelvin)\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 40, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 5, "token_counts": 24, "n_ast_nodes": 41, "n_identifiers": 9, "random_cut": "def max_mireds(self) -> int:\n \n return math.ceil(\n color_util.color_temperature_kelvin_to_mired(self._la" }, { "id": 178643, "commit_id": "24bc8e9a4949b34772d2d2eb8342f3fd74ec3d5c", "repo": "Nuitka", "path": "nuitka/build/inline_copy/clcache/clcache/caching.py", "file_name": "caching.py", "fun_name": "getFileHash", "commit_message": "Scons: The clcache was hashing the same files over and over\n\n* This might now be dead code that we no longer use direct mode", "code": "def getFileHash(filePath, additionalData=None):\n key = (filePath, additionalData)\n\n if key in _hash_cache:\n return _hash_cache[key]\n\n hasher = HashAlgorithm()\n with open(filePath, \"rb\") as inFile:\n hasher.update(inFile.read())\n if additionalData is not None:\n # Encoding of this additional data does not really matter\n # as long as we keep it fixed, otherwise hashes change.\n # The string should fit into ASCII, so UTF8 should not change anything\n hasher.update(additionalData.encode(\"UTF-8\"))\n\n _hash_cache[key] = hasher.hexdigest()\n return _hash_cache[key]\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 129, "n_words": 67, "vocab_size": 53, "complexity": 3, "nloc": 11, "token_counts": 83, "n_ast_nodes": 140, "n_identifiers": 13, "random_cut": "def getFileHash(filePath, additionalData=None):\n key = (filePath, additionalData)\n\n if key in _hash_cache:\n return _hash_cache[key]\n\n hasher = HashAlgorithm()\n with open(filePath, \"rb\") as inFile:\n hasher.update(inFile.read())\n if additionalData is not None:\n # Encod" }, { "id": 132529, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/tests/test_logger.py", "file_name": "test_logger.py", "fun_name": "_validate_json_result", 
"commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _validate_json_result(self, config):\n # Check result logs\n results = []\n result_file = os.path.join(self.test_dir, EXPR_RESULT_FILE)\n with open(result_file, \"rt\") as fp:\n for row in fp.readlines():\n results.append(json.loads(row))\n\n self.assertEqual(len(results), 3)\n self.assertSequenceEqual(\n [int(row[\"episode_reward_mean\"]) for row in results], [4, 5, 6]\n )\n\n # Check json saved config file\n config_file = os.path.join(self.test_dir, EXPR_PARAM_FILE)\n with open(config_file, \"rt\") as fp:\n loaded_config = json.load(fp)\n\n self.assertEqual(loaded_config, config)\n\n # Check pickled config file\n config_file = os.path.join(self.test_dir, EXPR_PARAM_PICKLE_FILE)\n with open(config_file, \"rb\") as fp:\n loaded_config = cloudpickle.load(fp)\n\n self.assertEqual(loaded_config, config)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 238, "n_words": 75, "vocab_size": 47, "complexity": 3, "nloc": 18, "token_counts": 172, "n_ast_nodes": 281, "n_identifiers": 27, "random_cut": "def _validate_json_result(self, config):\n # Check result logs\n results = []\n result_file = os.path.join(self.test_dir, EXPR_RESULT_FILE)\n with open(result_file, \"rt\") as fp:\n for row in fp.readlines():\n results.append(json.loads(row))\n\n self.assertEqual(len(results), 3)\n self.assertSequenceEqual(\n [int(row[\"episode_reward_mean\"]) for row in results], [4, 5, 6]\n )\n\n # Check json saved config file\n config_file = os.path.join(self.test_dir, EXPR_PARAM_FILE)\n with open(config_file, \"rt\") as fp:\n loaded_con" }, { "id": 269989, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/callbacks_test.py", "file_name": "callbacks_test.py", "fun_name": "test_TensorBoard_autoTrace_profileBatchRange", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_TensorBoard_autoTrace_profileBatchRange(self):\n model = self._get_seq_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(\n self.logdir,\n histogram_freq=1,\n profile_batch=\"1,3\",\n write_graph=False,\n )\n\n model.fit(\n x,\n y,\n batch_size=4,\n epochs=2,\n validation_data=(x, y),\n callbacks=[tb_cbk],\n )\n summary_file = list_summaries(self.logdir)\n\n self.assertEqual(\n summary_file.tensors,\n {\n # Trace will be logged once at the batch it stops profiling.\n _ObservedSummary(logdir=self.train_dir, tag=\"batch_3\"),\n },\n )\n self.assertEqual(1, self._count_trace_file(logdir=self.logdir))\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 297, "n_words": 55, "vocab_size": 47, "complexity": 1, "nloc": 25, "token_counts": 147, "n_ast_nodes": 218, "n_identifiers": 28, "random_cut": "def test_TensorBoard_autoTrace_profileBatchRange(self):\n model = self._get_seq_model()\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(\n self.logdir,\n histogram_freq=1,\n profile_batch=\"1,3\",\n write_graph=False,\n )\n\n model.fit(\n x,\n y,\n batch_size=4,\n epochs=2,\n validation_data=(x, y),\n callbacks=[tb_cbk],\n )\n summary_file = list_summaries(self.logdir)\n\n self.assertEqual(\n summary_file.tensors,\n {\n # Trace will be logged once at 
the batch it stops profiling.\n _ObservedSummary(logdir=self.train_dir, tag=\"batch_3\"),\n },\n )\n self.assertEqual(1, self._count_trace_file(logdir=sel" }, { "id": 288007, "commit_id": "b043a6ba887e8f925cfa97f3edf66b6f6d7fe4af", "repo": "core", "path": "tests/components/forked_daapd/test_browse_media.py", "file_name": "test_browse_media.py", "fun_name": "test_async_browse_image_missing", "commit_message": "Cleanup add browse media forked daapd #79009 (#79157)", "code": "async def test_async_browse_image_missing(hass, hass_client, config_entry, caplog):\n \n\n with patch(\n \"homeassistant.components.forked_daapd.media_player.ForkedDaapdAPI\",\n autospec=True,\n ) as mock_api:\n config_entry.add_to_hass(hass)\n await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n client = await hass_client()\n mock_api.return_value.full_url = lambda x: \"http://owntone_instance/\" + x\n mock_api.return_value.get_track.return_value = {}\n\n media_content_id = create_media_content_id(\n \"title\", media_type=MediaType.TRACK, id_or_path=\"456\"\n )\n resp = await client.get(\n f\"/api/media_player_proxy/{TEST_MASTER_ENTITY_NAME}/browse_media/{MediaType.TRACK}/{media_content_id}\"\n )\n assert resp.status == HTTPStatus.INTERNAL_SERVER_ERROR\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 171, "n_words": 49, "vocab_size": 40, "complexity": 1, "nloc": 18, "token_counts": 110, "n_ast_nodes": 202, "n_identifiers": 30, "random_cut": "async def test_async_browse_image_missing(hass, hass_client, config_entry, caplog):\n \n\n with patch(\n \"homeassistant.components.forked_daapd.media_player.ForkedDaapdAPI\",\n autospec=True,\n ) as mock_api:\n config_entry.add_to_hass(hass)\n await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_don" }, { "id": 166461, "commit_id": "7c054d6a256fd0186befe03acf9e9e86d81668d6", "repo": "pandas", "path": "pandas/tests/groupby/transform/test_transform.py", "file_name": "test_transform.py", "fun_name": "test_transform_axis_1_reducer", "commit_message": "DEPR: groupby numeric_only default (#47025)", "code": "def test_transform_axis_1_reducer(request, reduction_func):\n # GH#45715\n if reduction_func in (\n \"corrwith\",\n \"idxmax\",\n \"idxmin\",\n \"ngroup\",\n \"nth\",\n ):\n marker = pytest.mark.xfail(reason=\"transform incorrectly fails - GH#45986\")\n request.node.add_marker(marker)\n if reduction_func == \"mad\":\n warn = FutureWarning\n msg = \"The 'mad' method is deprecated\"\n elif reduction_func in (\"sem\", \"std\"):\n warn = FutureWarning\n msg = \"The default value of numeric_only\"\n else:\n warn = None\n msg = \"\"\n\n df = DataFrame({\"a\": [1, 2], \"b\": [3, 4], \"c\": [5, 6]}, index=[\"x\", \"y\"])\n with tm.assert_produces_warning(warn, match=msg):\n result = df.groupby([0, 0, 1], axis=1).transform(reduction_func)\n if reduction_func == \"size\":\n # size doesn't behave in the same manner; hardcode expected result\n expected = DataFrame(2 * [[2, 2, 1]], index=df.index, columns=df.columns)\n else:\n warn = FutureWarning if reduction_func == \"mad\" else None\n with tm.assert_produces_warning(warn, match=\"The 'mad' method is deprecated\"):\n expected = df.T.groupby([0, 0, 1]).transform(reduction_func).T\n tm.assert_equal(result, expected)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 
298, "n_words": 129, "vocab_size": 87, "complexity": 6, "nloc": 29, "token_counts": 237, "n_ast_nodes": 394, "n_identifiers": 27, "random_cut": "def test_transform_axis_1_reducer(request, reduction_func):\n # GH#45715\n if reduction_func in (\n \"corrwith\",\n \"idxmax\",\n \"idxmin\",\n \"ngroup\",\n \"nth\",\n ):\n marker = pytest.mark.xfail(reason=\"transform incorrectly fails - GH#45986\")\n request.node.add_marker(marker)\n if reduction_func == \"mad\":\n warn = Future" }, { "id": 160658, "commit_id": "d7e2582cd33b22a767286e8a3d95b336dfe51a34", "repo": "numpy", "path": "numpy/lib/tests/test_arraysetops.py", "file_name": "test_arraysetops.py", "fun_name": "test_in1d_hit_alternate_algorithm", "commit_message": "MAINT: bool instead of np.bool_ dtype", "code": "def test_in1d_hit_alternate_algorithm(self):\n \n # Need extreme range to hit standard code\n # This hits it without the use of method='dictionary'\n a = np.array([5, 4, 5, 3, 4, 4, 1e9], dtype=np.int64)\n b = np.array([2, 3, 4, 1e9], dtype=np.int64)\n expected = np.array([0, 1, 0, 1, 1, 1, 1], dtype=bool)\n assert_array_equal(expected, in1d(a, b))\n assert_array_equal(np.invert(expected), in1d(a, b, invert=True))\n\n a = np.array([5, 7, 1, 2], dtype=np.int64)\n b = np.array([2, 4, 3, 1, 5, 1e9], dtype=np.int64)\n ec = np.array([True, False, True, True])\n c = in1d(a, b, assume_unique=True)\n assert_array_equal(c, ec)\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 173, "n_words": 82, "vocab_size": 51, "complexity": 1, "nloc": 11, "token_counts": 195, "n_ast_nodes": 271, "n_identifiers": 16, "random_cut": "def test_in1d_hit_alternate_algorithm(self):\n \n # Need extreme range to hit standard code\n # This hits it without the use of method='dictionary'\n a = np.array([5, 4, 5, 3, 4, 4, 1e9], dtype=np.int64)\n b = np.array([2, 3, 4, 1e9], dtype=np.int64)\n expected = np.arra" }, { "id": 131453, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/test_component_failures.py", "file_name": "test_component_failures.py", "fun_name": "test_dying_driver_wait", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_dying_driver_wait(ray_start_regular):\n # Start the Ray processes.\n address_info = ray_start_regular\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 15, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 24, "token_counts": 145, "n_ast_nodes": 15, "n_identifiers": 3, "random_cut": "def test_dying_driver_wait(ray_start_regular):\n # Start" }, { "id": 29529, "commit_id": "67df28935c555fdd673f17e8c9183e24dde7c51f", "repo": "saleor", "path": "saleor/tax/migrations/0002_add_default_tax_configs.py", "file_name": "0002_add_default_tax_configs.py", "fun_name": "add_tax_configuration_for_channels", "commit_message": "Simple (flat rate) taxes API (#9784)\n\n* Add empty tax module\r\n\r\n* Add tax models (#9839)\r\n\r\n* Add tax API queries (#9856)\r\n\r\n* Add MANAGE_TAXES permission\r\n\r\n* Add tax configuration queries\r\n\r\n* Create tax configuration when channel is created\r\n\r\n* Drop sorters for now\r\n\r\n* Add TaxConfigurationPerCountry type\r\n\r\n* Update migration\r\n\r\n* Add metadata to TaxConfiguration type\r\n\r\n* Add tests for tax configuration queries\r\n\r\n* Add TaxClass 
types\r\n\r\n* Improve tests\r\n\r\n* Add queries for tax configuration per country\r\n\r\n* Fix query in tests\r\n\r\n* Update query cost map\r\n\r\n* Add tax API mutations (#9934)\r\n\r\n* Add taxConfigurationUpdate mutation\r\n\r\n* Update schema\r\n\r\n* Add tax class CRUD mutations\r\n\r\n* Add mutations to update/delete tax class rates per country\r\n\r\n* Review fixes\r\n\r\n* Add taxClass field to ProductType type (#9999)\r\n\r\n* Add taxClass field to ProductType type\r\n\r\n* Add taxClass field to Product type\r\n\r\n* Add taxClass field to shipping method type\r\n\r\n* Add displayGrossPrices to ProductPricingInfo (#10008)\r\n\r\n* Add displayGrossPrices to ProductPricingInfo\r\n\r\n* Add displayGrossPrices to Checkout\r\n\r\n* Add displayGrossPrices to Order\r\n\r\n* Add tests\r\n\r\n* Add ADDED_IN_35 label to new fields' descriptions\r\n\r\n* Use new display_gross_prices flag (#10121)\r\n\r\n* Use new display_gross_prices flag\r\n\r\n* Update tests\r\n\r\n* Add tests\r\n\r\n* Review fixes\r\n\r\n* Drop Vatlayer (#10335)\r\n\r\n* Add migration from Vatlayer to simple taxes\r\n\r\n* Review fixes\r\n\r\n* Review fixes\r\n\r\n* Drop usages of global include_taxes_in_prices flag (#10406)\r\n\r\n* Drop `include_taxes_in_prices` function from site settings\r\n\r\n* Adjust tests\r\n\r\n* Review fixes\r\n\r\n* Drop the `charge_taxes_on_shipping` flag from site settings. (#10466)\r\n\r\n* Include migrating Avatax tax codes in tax class migration\r\n\r\n* Drop `charge_taxes_on_shipping` function\r\n\r\n* Add tax_class to ShippingMethodData\r\n\r\n* Review fixes\r\n\r\n* Always calculate shipping tax with Avalara\r\n\r\n* Add default country rate (#10497)\r\n\r\n* Allow setting default tax rate for a country (without providing a tax class)\r\n\r\n* Add validation to allow settings only one default rate at once\r\n\r\n* Code review fixes\r\n\r\n* Add taxCalculationStrategy field\r\n\r\n* Add tests\r\n\r\n* CR fixes\r\n\r\n* Adjust resolver to use new tax configuration (#10533)\r\n\r\n* CR fixes\r\n\r\n* Add database router to fix false positives on relation mismatch. (#10524)\r\n\r\n* Add database router to fix false positives on relation mismatch.\r\n\r\n* The db router should have only 'allow_relation' implemented.\r\n\r\n* The 'db_for_write' part should stay.\r\n\r\n* Subscription for sync tax webooks (#10433)\r\n\r\n* Add proposed changes to schema\r\n\r\n* Add base implementation for sync tax subscription\r\n\r\n* Add test for empty order\r\n\r\n* Add clean up and missing part for tests\r\n\r\n* Use subscription for tax webhooks. Add more tests\r\n\r\n* Improve descriptions for tax objects\r\n\r\n* Adjust resolver to use new tax configuration (#10533)\r\n\r\n* Add taxCalculationStrategy field (#10532)\r\n\r\n* Add taxCalculationStrategy field\r\n\r\n* Add tests\r\n\r\n* CR fixes\r\n\r\n* CR fixes\r\n\r\n* Add datamigration to populate taxCalculationStrategy\r\n\r\n* Migrate Product.charge_taxes to new tax configuration (#10585)\r\n\r\n* Migrate Product.charge_taxes field to new tax configuration\r\n\r\n* Rename function\r\n\r\n* Fix tests\r\n\r\n* Change assign_tax_code_to_object_meta function to support tax classes\r\n\r\n* Update tax class fixtures\r\n\r\n* Improve dataloader\r\n\r\n* CR fixes\r\n\r\n* CR fixes\r\n\r\n* Add deprecation notice to dataloader\r\n\r\n* Allow removing country rates in the `taxCountryConfigurationUpdate` mutation. 
(#10647)\r\n\r\n* Allow deleting rates in taxCountryConfigurationUpdate mutation\r\n\r\n* Change tax rates ordering to keep default rates first (with null tax classes)\r\n\r\n* Update existing migration\r\n\r\n* Remove TaxClass.is_default field (#10660)\r\n\r\n* Change tax rates ordering to keep default rates first (with null tax classes)\r\n\r\n* Update existing migration\r\n\r\n* Drop is_default field from TaxClass model\r\n\r\n* Drop extra Avalara config (#10673)\r\n\r\n* Drop extra Avatax config options\r\n\r\n* Adjust tests\r\n\r\n* Use flat rates in tax calculations (#10747)\r\n\r\n* WIP Use new tax configuration in tax calculations\r\n\r\n* Use new tax calculations for checkout\r\n\r\n* Adjust tests\r\n\r\n* Add flat rates calculations for checkout and order\r\n\r\n* Calculate flat rates in product pricing objects\r\n\r\n* Adjust tests\r\n\r\n* Add tests for order calculations\r\n\r\n* Add tests for product queries tax calculations\r\n\r\n* Add tests for order calculations\r\n\r\n* Use base calculations to get default checkout shipping price\r\n\r\n* Add tests for using tax_class from product_type\r\n\r\n* Add tests for get_order_country\r\n\r\n* Adjust tests\r\n\r\n* Code review fixes\r\n\r\n* Drop update_taxes_for_order_lines (#11000)\r\n\r\n* Fix calls to Avalara not validating order (#11012)\r\n\r\n* Add validation to disallow creating negative rates (#11010)\r\n\r\n* Add missing recalculation of order.undiscounted_total (#11039)\r\n\r\n* Optimize getting tax class country rates (#11040)\r\n\r\n* Tax API adjustments for dashboard (#11042)\r\n\r\n* Ignore null rates in taxCountryConfigurationUpdate mutation\r\n\r\n* Allow to pass null rates in taxClassUpdate mutation\r\n\r\n* Improve tests\r\n\r\n* Update saleor/graphql/tax/mutations/tax_class_update.py\r\n\r\nCo-authored-by: Krzysztof Waliczek \r\n\r\n* Update schema\r\n\r\nCo-authored-by: Krzysztof Waliczek \r\n\r\n* Cleanup before release (#11049)\r\n\r\n* Update ADDED_IN labels\r\n\r\n* Fix skippeded test\r\n\r\n* Regenerate migrations\r\n\r\n* Deprecate CountryDisplay.vat field\r\n\r\n* Add changelog\r\n\r\n* Update order.undiscounted_total calculation to not include taxes (#11068)\r\n\r\n* Fix assigning rates to tax classes (#11105)\r\n\r\n* Allow all staff users and apps to query tax-related data (#11113)\r\n\r\n* Bump dependencies for origin/SALEOR-6391-simple-taxes (#11127)\r\n\r\nBumps:\r\n- cryptography to 38.0.3\r\n- pillow to 9.3.0\r\n\r\n* Fix using tax code from product and product type's tax class (#11111)\r\n\r\n* Fix using tax code from product and product type's tax class\r\n\r\n* Extract function\r\n\r\n* Replace synchronous load_site with promise (#11165)\r\n\r\n* Denormalize tax class for order lines and orders (#11172)\r\n\r\n* WIP Denormalize tax class for order lines and orders\r\n\r\n* Add denormalized fields in GraphQL types\r\n\r\n* Add tests for denormalized API fields\r\n\r\n* Return 0 rate in API when rate is null\r\n\r\n* Add preview/version notes in new field descriptions\r\n\r\n* Update changelog\r\n\r\nCo-authored-by: Dominik Kozaczko \r\nCo-authored-by: Maciej Korycinski \r\nCo-authored-by: Krzysztof Waliczek \r\nCo-authored-by: Mika <6186720+NyanKiyoshi@users.noreply.github.com>\r\nCo-authored-by: Krzysztof Kwaśniak ", "code": "def add_tax_configuration_for_channels(apps, schema_editor):\n Channel = apps.get_model(\"channel\", \"Channel\")\n TaxConfiguration = apps.get_model(\"tax\", \"TaxConfiguration\")\n SiteSettings = apps.get_model(\"site\", \"SiteSettings\")\n\n site_settings = 
SiteSettings.objects.first()\n\n tax_configurations = []\n for channel in Channel.objects.all():\n tax_configurations.append(\n TaxConfiguration(\n channel=channel,\n charge_taxes=True,\n display_gross_prices=site_settings.display_gross_prices,\n prices_entered_with_tax=site_settings.include_taxes_in_prices,\n )\n )\n TaxConfiguration.objects.bulk_create(tax_configurations)\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 150, "n_words": 34, "vocab_size": 29, "complexity": 2, "nloc": 16, "token_counts": 97, "n_ast_nodes": 160, "n_identifiers": 19, "random_cut": "def add_tax_configuration_for_channels(apps, schema_editor):\n Channel = apps.get_model(\"channel\", \"Channel\")\n TaxConfiguration = apps.get_model(\"tax\", \"TaxConfiguration\")\n SiteSettings = apps.get_model(\"site\", \"SiteSettings\")\n\n site_settings = SiteSettings.objects.first()\n\n tax_configurations = []\n fo" }, { "id": 204592, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/management/base.py", "file_name": "base.py", "fun_name": "handle_default_options", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def handle_default_options(options):\n \n if options.settings:\n os.environ[\"DJANGO_SETTINGS_MODULE\"] = options.settings\n if options.pythonpath:\n sys.path.insert(0, options.pythonpath)\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 34, "n_words": 11, "vocab_size": 10, "complexity": 3, "nloc": 5, "token_counts": 38, "n_ast_nodes": 64, "n_identifiers": 9, "random_cut": "def handle_default_options(options):\n \n if options.set" }, { "id": 138505, "commit_id": "9ee24530abf1b5e3239869b5257dd7b678337b90", "repo": "ray", "path": "python/ray/data/impl/plan.py", "file_name": "plan.py", "fun_name": "is_read_stage", "commit_message": "[Datasets] [Out-of-Band Serialization: 2/3] Refactor `ExecutionPlan` to maintain complete lineage and eagerly unlink block references. (#23931)\n\nThis PR refactors ExecutionPlan to maintain complete stage lineage, even for eagerly computed datasets, while ensuring that block references are unlinked as early as possible in order to more eagerly release block memory. 
This PR is the final precursor to adding the actual out-of-band serialization APIs (PR 3/3).\r\n\r\nThe fully lineage has to be maintained, even for eagerly computed datasets, since the lineage is needed for out-of-band serialization of datasets.", "code": "def is_read_stage(self) -> bool:\n \n return (\n self.has_lazy_input()\n and not self._stages_before_snapshot\n and not self._stages_after_snapshot\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 68, "n_words": 14, "vocab_size": 12, "complexity": 3, "nloc": 7, "token_counts": 26, "n_ast_nodes": 45, "n_identifiers": 6, "random_cut": "def is_read_stage(self) -> bool:\n \n return (\n sel" }, { "id": 272547, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/core/einsum_dense_test.py", "file_name": "einsum_dense_test.py", "fun_name": "test_unspecified_output_dim_fails", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_unspecified_output_dim_fails(self):\n input_tensor = keras.Input(shape=(32,))\n layer = einsum_dense.EinsumDense(equation=\"ab,bc->cd\", output_shape=64)\n with self.assertRaisesRegex(\n ValueError,\n \".*Dimension 'd' was specified in the output 'cd' but has \"\n \"no corresponding dim.*\",\n ):\n _ = layer(input_tensor)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 101, "n_words": 30, "vocab_size": 28, "complexity": 1, "nloc": 9, "token_counts": 50, "n_ast_nodes": 86, "n_identifiers": 14, "random_cut": "def test_unspecified_output_dim_fails(self):\n input_tensor = keras.Input(shape=(32,))\n layer = einsum_dense.EinsumDense(equation=\"ab,bc->cd\", output_shape=64)\n with self.assertRaisesRegex(\n ValueError,\n \".*Dimension 'd' was specified in the outp" }, { "id": 67992, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/utils.py", "file_name": "utils.py", "fun_name": "get_incoming_rate", "commit_message": "style: format code with black", "code": "def get_incoming_rate(args, raise_error_if_no_rate=True):\n\t\n\tfrom erpnext.stock.stock_ledger import (\n\t\tget_batch_incoming_rate,\n\t\tget_previous_sle,\n\t\tget_valuation_rate,\n\t)\n\n\tif isinstance(args, str):\n\t\targs = json.loads(args)\n\n\tvoucher_no = args.get(\"voucher_no\") or args.get(\"name\")\n\n\tin_rate = None\n\tif (args.get(\"serial_no\") or \"\").strip():\n\t\tin_rate = get_avg_purchase_rate(args.get(\"serial_no\"))\n\telif args.get(\"batch_no\") and frappe.db.get_value(\n\t\t\"Batch\", args.get(\"batch_no\"), \"use_batchwise_valuation\", cache=True\n\t):\n\t\tin_rate = get_batch_incoming_rate(\n\t\t\titem_code=args.get(\"item_code\"),\n\t\t\twarehouse=args.get(\"warehouse\"),\n\t\t\tbatch_no=args.get(\"batch_no\"),\n\t\t\tposting_date=args.get(\"posting_date\"),\n\t\t\tposting_time=args.get(\"posting_time\"),\n\t\t)\n\telse:\n\t\tvaluation_method = get_valuation_method(args.get(\"item_code\"))\n\t\tprevious_sle = get_previous_sle(args)\n\t\tif valuation_method in (\"FIFO\", \"LIFO\"):\n\t\t\tif previous_sle:\n\t\t\t\tprevious_stock_queue = json.loads(previous_sle.get(\"stock_queue\", \"[]\") or \"[]\")\n\t\t\t\tin_rate = (\n\t\t\t\t\t_get_fifo_lifo_rate(previous_stock_queue, args.get(\"qty\") or 0, valuation_method)\n\t\t\t\t\tif previous_stock_queue\n\t\t\t\t\telse 0\n\t\t\t\t)\n\t\telif valuation_method 
== \"Moving Average\":\n\t\t\tin_rate = previous_sle.get(\"valuation_rate\") or 0\n\n\tif in_rate is None:\n\t\tin_rate = get_valuation_rate(\n\t\t\targs.get(\"item_code\"),\n\t\t\targs.get(\"warehouse\"),\n\t\t\targs.get(\"voucher_type\"),\n\t\t\tvoucher_no,\n\t\t\targs.get(\"allow_zero_valuation\"),\n\t\t\tcurrency=erpnext.get_company_currency(args.get(\"company\")),\n\t\t\tcompany=args.get(\"company\"),\n\t\t\traise_error_if_no_rate=raise_error_if_no_rate,\n\t\t\tbatch_no=args.get(\"batch_no\"),\n\t\t)\n\n\treturn flt(in_rate)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 64, "n_words": 112, "vocab_size": 76, "complexity": 15, "nloc": 48, "token_counts": 333, "n_ast_nodes": 566, "n_identifiers": 36, "random_cut": "def get_incoming_rate(args, raise_error_if_no_rate=True):\n\t\n\tfrom erpnext.stock.stock_ledger import (\n\t\tget_batch_incoming_rate,\n\t\tget_previous_sle,\n\t\tget_valuation_rate,\n\t)\n\n\tif isinstance(args, str):\n\t\targs = json.loads(args)\n\n\tvoucher_no = args.get(\"voucher_no\") or args.get(\"name\")\n\n\tin_rate = None\n\tif (args.get(\"serial_no\") or \"\").strip():\n\t\tin_rate = get_avg_purchase_rate(args.get(\"serial_no\"))\n\telif args.get(\"batch_no\") and frappe.db.get_value(\n\t\t\"Batch\", args.get(\"batch_no\"), \"use_batchwise_valuation\", cache=True\n\t):\n\t\tin_rate = get_batch_incoming_rate(\n\t\t\titem_code=args.get(\"item_code\"),\n\t\t\twarehouse=args.get(\"warehouse\"),\n\t\t\tbatch_no=args.get(\"batch_no\"),\n\t\t\tposting_date=args.get(\"posting_date\"),\n\t\t\tposting_time=args.get(\"posting_time\"),\n\t\t)\n\telse:\n\t\tvaluation_method = get_valuation_method(args.get(\"item_code\"))\n\t\tprevious_sle = get_previous_sle(args)\n\t\tif valuation_method in (\"FIFO\", \"LIFO\"):\n\t\t\tif previous_sle:\n\t\t\t\tprevious_stock_queue = json.loads(previous_sle.get(\"stock_queue\", \"[]\") or \"[]\")\n\t\t\t\tin_rate = (\n\t\t\t\t\t_get_fifo_lifo_rate(previous_stock_queue, args.get(\"qty\") or 0, valuation_method)\n\t\t\t\t" }, { "id": 269928, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/callbacks.py", "file_name": "callbacks.py", "fun_name": "_push_writer", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _push_writer(self, writer, step):\n \n if self.update_freq == \"epoch\":\n return\n\n should_record = lambda: tf.equal(step % self.update_freq, 0)\n # TODO(b/151339474): Fix deadlock when not using .value() here.\n summary_context = (\n writer.as_default(step.value()),\n tf.summary.record_if(should_record),\n )\n self._prev_summary_state.append(summary_context)\n summary_context[0].__enter__()\n summary_context[1].__enter__()\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 130, "n_words": 34, "vocab_size": 33, "complexity": 2, "nloc": 11, "token_counts": 82, "n_ast_nodes": 133, "n_identifiers": 16, "random_cut": "def _push_writer(self, writer, step):\n \n if self.update_freq == \"epoch\":\n return\n\n should_record = lambda: tf.equal(step % self.update_freq, 0)\n # TO" }, { "id": 137072, "commit_id": "1216d5d2d39556895b43e4a6dd8dd7825c3acd30", "repo": "ray", "path": "doc/source/ray-core/doc_code/scheduling.py", "file_name": "scheduling.py", "fun_name": "small_object_func", "commit_message": "[Doc] Revamp ray core scheduling doc (#30675)", "code": "def 
small_object_func():\n # Small object is returned inline directly to the caller,\n # instead of storing in the distributed memory.\n return [1]\n\n\n@ray.remote", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@ray.remote", "n_ast_errors": 1, "ast_levels": 6, "n_whitespaces": 30, "n_words": 23, "vocab_size": 21, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 23, "n_identifiers": 3, "random_cut": "def small_object_func():\n # Small object is returned inline directly to " }, { "id": 87487, "commit_id": "8d05e775c1f46209ec515d5267dc9c74ab51a106", "repo": "sentry", "path": "tests/acceptance/test_performance_issues.py", "file_name": "test_performance_issues.py", "fun_name": "test_with_one_performance_issue", "commit_message": "test(perf-issues): Add more E2E tests (#40723)\n\n- one test verifies that similar events are grouped\r\n- one test verifies that dissimilar events are not grouped", "code": "def test_with_one_performance_issue(self, mock_now):\n mock_now.return_value = datetime.utcnow().replace(tzinfo=pytz.utc) - timedelta(minutes=5)\n event_data = self.create_sample_event(mock_now.return_value.timestamp())\n\n with self.feature(FEATURES):\n event = self.store_event(data=event_data, project_id=self.project.id)\n\n self.page.visit_issue(self.org.slug, event.groups[0].id)\n self.browser.click('[aria-label=\"Show Details\"]')\n\n self.browser.snapshot(\"performance issue details\", desktop_only=True)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 89, "n_words": 25, "vocab_size": 23, "complexity": 1, "nloc": 8, "token_counts": 113, "n_ast_nodes": 182, "n_identifiers": 32, "random_cut": "def test_with_one_performance_issue(self, mock_now):\n mock_now.return_value = datetime.utcnow().replace(tzinfo=pytz.utc) - timedelta(minutes=5)\n event_data = self.create_sample_event(mock_now.return_value.timestamp())\n\n with self.feature(FEATURES):\n event = self.store_event(data=event_data, project_id=self.project.id)\n\n self.page.visit_issue(self.org.s" }, { "id": 256459, "commit_id": "a59bca366174d9c692fa19750c24d65f47660ef7", "repo": "haystack", "path": "test/test_modeling_question_answering.py", "file_name": "test_modeling_question_answering.py", "fun_name": "test_inference_different_inputs", "commit_message": "Apply black formatting (#2115)\n\n* Testing black on ui/\r\n\r\n* Applying black on docstores\r\n\r\n* Add latest docstring and tutorial changes\r\n\r\n* Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too\r\n\r\n* Remove comments\r\n\r\n* Relax constraints on pydoc-markdown\r\n\r\n* Split temporary black from the docs. 
Pydoc-markdown was obsolete and needs a separate PR to upgrade\r\n\r\n* Fix a couple of bugs\r\n\r\n* Add a type: ignore that was missing somehow\r\n\r\n* Give path to black\r\n\r\n* Apply Black\r\n\r\n* Apply Black\r\n\r\n* Relocate a couple of type: ignore\r\n\r\n* Update documentation\r\n\r\n* Make Linux CI run after applying Black\r\n\r\n* Triggering Black\r\n\r\n* Apply Black\r\n\r\n* Remove dependency, does not work well\r\n\r\n* Remove manually double trailing commas\r\n\r\n* Update documentation\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def test_inference_different_inputs(bert_base_squad2):\n qa_format_1 = [\n {\n \"questions\": [\"Who counted the game among the best ever made?\"],\n \"text\": \"Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.\",\n }\n ]\n q = Question(text=\"Who counted the game among the best ever made?\")\n qa_format_2 = QAInput(\n questions=[q],\n doc_text=\"Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. 
GameTrailers in their review called it one of the greatest games ever created.\",\n )\n\n result1 = bert_base_squad2.inference_from_dicts(dicts=qa_format_1)\n result2 = bert_base_squad2.inference_from_objects(objects=[qa_format_2])\n assert result1 == result2\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 270, "n_words": 197, "vocab_size": 89, "complexity": 1, "nloc": 15, "token_counts": 70, "n_ast_nodes": 120, "n_identifiers": 16, "random_cut": "def test_inference_different_inputs(bert_base_squad2):\n qa_format_1 = [\n {\n \"questions\": [\"Who counted the game among the best ever made?\"],\n \"text\": \"Twilight Pri" }, { "id": 266028, "commit_id": "ad40d42dc467940b27021104a8beaee3cce1afaa", "repo": "netbox", "path": "netbox/wireless/tests/test_filtersets.py", "file_name": "test_filtersets.py", "fun_name": "test_status", "commit_message": "Closes #10710: Add status field to WirelessLAN", "code": "def test_status(self):\n params = {'status': [WirelessLANStatusChoices.STATUS_ACTIVE, WirelessLANStatusChoices.STATUS_DISABLED]}\n self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 23, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 43, "n_ast_nodes": 68, "n_identifiers": 11, "random_cut": "def test_status(self):\n params = {'status': [WirelessLANStatusChoices.STATUS_ACTIVE, WirelessLANStatusChoices.STATUS_DISABLED]}\n self.assertEqual(self.filterset(params, self.queryset).q" }, { "id": 48634, "commit_id": "ad282da97cf7b23c50a8fa7b7c5cad68c1deedc3", "repo": "django-rest-framework", "path": "rest_framework/request.py", "file_name": "request.py", "fun_name": "is_form_media_type", "commit_message": "Replaced parse_header with parse_header_parameters. (#8556)\n\nAdd a backwards compatibility shim for Django versions that have no (or an incompatible)\r\ndjango.utils.http.parse_header_parameters implementation.\r\n\r\nThanks to Shai Berger for review. 
\r\n\r\nCo-authored-by: Jaap Roes ", "code": "def is_form_media_type(media_type):\n \n base_media_type, params = parse_header_parameters(media_type)\n return (base_media_type == 'application/x-www-form-urlencoded' or\n base_media_type == 'multipart/form-data')\n\n", "url": "https://github.com/encode/django-rest-framework.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 34, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 4, "token_counts": 24, "n_ast_nodes": 45, "n_identifiers": 5, "random_cut": "def is_form_media_type(media_type):\n \n base_media_type, params = parse_header_parameters(media_type)\n return (base_media_type == 'application/x-www-form-urlencoded' or\n base_media_type == 'multipart/form-data')\n\n" }, { "id": 337010, "commit_id": "7265dd8cc82e41624b4a979a22f0d15dba55e956", "repo": "diffusers", "path": "src/diffusers/models/resnet.py", "file_name": "resnet.py", "fun_name": "downsample_2d", "commit_message": "renamed x to meaningful variable in resnet.py (#677)\n\n* renamed single letter variables\r\n\r\n* renamed x to meaningful variable in resnet.py\r\n\r\nHello @patil-suraj can you verify it\r\nThanks\r\n\r\n* Reformatted using black\r\n\r\n* renamed x to meaningful variable in resnet.py\r\n\r\nHello @patil-suraj can you verify it\r\nThanks\r\n\r\n* reformatted the files\r\n\r\n* modified unboundlocalerror in line 374\r\n\r\n* removed referenced before error\r\n\r\n* renamed single variable x -> hidden_state, p-> pad_value\r\n\r\nCo-authored-by: Nikhil A V \r\nCo-authored-by: Patrick von Platen \r\nCo-authored-by: Suraj Patil ", "code": "def downsample_2d(hidden_states, kernel=None, factor=2, gain=1):\n r\n\n assert isinstance(factor, int) and factor >= 1\n if kernel is None:\n kernel = [1] * factor\n\n kernel = torch.tensor(kernel, dtype=torch.float32)\n if kernel.ndim == 1:\n kernel = torch.outer(kernel, kernel)\n kernel /= torch.sum(kernel)\n\n kernel = kernel * gain\n pad_value = kernel.shape[0] - factor\n return upfirdn2d_native(\n hidden_states, kernel.to(device=hidden_states.device), down=factor, pad=((pad_value + 1) // 2, pad_value // 2)\n )\n\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 111, "n_words": 61, "vocab_size": 45, "complexity": 4, "nloc": 29, "token_counts": 127, "n_ast_nodes": 197, "n_identifiers": 21, "random_cut": "def downsample_2d(hidden_states, kernel=None, factor=2, gain=1):\n r\n\n assert isinstance(factor, int) and factor >= 1\n if kernel is None:\n " }, { "id": 49724, "commit_id": "f4d6e64cdc132ae868699a0ba442f4ab1d304a14", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_cnclip_vitb16/cn_clip/clip/bert_tokenizer.py", "file_name": "bert_tokenizer.py", "fun_name": "tokenize", "commit_message": "add disco_diffusion_cnclip_vitb16 module", "code": "def tokenize(self, text):\n split_tokens = []\n for token in self.basic_tokenizer.tokenize(text):\n for sub_token in self.wordpiece_tokenizer.tokenize(token):\n split_tokens.append(sub_token)\n\n return split_tokens\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 63, "n_words": 17, "vocab_size": 14, "complexity": 3, "nloc": 6, "token_counts": 43, "n_ast_nodes": 67, "n_identifiers": 9, "random_cut": "def tokenize(self, text):\n split_tokens = []\n for token in self.basic_tokenizer.tokenize(text):\n for sub_token in 
self.wordpiece_tokenizer.tokenize(token):\n split_tokens.append(sub_token)\n\n return split_tokens\n" }, { "id": 255603, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/test/shape_inference_test.py", "file_name": "shape_inference_test.py", "fun_name": "test_negative_log_likehood_shape_is_NCd1d2_reduction_sum", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def test_negative_log_likehood_shape_is_NCd1d2_reduction_sum(self) -> None:\n N, C, d1, d2 = 3, 4, 5, 6\n graph = self._make_graph(\n [(\"input\", TensorProto.FLOAT, (N, C, d1, d2)),\n (\"target\", TensorProto.INT64, (N, d1, d2))],\n [make_node('NegativeLogLikelihoodLoss', ['input', 'target'], ['loss'], reduction='sum')],\n [])\n self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, ())]) # type: ignore\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 106, "n_words": 40, "vocab_size": 34, "complexity": 1, "nloc": 8, "token_counts": 108, "n_ast_nodes": 162, "n_identifiers": 15, "random_cut": "def test_negative_log_likehood_shape_is_NCd1d2_reduction_sum(self) -> None:\n N, C, d1, d2 = 3, 4, 5, 6\n graph = self._make_graph(\n [(\"input\", TensorProto.FLOAT, (N, C" }, { "id": 19069, "commit_id": "964f5ab75098c55f028f8acfeeae05df35ea68d5", "repo": "mlflow", "path": "tests/models/test_default_evaluator.py", "file_name": "test_default_evaluator.py", "fun_name": "assert_dict_equal", "commit_message": "Evaluation Default evaluator (#5092)\n\n* init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* rename module\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* 
revert black change\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* change module path\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* lazy load pyspark\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert export\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix curcit import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix conftest.py\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* Revert \"fix conftest.py\"\r\n\r\nThis reverts commit 2ea29c62bfffc5461bf77f3da15b5c00f51de19b.\r\n\r\n* fix tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* default evaluator\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update hash algo\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comment\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix lint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add more tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix lint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update shap explainer\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* remove scikitplot dep\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add pr curve\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap.summary_plot\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* log explainer\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* improve explainer code\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update shap init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update explainer creating\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update predict_proba\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add multi-class metrics artifacts\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: 
Weichen Xu \r\n\r\n* add log_loss metric\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* lazy load pyspark\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address ben comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* prevent show shap logo, add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* support spark model\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap version check\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update docs, loose classifier label limit\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* multiclass classifier merge metrics/plots\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* zfill feature name\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add config max_num_classes_threshold_logging_roc_pr_curve_for_multiclass_classifier\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* improve label handling\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* black\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* increase plot dpi\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix test fixture\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* use matplot rc_context\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix shap import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor EvaluationDataset\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* limit user specify shap algos\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* clean\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update evaluation dataset\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* use svg fig\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert svg\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* curve dashline, legend display ap/roc, legend move out\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* linewidth 1\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* keyword arguments for evaluate, fix tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* mark abc.abstractmethod, kw args for ModelEvaluator methods\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu ", "code": "def assert_dict_equal(d1, d2, rtol):\n for k in d1:\n assert k in d2\n assert np.isclose(d1[k], d2[k], rtol=rtol)\n\n", "url": "https://github.com/mlflow/mlflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 32, "n_words": 16, "vocab_size": 13, "complexity": 2, "nloc": 4, "token_counts": 37, "n_ast_nodes": 52, "n_identifiers": 7, "random_cut": "def assert_dict_equal(d1, d2, rtol):\n for k in d1:\n " }, { "id": 190878, "commit_id": "d34fd16034e307b545c3e3adfa4d9d472a582cc6", "repo": "thumbor", "path": "tests/detectors/test_feature_detector.py", "file_name": "test_feature_detector.py", "fun_name": "test_should_detect_multiple_points", "commit_message": "Feature/optional opencv (#1400)\n\n* Removed opencv dependency\r\n\r\nNow OpenCV is optional and detectors are smart to\r\nskip if cv2 could not be imported.\r\nAlso refactored face detector a bit to make it more\r\nmaintainable.\r\nNow thumbor can be installed with\r\npip install thumbor\r\npip install 
thumbor[all]\r\npip install thumbor[opencv]\r\npip install thumbor[tests]", "code": "async def test_should_detect_multiple_points(self):\n with open(\n abspath(\"./tests/fixtures/images/no_face.jpg\"), \"rb\"\n ) as fixture:\n self.engine.load(fixture.read(), None)\n\n await FeatureDetector(self.context, 0, None).detect()\n detection_result = self.context.request.focal_points\n expect(len(detection_result)).to_be_greater_than(1)\n expect(detection_result[0].origin).to_equal(\"alignment\")\n", "url": "https://github.com/thumbor/thumbor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 84, "n_words": 21, "vocab_size": 21, "complexity": 1, "nloc": 9, "token_counts": 82, "n_ast_nodes": 140, "n_identifiers": 19, "random_cut": "async def test_should_detect_multiple_points(self):\n with open(\n abspath(\"./tests/fixtures/images/no_face.jpg\"), \"rb\"\n ) as fixture:\n self.engine.load(fixture.read(), None)\n\n await FeatureDetector(self.context, 0, None).detect()\n detection_result = self.context.request.focal_points\n expect(len(detection_result)).to_be_greater_than(1)\n expect(dete" }, { "id": 270391, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/distribute/keras_correctness_test_base.py", "file_name": "keras_correctness_test_base.py", "fun_name": "get_batch_size", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_batch_size(global_batch_size, distribution):\n batch_size = global_batch_size\n # TODO(b/118776054): Use global batch size for Keras/DS support.\n use_per_core_batch_size = (\n distribution\n and not distributed_training_utils.global_batch_size_supported(\n distribution\n )\n )\n if use_per_core_batch_size:\n batch_size //= distribution.num_replicas_in_sync\n return batch_size\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 88, "n_words": 32, "vocab_size": 27, "complexity": 3, "nloc": 11, "token_counts": 30, "n_ast_nodes": 54, "n_identifiers": 8, "random_cut": "def get_batch_size(global_batch_size, distribution):\n batch_size = global_batch_size\n # TODO(b/118776054): Use global batch size for Keras/DS support.\n use_per_core_batch_size = (\n distribution\n and not distributed_training_utils.global_batch_size_supported(\n distribution\n " }, { "id": 250533, "commit_id": "a4ca770655a6b067468de3d507292ec133fdc5ca", "repo": "synapse", "path": "tests/crypto/test_keyring.py", "file_name": "test_keyring.py", "fun_name": "test_get_multiple_keys_from_perspectives", "commit_message": "Add missing type hints to tests. 
(#14687)\n\nAdds type hints to tests.metrics and tests.crypto.", "code": "def test_get_multiple_keys_from_perspectives(self) -> None:\n \n\n fetcher = PerspectivesKeyFetcher(self.hs)\n\n SERVER_NAME = \"server2\"\n\n testkey1 = signedjson.key.generate_signing_key(\"ver1\")\n testverifykey1 = signedjson.key.get_verify_key(testkey1)\n testverifykey1_id = \"ed25519:ver1\"\n\n testkey2 = signedjson.key.generate_signing_key(\"ver2\")\n testverifykey2 = signedjson.key.get_verify_key(testkey2)\n testverifykey2_id = \"ed25519:ver2\"\n\n VALID_UNTIL_TS = 200 * 1000\n\n response1 = self.build_perspectives_response(\n SERVER_NAME,\n testkey1,\n VALID_UNTIL_TS,\n )\n response2 = self.build_perspectives_response(\n SERVER_NAME,\n testkey2,\n VALID_UNTIL_TS,\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 211, "n_words": 47, "vocab_size": 33, "complexity": 1, "nloc": 44, "token_counts": 292, "n_ast_nodes": 162, "n_identifiers": 20, "random_cut": "def test_get_multiple_keys_from_perspectives(self) -> None:\n \n\n fetcher = PerspectivesKeyFetcher(self.hs)\n\n SERVER_NAME = \"server2\"\n\n testkey1 = signedjson.key.generate_signing_key(\"ver1\")\n testverifykey1 = signedjson.key.get_verify_key(testkey1)\n testverifykey1_id = \"ed25519:ver1\"\n\n testkey2 = signedjson.key.generate_signing_key(\"ver2\")\n testverifykey2 = signedjson.key.get_verify_key(testkey2)\n testverifykey2_id = \"ed25519:ver2\"\n\n VALID_UNTIL_TS = 200 * 1000\n\n response1 = self.build_perspectives_response(\n SERVER_NAME,\n testkey1,\n VALID_UNTIL_TS,\n )\n response2 = self.build_perspectives_response(\n SERVER_NAME,\n testkey2,\n VALID_UNTIL_TS,\n )\n" }, { "id": 74922, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/documents/wagtail_hooks.py", "file_name": "wagtail_hooks.py", "fun_name": "construct_admin_api", "commit_message": "Reformat with black", "code": "def construct_admin_api(router):\n router.register_endpoint(\"documents\", DocumentsAdminAPIViewSet)\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 6, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 23, "n_identifiers": 4, "random_cut": "def construct_admin_api(router):\n router.register_endpoint(\"documents\", " }, { "id": 9740, "commit_id": "ac3bbcdf87b263f79d5e19cce173e6c709a15f9d", "repo": "gensim", "path": "gensim/test/test_word2vec.py", "file_name": "test_word2vec.py", "fun_name": "test_evaluate_word_analogies", "commit_message": "streamlining most_similar_cosmul and evaluate_word_analogies (#2656)\n\n* streamlining most_similar_cosmul\r\n\r\n* Fix PR requested changes and add unit test\r\n\r\n* fix merge artifacts\r\n\r\nCo-authored-by: n3hrox \r\nCo-authored-by: Michael Penkov ", "code": "def test_evaluate_word_analogies(self):\n \n model = word2vec.Word2Vec(LeeCorpus())\n score, sections = model.wv.evaluate_word_analogies(datapath('questions-words.txt'))\n score_cosmul, sections_cosmul = model.wv.evaluate_word_analogies(\n datapath('questions-words.txt'),\n similarity_function='most_similar_cosmul'\n )\n self.assertEqual(score, score_cosmul)\n self.assertEqual(sections, sections_cosmul)\n self.assertGreaterEqual(score, 0.0)\n self.assertLessEqual(score, 1.0)\n self.assertGreater(len(sections), 0)\n # Check that dict contains the right keys\n first_section = sections[0]\n 
self.assertIn('section', first_section)\n self.assertIn('correct', first_section)\n self.assertIn('incorrect', first_section)\n", "url": "https://github.com/RaRe-Technologies/gensim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 170, "n_words": 43, "vocab_size": 38, "complexity": 1, "nloc": 16, "token_counts": 127, "n_ast_nodes": 206, "n_identifiers": 21, "random_cut": "def test_evaluate_word_analogies(self):\n \n model = word2vec.Word2Vec(LeeCorpus())\n score, sections = model.wv.evaluate_word_analogies(datapath('questions-words.txt'))\n score_cosmul, sections_cosmul = model.wv.evaluate_word_analogies(\n datapath('questions-words.txt'),\n similarity_function='most_similar_cosmul'\n )\n self.assertEqual(score, score_cosmul)\n self.assertEqual(sections, sections_cosmul)\n self.assertGreaterEqual(score, 0.0)\n self.assertLessEqual(score, 1.0)\n self.assertGreater(len(sections), 0)\n # Check that dict contains the right keys\n first_section = sections[0]\n self.assertIn('sec" }, { "id": 271155, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/data_adapter_test.py", "file_name": "data_adapter_test.py", "fun_name": "test_shuffle_correctness", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_shuffle_correctness(self):\n num_samples = 100\n batch_size = 32\n x = np.arange(num_samples)\n np.random.seed(99)\n adapter = self.adapter_cls(\n x, y=None, batch_size=batch_size, shuffle=True, epochs=2\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 73, "n_words": 21, "vocab_size": 18, "complexity": 1, "nloc": 17, "token_counts": 125, "n_ast_nodes": 78, "n_identifiers": 14, "random_cut": "def test_shuffle_correctness(self):\n num_samples = 100\n batch_size = 32\n x = np.arange(num_samples)\n np.random.seed(99)\n " }, { "id": 164658, "commit_id": "21bbee62e371068896735946c3c0c2ab1f349fda", "repo": "pandas", "path": "pandas/core/internals/blocks.py", "file_name": "blocks.py", "fun_name": "putmask", "commit_message": "REF: standardize patterns in Block Methods (#45840)", "code": "def putmask(self, mask, new) -> list[Block]:\n \n orig_mask = mask\n values = cast(np.ndarray, self.values)\n mask, noop = validate_putmask(values.T, mask)\n assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame))\n\n if new is lib.no_default:\n new = self.fill_value\n\n new = self._standardize_fill_value(new)\n new = extract_array(new, extract_numpy=True)\n\n if noop:\n return [self]\n\n try:\n casted = np_can_hold_element(values.dtype, new)\n putmask_without_repeat(values.T, mask, casted)\n return [self]\n except LossySetitemError:\n\n if self.ndim == 1 or self.shape[0] == 1:\n # no need to split columns\n\n if not is_list_like(new):\n # using just new[indexer] can't save us the need to cast\n return self.coerce_to_target_dtype(new).putmask(mask, new)\n else:\n indexer = mask.nonzero()[0]\n nb = self.setitem(indexer, new[indexer])\n return [nb]\n\n else:\n is_array = isinstance(new, np.ndarray)\n\n res_blocks = []\n nbs = self._split()\n for i, nb in enumerate(nbs):\n n = new\n if is_array:\n # we have a different value per-column\n n = new[:, i : i + 1]\n\n submask = orig_mask[:, i : i + 1]\n rbs = nb.putmask(submask, n)\n res_blocks.extend(rbs)\n return res_blocks\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 656, "n_words": 146, "vocab_size": 97, "complexity": 9, "nloc": 50, "token_counts": 269, "n_ast_nodes": 418, "n_identifiers": 47, "random_cut": "def putmask(self, mask, new) -> list[Block]:\n \n orig_mask = mask\n values = cast(np.ndarray, self.values)\n mask, noop = validate_putmask(values.T, mask)\n assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame))\n\n if new is lib.no_default:\n new = self.fill_value\n\n new = self._standardize_fill_value(new)\n new = extract_array(new, extract_numpy=True)\n\n if noop:\n return [self]\n\n try:\n casted = np_can_hold_element(values.dtype, new)\n putmask_without_repeat(values.T, mask, casted)\n return [self]\n except LossySetitemError:\n\n if self.ndim == 1 or self.shape[0] == 1:\n # no need to split columns\n\n if not is_list_like(new):\n # using just new[indexer] can't save us the need to cast\n return self.co" }, { "id": 309472, "commit_id": "5e3bfabfcfb2a65e68e14bd21bddb2c37df85b6c", "repo": "core", "path": "homeassistant/components/overkiz/select.py", "file_name": "select.py", "fun_name": "current_option", "commit_message": "Add select entity to Overkiz integration (#62916)", "code": "def current_option(self) -> str | None:\n \n if state := self.device.states.get(self.entity_description.key):\n return str(state.value)\n\n return None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 46, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 37, "n_ast_nodes": 61, "n_identifiers": 10, "random_cut": "def current_option(self) -> str | None:\n \n if state := self.device.states" }, { "id": 10969, "commit_id": "13edc16d806fb5d77a6849551178ccc75937f25f", "repo": "jina", "path": "tests/daemon/unit/stores/test_peapodstore.py", "file_name": "test_peapodstore.py", "fun_name": "test_podpod_store_multi_add", "commit_message": "refactor: rename pod to deployment (#4230)\n\n* refactor: rename pod to deployment\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: undo daemon mistake\r\n\r\n* refactor: leftover cleanup\r\n\r\n* fix: more test fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more tests\r\n\r\n* fix: fix more tests\r\n\r\n* refactor: fix more tests\r\n\r\n* refactor: more tests fixes\r\n\r\n* refactor: rename pea to pod\r\n\r\n* refactor: adjust docs\r\n\r\n* refactor: complete pea renaming\r\n\r\n* refactor: more fixes\r\n\r\n* fix: pea_type in k8s yamls\r\n\r\n* fix: adjust pod args name\r\n\r\n* refactor: rename peapods parser folder\r\n\r\n* fix: da init\r\n\r\nCo-authored-by: Jina Dev Bot ", "code": "async def test_podpod_store_multi_add(model, store, type, workspace):\n s = store()\n for j in range(5):\n id = DaemonID(f'j{type}')\n await s.add(id=id, params=model, workspace_id=workspace, ports={})\n\n assert len(s) == j + 1\n assert id in s\n await s.clear()\n assert not s\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\n 'model, store, id',\n [\n (PodModel(), PodStore, DaemonID(f'jpod')),\n # (PodModel(), PodStore, DaemonID(f'jpod')),\n ],\n)", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "@pytest.mark.asyncio\n@pytest.mark.parametrize(\n 'model, store, id',\n [\n (PodModel(), PodStore, DaemonID(f'jpod')),\n # (PodModel(), PodStore, DaemonID(f'jpod')),\n ],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 110, "n_words": 51, "vocab_size": 38, 
"complexity": 2, "nloc": 9, "token_counts": 75, "n_ast_nodes": 173, "n_identifiers": 22, "random_cut": "async def test_podpod_store_multi_add(model, store, type, workspace):\n s = store()\n for j in range(5):\n id = DaemonID(f'j{type}')\n await s.add(id=id, params=model, workspace_id=workspace, ports={})\n\n assert len(s) == j + 1\n assert id in s\n awa" }, { "id": 10385, "commit_id": "6e9e7ef32f61cab04c6efc7a9f21659d26b50fdb", "repo": "jina", "path": "tests/integration/external_pod/test_external_pod.py", "file_name": "test_external_pod.py", "fun_name": "external_pod_shards_1", "commit_message": "feat: export Flow into set of k8s yamls (#4089)", "code": "def external_pod_shards_1(external_pod_shards_1_args):\n return Pod(external_pod_shards_1_args)\n\n\n@pytest.fixture(scope='function')", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "@pytest.fixture(scope='function')", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 6, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 34, "n_identifiers": 6, "random_cut": "def external_pod_shards_1(external_pod_shards_1_args):\n return Pod(external_pod_shards_1_args)\n\n\n@pyte" }, { "id": 149288, "commit_id": "f5578aba48f174190697ac63908b3d3993c3a10c", "repo": "freqtrade", "path": "tests/exchange/test_exchange.py", "file_name": "test_exchange.py", "fun_name": "test_fetch_trading_fees", "commit_message": "Update trading_fee naming", "code": "def test_fetch_trading_fees(default_conf, mocker):\n api_mock = MagicMock()\n tick = {\n '1INCH/USDT:USDT': {\n 'info': {'user_id': '',\n 'taker_fee': '0.0018',\n 'maker_fee': '0.0018',\n 'gt_discount': False,\n 'gt_taker_fee': '0',\n 'gt_maker_fee': '0',\n 'loan_fee': '0.18',\n 'point_type': '1',\n 'futures_taker_fee': '0.0005',\n 'futures_maker_fee': '0'},\n 'symbol': '1INCH/USDT:USDT',\n 'maker': 0.0,\n 'taker': 0.0005},\n 'ETH/USDT:USDT': {\n 'info': {'user_id': '',\n 'taker_fee': '0.0018',\n 'maker_fee': '0.0018',\n 'gt_discount': False,\n 'gt_taker_fee': '0',\n 'gt_maker_fee': '0',\n 'loan_fee': '0.18',\n 'point_type': '1',\n 'futures_taker_fee': '0.0005',\n 'futures_maker_fee': '0'},\n 'symbol': 'ETH/USDT:USDT',\n 'maker': 0.0,\n 'taker': 0.0005}\n }\n exchange_name = 'gateio'\n default_conf['dry_run'] = False\n default_conf['trading_mode'] = TradingMode.FUTURES\n default_conf['margin_mode'] = MarginMode.ISOLATED\n api_mock.fetch_trading_fees = MagicMock(return_value=tick)\n mocker.patch('freqtrade.exchange.Exchange.exchange_has', return_value=True)\n exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)\n\n assert '1INCH/USDT:USDT' in exchange._trading_fees\n assert 'ETH/USDT:USDT' in exchange._trading_fees\n assert api_mock.fetch_trading_fees.call_count == 1\n\n api_mock.fetch_trading_fees.reset_mock()\n\n ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name,\n \"fetch_trading_fees\", \"fetch_trading_fees\")\n\n api_mock.fetch_trading_fees = MagicMock(return_value={})\n exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)\n exchange.fetch_trading_fees()\n mocker.patch('freqtrade.exchange.Exchange.exchange_has', return_value=True)\n assert exchange.fetch_trading_fees() == {}\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 673, "n_words": 126, "vocab_size": 71, "complexity": 1, "nloc": 50, "token_counts": 292, "n_ast_nodes": 523, "n_identifiers": 21, 
"random_cut": "def test_fetch_trading_fees(default_conf, mocker):\n api_mock = MagicMock()\n tick = {\n '1INCH/USDT:USDT': {\n 'info': {'user_id': '',\n 'taker_fee': '0.0018',\n 'maker_fee': '0.0018',\n 'gt_discount': False,\n 'gt" }, { "id": 274228, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/legacy_tf_layers/base.py", "file_name": "base.py", "fun_name": "add_loss", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def add_loss(self, losses, inputs=None):\n previous_losses_length = len(self._losses)\n previous_callable_losses_length = len(self._callable_losses)\n super().add_loss(losses, inputs=inputs)\n if not tf.executing_eagerly():\n # TODO(fchollet): deprecate collection below.\n new_losses = self._losses[previous_losses_length:]\n new_callable_losses = self._callable_losses[\n previous_callable_losses_length:\n ]\n for regularizer in new_callable_losses:\n loss_tensor = regularizer()\n if loss_tensor is not None:\n new_losses.append(loss_tensor)\n _add_elements_to_collection(\n new_losses, tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 228, "n_words": 45, "vocab_size": 38, "complexity": 4, "nloc": 16, "token_counts": 101, "n_ast_nodes": 160, "n_identifiers": 22, "random_cut": "def add_loss(self, losses, inputs=None):\n previous_losses_length = len(self._losses)\n previous_callable_losses_length = len(self._callable_losses)\n super().add_loss(losses, inputs=inputs)\n if not tf.executing_eagerly():\n # TODO(fchollet): deprecate collection below.\n new_losses = self._losses[previous_losses_length:]\n new_callable_losses = self._callable_losses[\n previous_callable_losses_length:\n ]\n for regularizer in new_callable_losses:\n loss_tensor = regularizer()\n if loss_tensor is not N" }, { "id": 186675, "commit_id": "7d9e9a49005de7961e84d2a7c608db57dbab3046", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/override_gentoo.py", "file_name": "override_gentoo.py", "fun_name": "parse_sysconfig_var", "commit_message": "Add typing to certbot.apache (#9071)\n\n* Add typing to certbot.apache\r\n\r\nCo-authored-by: Adrien Ferrand ", "code": "def parse_sysconfig_var(self) -> None:\n \n defines = apache_util.parse_define_file(self.apacheconfig_filep,\n \"APACHE2_OPTS\")\n for k, v in defines.items():\n self.variables[k] = v\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 95, "n_words": 16, "vocab_size": 14, "complexity": 2, "nloc": 6, "token_counts": 39, "n_ast_nodes": 65, "n_identifiers": 10, "random_cut": "def parse_sysconfig_var(self) -> None:\n \n defines = apache_util.parse_define_file(self.apacheconfig_filep,\n \"APACHE2_OPTS\")\n " }, { "id": 222661, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/command/build_clib.py", "file_name": "build_clib.py", "fun_name": "run", "commit_message": "add python 3.10.4 for windows", "code": "def run(self):\n if not self.libraries:\n return\n\n # Yech -- this is cut 'n pasted from build_ext.py!\n from distutils.ccompiler import new_compiler\n self.compiler = new_compiler(compiler=self.compiler,\n dry_run=self.dry_run,\n force=self.force)\n customize_compiler(self.compiler)\n\n if self.include_dirs is not None:\n 
self.compiler.set_include_dirs(self.include_dirs)\n if self.define is not None:\n # 'define' option is a list of (name,value) tuples\n for (name,value) in self.define:\n self.compiler.define_macro(name, value)\n if self.undef is not None:\n for macro in self.undef:\n self.compiler.undefine_macro(macro)\n\n self.build_libraries(self.libraries)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 282, "n_words": 63, "vocab_size": 46, "complexity": 7, "nloc": 17, "token_counts": 126, "n_ast_nodes": 195, "n_identifiers": 20, "random_cut": "def run(self):\n if not self.libraries:\n return\n\n # Yech -- this is cut 'n pasted from build_ext.py!\n from distutils.ccompiler import new_compiler\n self.compiler = new_compiler(compiler=self.comp" }, { "id": 171269, "commit_id": "289f32df5a565848adbc0adc8949fa4066542316", "repo": "pandas", "path": "pandas/tests/io/excel/test_readers.py", "file_name": "test_readers.py", "fun_name": "test_read_from_pathlib_path", "commit_message": "STYLE: fix pylint reimported warnings (#49645)\n\n* STYLE: fix pylint reimported warnings\r\n\r\n* fixup! STYLE: fix pylint reimported warnings", "code": "def test_read_from_pathlib_path(self, read_ext):\n # GH12655\n str_path = \"test1\" + read_ext\n expected = pd.read_excel(str_path, sheet_name=\"Sheet1\", index_col=0)\n\n path_obj = Path(\"test1\" + read_ext)\n actual = pd.read_excel(path_obj, sheet_name=\"Sheet1\", index_col=0)\n\n tm.assert_frame_equal(expected, actual)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 68, "n_words": 27, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 64, "n_ast_nodes": 99, "n_identifiers": 14, "random_cut": "def test_read_from_pathlib_path(self, read_ext):\n # GH12655\n str_path = \"test1\" + read_ext\n expected = pd.read_excel(str_path, sheet_name=\"Sheet1\", index_col=0)\n\n " }, { "id": 62759, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/idna/uts46data.py", "file_name": "uts46data.py", "fun_name": "_seg_5", "commit_message": "upd; format", "code": "def _seg_5():\n return [\n (0x20D, 'V'),\n (0x20E, 'M', 'ȏ'),\n (0x20F, 'V'),\n (0x210, 'M', 'ȑ'),\n (0x211, 'V'),\n (0x212, 'M', 'ȓ'),\n (0x213, 'V'),\n (0x214, 'M', 'ȕ'),\n (0x215, 'V'),\n (0x216, 'M', 'ȗ'),\n (0x217, 'V'),\n (0x218, 'M', 'ș'),\n (0x219, 'V'),\n (0x21A, 'M', 'ț'),\n (0x21B, 'V'),\n (0x21C, 'M', 'ȝ'),\n (0x21D, 'V'),\n (0x21E, 'M', 'ȟ'),\n (0x21F, 'V'),\n (0x220, 'M', 'ƞ'),\n (0x221, 'V'),\n (0x222, 'M', 'ȣ'),\n (0x223, 'V'),\n (0x224, 'M', 'ȥ'),\n (0x225, 'V'),\n (0x226, 'M', 'ȧ'),\n (0x227, 'V'),\n (0x228, 'M', 'ȩ'),\n (0x229, 'V'),\n (0x22A, 'M', 'ȫ'),\n (0x22B, 'V'),\n (0x22C, 'M', 'ȭ'),\n (0x22D, 'V'),\n (0x22E, 'M', 'ȯ'),\n (0x22F, 'V'),\n (0x230, 'M', 'ȱ'),\n (0x231, 'V'),\n (0x232, 'M', 'ȳ'),\n (0x233, 'V'),\n (0x23A, 'M', 'ⱥ'),\n (0x23B, 'M', 'ȼ'),\n (0x23C, 'V'),\n (0x23D, 'M', 'ƚ'),\n (0x23E, 'M', 'ⱦ'),\n (0x23F, 'V'),\n (0x241, 'M', 'ɂ'),\n (0x242, 'V'),\n (0x243, 'M', 'ƀ'),\n (0x244, 'M', 'ʉ'),\n (0x245, 'M', 'ʌ'),\n (0x246, 'M', 'ɇ'),\n (0x247, 'V'),\n (0x248, 'M', 'ɉ'),\n (0x249, 'V'),\n (0x24A, 'M', 'ɋ'),\n (0x24B, 'V'),\n (0x24C, 'M', 'ɍ'),\n (0x24D, 'V'),\n (0x24E, 'M', 'ɏ'),\n (0x24F, 'V'),\n (0x2B0, 'M', 'h'),\n (0x2B1, 'M', 'ɦ'),\n (0x2B2, 'M', 'j'),\n (0x2B3, 'M', 'r'),\n (0x2B4, 'M', 'ɹ'),\n (0x2B5, 'M', 
'ɻ'),\n (0x2B6, 'M', 'ʁ'),\n (0x2B7, 'M', 'w'),\n (0x2B8, 'M', 'y'),\n (0x2B9, 'V'),\n (0x2D8, '3', ' ̆'),\n (0x2D9, '3', ' ̇'),\n (0x2DA, '3', ' ̊'),\n (0x2DB, '3', ' ̨'),\n (0x2DC, '3', ' ̃'),\n (0x2DD, '3', ' ̋'),\n (0x2DE, 'V'),\n (0x2E0, 'M', 'ɣ'),\n (0x2E1, 'M', 'l'),\n (0x2E2, 'M', 's'),\n (0x2E3, 'M', 'x'),\n (0x2E4, 'M', 'ʕ'),\n (0x2E5, 'V'),\n (0x340, 'M', '̀'),\n (0x341, 'M', '́'),\n (0x342, 'V'),\n (0x343, 'M', '̓'),\n (0x344, 'M', '̈́'),\n (0x345, 'M', 'ι'),\n (0x346, 'V'),\n (0x34F, 'I'),\n (0x350, 'V'),\n (0x370, 'M', 'ͱ'),\n (0x371, 'V'),\n (0x372, 'M', 'ͳ'),\n (0x373, 'V'),\n (0x374, 'M', 'ʹ'),\n (0x375, 'V'),\n (0x376, 'M', 'ͷ'),\n (0x377, 'V'),\n ]\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 577, "n_words": 272, "vocab_size": 171, "complexity": 1, "nloc": 103, "token_counts": 729, "n_ast_nodes": 1157, "n_identifiers": 1, "random_cut": "def _seg_5():\n return [\n (0x20D, 'V'),\n (0x20E, 'M', 'ȏ'),\n (0x20F, 'V'),\n (0x210, 'M', 'ȑ'),\n (0x211, 'V'),\n (0x212, 'M', 'ȓ'),\n (0x213, 'V'),\n (0x214, 'M', 'ȕ'),\n (0x215, 'V'),\n (0x216, 'M', 'ȗ'),\n (0x217, 'V'),\n (0x218, 'M', 'ș'),\n (0x219, 'V'),\n (0x21A, 'M', 'ț'),\n (0x21B, 'V'),\n (0x21C, 'M', 'ȝ'),\n (0x21D, 'V'),\n (0x21E, 'M', 'ȟ'),\n (0x21F, 'V'),\n (0x220, 'M', 'ƞ'),\n (0x221, 'V'),\n (0x222, 'M', 'ȣ'),\n (0x223, 'V'),\n (0x224, 'M', 'ȥ'),\n (0x225, 'V'),\n (0x226, 'M', 'ȧ'),\n (0x227, 'V'),\n (0x228, 'M', 'ȩ'),\n (0x229, 'V'),\n (0x22A, 'M', 'ȫ'),\n (0x22B, 'V'),\n (0x22C, 'M', 'ȭ'),\n (0x22D, 'V'),\n (0x22E, 'M', 'ȯ'),\n (0x22F, 'V'),\n (0x230, 'M', 'ȱ'),\n (0x231, 'V'),\n (0x232, 'M', 'ȳ'),\n (0x233, 'V'),\n (0x23A, 'M', 'ⱥ'),\n (0x23B, 'M', 'ȼ'),\n (0x23C, 'V'),\n (0x23D, 'M', 'ƚ'),\n (0x23E, 'M', 'ⱦ'),\n (0x23F, 'V'),\n (0x241, 'M', 'ɂ'),\n (0x242, 'V'),\n (0x243, 'M', 'ƀ'),\n (0x244, 'M', 'ʉ'),\n (0x245, 'M', 'ʌ'),\n (0x246, 'M', 'ɇ'),\n (0x247, 'V'),\n (0x248, 'M', 'ɉ'),\n (0x249, 'V'),\n (0x24A, 'M', 'ɋ'),\n (0x24B, 'V'),\n (0x24C, 'M', 'ɍ'),\n (0x24D, 'V'),\n (0x24E, 'M', 'ɏ'),\n (0x24F, 'V'),\n (0x2B0, 'M', 'h'),\n (0x2B1, 'M', 'ɦ'),\n (0x2B2, 'M', 'j'),\n (0x2B3, 'M', '" }, { "id": 280211, "commit_id": "6fed9116cb32d5cd9f10cfa38062cae4a27e4743", "repo": "keras", "path": "keras/layers/rnn/gru_test.py", "file_name": "gru_test.py", "fun_name": "_test_runtime_with_model", "commit_message": "tf.cond optimization\nReformatting\nDisabling a test that fails on fallback path", "code": "def _test_runtime_with_model(self, model):\n (x_train, y_train), _ = test_utils.get_test_data(\n train_samples=self.batch,\n test_samples=0,\n input_shape=(self.timestep, self.input_shape),\n num_classes=self.output_shape,\n )\n y_train = np_utils.to_categorical(y_train, self.output_shape)\n\n model.compile(optimizer=\"sgd\", loss=[\"categorical_crossentropy\", None])\n\n existing_loss = 0\n for _ in range(self.epoch):\n history = model.fit(x_train, y_train)\n loss_value = history.history[\"loss\"][0]\n\n self.assertNotEqual(existing_loss, loss_value)\n existing_loss = loss_value\n\n _, runtime_value = model.predict(x_train)\n if not tf.sysconfig.get_build_info()[\"is_rocm_build\"]:\n if tf.test.is_gpu_available():\n self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_GPU)\n else:\n self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_CPU)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 249, "n_words": 54, 
"vocab_size": 43, "complexity": 4, "nloc": 21, "token_counts": 181, "n_ast_nodes": 279, "n_identifiers": 38, "random_cut": "def _test_runtime_with_model(self, model):\n (x_train, y_train), _ = test_utils.get_test_data(\n train_samples=self.batch,\n test_samples=0,\n input_shape=(self.timestep, self.input_shape),\n num_classes=self.output_shape,\n )\n y_train = np_utils.to_categorical(y_train, self.output_shape)\n\n model.compile(optimizer=\"sgd\", loss=[\"categorical_crossentropy\", None])\n\n existing_loss = 0\n for _ in range(self.epoch):\n history = model.fit(x_train, y_train)\n loss_value = history.history[\"loss\"][0]\n\n self.assertNotEqual(existing_loss, loss_value)\n existing_loss = loss_value\n\n _, runtime_value = model.predict(x_train)\n if not" }, { "id": 97350, "commit_id": "b52d8e5fa16670e5d4b071ca72457e187ed7eeeb", "repo": "sentry", "path": "tests/sentry/snuba/metrics/test_fields.py", "file_name": "test_fields.py", "fun_name": "test_generate_bottom_up_derived_metrics_dependencies", "commit_message": "feat(metrics): Adds support for CompositeEntityDerivedMetrics [INGEST-924 INGEST-1044 INGEST-1064] (#32829)\n\n* feat(metrics): Adds support for CompositeEntityDerivedMetrics\r\n\r\nAdds support for CompositeEntityDerivedMetrics,\r\nAdds derived metric for sessions.errored, renames\r\nRawMetric class to RawAggregatedMetric. Modifies\r\nQueryBuilder to always perform post query operations\r\n\r\n* Incorporate PR feedback", "code": "def test_generate_bottom_up_derived_metrics_dependencies(self):\n assert list(self.sessions_errored.generate_bottom_up_derived_metrics_dependencies()) == [\n (None, \"session.errored_set\"),\n (None, \"session.errored_preaggregated\"),\n (None, \"session.errored\"),\n ]\n\n assert list(\n MOCKED_DERIVED_METRICS[\n \"random_composite\"\n ].generate_bottom_up_derived_metrics_dependencies()\n ) == [\n (None, \"session.errored_set\"),\n (None, \"session.errored_preaggregated\"),\n (None, \"session.errored\"),\n (None, \"random_composite\"),\n ]\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 178, "n_words": 30, "vocab_size": 17, "complexity": 1, "nloc": 16, "token_counts": 76, "n_ast_nodes": 121, "n_identifiers": 6, "random_cut": "def test_generate_bottom_up_derived_metrics_dependencies(self):\n assert list(self.sessions_errored.generate_bottom_up_derived_metrics_dependencies()) == [\n (None, \"session.errored_set\"),\n (None, \"session.errored_preaggregated\"),\n (None, \"session.errored\"),\n ]\n\n assert list(\n MOCKED_DERIVED_METRICS[\n \"random_composite\"\n ].generate_bottom_up_derived_metrics_dependencies()\n ) == [\n (None, \"session.errored_set\"),\n (None, \"session.errored_preaggregated\"),\n (None, \"session.errore" }, { "id": 120094, "commit_id": "648a512488a5184caa8dc1bced58e9f8ab7269f2", "repo": "jax", "path": "jaxlib/cusparse.py", "file_name": "cusparse.py", "fun_name": "_validate_csr_mhlo", "commit_message": "[MHLO] Add direct MHLO lowerings for sparse primitives.\n\nPiperOrigin-RevId: 440374054", "code": "def _validate_csr_mhlo(data, indices, indptr, shape):\n data_type = ir.RankedTensorType(data.type)\n indices_type = ir.RankedTensorType(indices.type)\n indptr_type = ir.RankedTensorType(indptr.type)\n\n nnz, = data_type.shape\n assert indices_type.shape == [nnz]\n assert indptr_type.element_type == indices_type.element_type\n assert indptr_type.shape == [shape[0] + 1]\n return data_type.element_type, indices_type.element_type, nnz\n\n", 
"url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 42, "n_words": 35, "vocab_size": 28, "complexity": 1, "nloc": 9, "token_counts": 86, "n_ast_nodes": 130, "n_identifiers": 13, "random_cut": "def _validate_csr_mhlo(data, indices, indptr, shape):\n data_type = ir.RankedTensorType(data.type)\n indices_type = ir.RankedTensorType(indices.type)\n indptr_type = ir.RankedTensorType(indptr.type)\n\n nnz, = data_type.shape\n assert indices_type.shape == [nnz]\n assert indptr_type.element_type == indices_type.element_type\n assert indptr_type.shape == [shape[0] + 1]\n return da" }, { "id": 188915, "commit_id": "eb78a761a99ac20a6364f85e12059fec6517d890", "repo": "calibre", "path": "src/calibre/ebooks/css_transform_rules.py", "file_name": "css_transform_rules.py", "fun_name": "export_rules", "commit_message": "Automated upgrade of code to python 3.7+\n\nDone by https://github.com/asottile/pyupgrade\nConsists mainly of moving string formatting to f-strings and removing\nencoding declarations", "code": "def export_rules(serialized_rules):\n lines = []\n for rule in serialized_rules:\n lines.extend('# ' + l for l in rule_to_text(rule).splitlines())\n lines.extend('{}: {}'.format(k, v.replace('\\n', ' ')) for k, v in iteritems(rule) if k in allowed_keys)\n lines.append('')\n return '\\n'.join(lines).encode('utf-8')\n\n", "url": "https://github.com/kovidgoyal/calibre.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 63, "n_words": 34, "vocab_size": 27, "complexity": 5, "nloc": 7, "token_counts": 84, "n_ast_nodes": 147, "n_identifiers": 17, "random_cut": "def export_rules(serialized_rules):\n lines =" }, { "id": 144680, "commit_id": "48adb6f7bb335b28fb0fb0d1190bd6c5dfc8ddfa", "repo": "ray", "path": "python/ray/serve/tests/test_deployment_state.py", "file_name": "test_deployment_state.py", "fun_name": "test_deploy_with_consistent_constructor_failure", "commit_message": "[serve] Introduce DeploymentStatus, poll for statuses instead of using async goals (#22121)", "code": "def test_deploy_with_consistent_constructor_failure(mock_deployment_state):\n \n deployment_state, timer = mock_deployment_state\n\n b_info_1, b_version_1 = deployment_info(num_replicas=2)\n updating = deployment_state.deploy(b_info_1)\n assert updating\n assert deployment_state.curr_status_info.status == DeploymentStatus.UPDATING\n _constructor_failure_loop_two_replica(deployment_state, 3)\n\n assert deployment_state._replica_constructor_retry_counter == 6\n assert deployment_state.curr_status_info.status == DeploymentStatus.FAILED\n check_counts(deployment_state, total=0)\n assert deployment_state.curr_status_info.message != \"\"\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 68, "n_words": 35, "vocab_size": 25, "complexity": 1, "nloc": 11, "token_counts": 79, "n_ast_nodes": 127, "n_identifiers": 20, "random_cut": "def test_deploy_with_consistent_constructor_failure(mock_deployment_state):\n \n deployment_state, timer = mock_deployment_state\n\n b_info_1, b_version_1 = deployment_info(num_replicas=2)\n updating = deployment_state.deploy(b_info_1)\n assert updating\n assert deployment_state.curr_status_info.status == DeploymentStatus.UPDATING\n _constructor_failure_loop_two_replica(deployment_state, 3)\n\n assert deployment_state._replica_constructor_retry_counter == 6\n assert 
deployment_state.curr_status_info.status == DeploymentStatus.FAILED\n check_counts(deployment_state, total=0)\n assert deployment_state.curr_status_info.message != \"\"\n\n" }, { "id": 42802, "commit_id": "60eb9e106f5915398eafd6aa339ec710c102dc09", "repo": "airflow", "path": "kubernetes_tests/test_kubernetes_pod_operator_backcompat.py", "file_name": "test_kubernetes_pod_operator_backcompat.py", "fun_name": "test_envs_from_configmaps", "commit_message": "Use KubernetesHook to create api client in KubernetesPodOperator (#20578)\n\nAdd support for k8s hook in KPO; use it always (even when no conn id); continue to consider the core k8s settings that KPO already takes into account but emit deprecation warning about them.\r\n\r\nKPO historically takes into account a few settings from core airflow cfg (e.g. verify ssl, tcp keepalive, context, config file, and in_cluster). So to use the hook to generate the client, somehow the hook has to take these settings into account. But we don't want the hook to consider these settings in general. So we read them in KPO and if necessary patch the hook and warn.", "code": "def test_envs_from_configmaps(self, mock_monitor, mock_start):\n # GIVEN\n configmap = 'test-configmap'\n # WHEN\n k = KubernetesPodOperator(\n namespace='default',\n image=\"ubuntu:16.04\",\n cmds=[\"bash\", \"-cx\"],\n arguments=[\"echo 10\"],\n labels={\"foo\": \"bar\"},\n name=\"test\",\n task_id=\"task\",\n in_cluster=False,\n do_xcom_push=False,\n configmaps=[configmap],\n )\n # THEN\n mock_pod = MagicMock()\n mock_pod.status.phase = 'Succeeded'\n mock_monitor.return_value = mock_pod\n context = create_context(k)\n k.execute(context)\n assert mock_start.call_args[1]['pod'].spec.containers[0].env_from == [\n k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name=configmap))\n ]\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 260, "n_words": 49, "vocab_size": 41, "complexity": 1, "nloc": 22, "token_counts": 135, "n_ast_nodes": 224, "n_identifiers": 33, "random_cut": "def test_envs_from_configmaps(self, mock_monitor, mock_start):\n # GIVEN\n configmap = 'test-configmap'\n # WHEN\n k = KubernetesPodOperator(\n namespace='default',\n image=\"ubuntu:16.04\",\n cmds=[\"bash\", \"-cx\"],\n arguments=[\"echo 10\"],\n labels={\"foo\": \"bar\"},\n name=\"test\",\n task_id=\"task\",\n in_cluster=False,\n do_xcom_push=False,\n c" }, { "id": 271153, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/data_adapter_test.py", "file_name": "data_adapter_test.py", "fun_name": "setUp", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def setUp(self):\n super().setUp()\n self.batch_size = 5\n self.numpy_input = np.zeros((50, 10))\n self.numpy_target = np.ones(50)\n self.tensor_input = tf.constant(2.0, shape=(50, 10))\n self.tensor_target = tf.ones((50,))\n self.arraylike_input = DummyArrayLike(self.numpy_input)\n self.arraylike_target = DummyArrayLike(self.numpy_target)\n self.dataset_input = (\n tf.data.Dataset.from_tensor_slices(\n (self.numpy_input, self.numpy_target)\n )\n .shuffle(50)\n .batch(self.batch_size)\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 165, "n_words": 37, "vocab_size": 28, "complexity": 1, "nloc": 27, "token_counts": 218, "n_ast_nodes": 200, "n_identifiers": 23, "random_cut": 
"def setUp(self):\n super().setUp()\n self.batch_size = " }, { "id": 109990, "commit_id": "cf8e04ddc1686dd285afdcc6e3ea8d9f29ff869b", "repo": "matplotlib", "path": "lib/matplotlib/tri/_triangulation.py", "file_name": "_triangulation.py", "fun_name": "get_trifinder", "commit_message": "Make all matplotlib.tri submodules private\n\nUsers should access all elements through the outer namespace\nmatplotlib.tri.\n\nBack-compatibility for the old module names will be added in a separate\ncommit. If done in the same commit, git would interpret this as\na modified file plus a new file and not as a rename. With the separation\nand the rename we keep the history.", "code": "def get_trifinder(self):\n \n if self._trifinder is None:\n # Default TriFinder class.\n from matplotlib.tri._trifinder import TrapezoidMapTriFinder\n self._trifinder = TrapezoidMapTriFinder(self)\n return self._trifinder\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 73, "n_words": 19, "vocab_size": 17, "complexity": 2, "nloc": 5, "token_counts": 33, "n_ast_nodes": 55, "n_identifiers": 6, "random_cut": "def get_trifinder(self):\n \n if self._trifinder is None:\n # Default TriFinder class.\n from matplotlib.tri._trifinder import TrapezoidMapTriFinder\n self._trifinder = TrapezoidMapTriFinder(self)\n return self._trifinder\n" }, { "id": 287742, "commit_id": "02731efc4cb3f7ee94b0c08aecc10e3a5209dbf4", "repo": "core", "path": "homeassistant/components/ibeacon/coordinator.py", "file_name": "coordinator.py", "fun_name": "_async_update_rssi", "commit_message": "Handle iBeacons that broadcast multiple different uuids (#79011)\n\n* Handle iBeacons that broadcast multiple different uuids\r\n\r\n* fix flip-flopping between uuids\r\n\r\n* naming", "code": "def _async_update_rssi(self) -> None:\n \n for (\n unique_id,\n ibeacon_advertisement,\n ) in self._last_ibeacon_advertisement_by_unique_id.items():\n address = unique_id.split(\"_\")[-1]\n if (\n service_info := bluetooth.async_last_service_info(\n self.hass, address, connectable=False\n )\n ) and service_info.rssi != ibeacon_advertisement.rssi:\n ibeacon_advertisement.update_rssi(service_info.rssi)\n async_dispatcher_send(\n self.hass,\n signal_seen(unique_id),\n ibeacon_advertisement,\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 261, "n_words": 34, "vocab_size": 28, "complexity": 4, "nloc": 23, "token_counts": 86, "n_ast_nodes": 134, "n_identifiers": 17, "random_cut": "def _async_update_rssi(self) -> None:\n \n for (\n unique_id,\n ibeacon_advertisement,\n ) in self._last_ibeacon_advertisement_by_unique_id.items():\n address = unique_id.split(\"_\")[-1]\n if (\n servic" }, { "id": 263293, "commit_id": "64ccb7aea824fbec57f7ed1bbe483ec486183c13", "repo": "pyinstaller", "path": "bootloader/waflib/Tools/c_preproc.py", "file_name": "c_preproc.py", "fun_name": "eval_macro", "commit_message": "Bootloader: Building: Unpack waf's lib archive.\n\nDoing so makes it easier to modify. 
This is a temporary measure until the next\nwaf version is released (although I'm tempted to keep it since it's much more\nIDE completion friendly).", "code": "def eval_macro(lst, defs):\n reduce_tokens(lst, defs, [])\n if not lst:\n raise PreprocError('missing tokens to evaluate')\n if lst:\n p, v = lst[0]\n if p == IDENT and v not in defs:\n raise PreprocError('missing macro %r' % lst)\n p, v = reduce_eval(lst)\n return int(v) != 0\n\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 89, "n_words": 43, "vocab_size": 33, "complexity": 5, "nloc": 10, "token_counts": 68, "n_ast_nodes": 110, "n_identifiers": 10, "random_cut": "def eval_macro(lst, defs):\n reduce_tokens(lst, defs, [])\n if not lst:\n raise PreprocError('missing tokens to evaluate')\n if lst:\n p, v = lst[0]\n if p == IDENT and v not in defs:\n raise PreprocError('missing macro %r' % lst)\n p, v = reduce_eval(lst)\n re" }, { "id": 94836, "commit_id": "ab993b32614bb83d17d10e1041817e43dd6f5980", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events.py", "file_name": "test_organization_events.py", "fun_name": "test_in_query_events_stack", "commit_message": "fix(tests): Fix dnd backend test flakes (#37916)\n\nThis PR fixes 3 major flakes:\r\n\r\nFixes SENTRY-TESTS-3J5: Just sort the project id order\r\n\r\nFixes SENTRY-TESTS-3HQ: Flakes because we calculate the retention\r\nin the test once and the value returned in the response is calculated\r\na little while after. We don't need to test for seconds granularity\r\nso replacing seconds to 0.\r\n\r\nFixes SENTRY-TESTS-3J0: Successively calling before_now results in some flakes\r\nparticularly in tests that are calculating aggregates\r\non transaction.duration. 
Introduced a load_data method\r\nthat takes a datetime object timestamp and a timedelta duration\r\ncalculates the offset based on timestamp to get start_timestamp.", "code": "def test_in_query_events_stack(self):\n test_js = self.store_event(\n self.load_data(\n platform=\"javascript\",\n timestamp=before_now(minutes=10),\n duration=timedelta(seconds=5),\n ),\n project_id=self.project.id,\n )\n test_java = self.store_event(\n self.load_data(\n platform=\"java\",\n timestamp=before_now(minutes=10),\n duration=timedelta(seconds=5),\n ),\n project_id=self.project.id,\n )\n self.run_test_in_query(\n \"stack.filename:[../../sentry/scripts/views.js]\", [test_js], [test_java]\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 235, "n_words": 27, "vocab_size": 18, "complexity": 1, "nloc": 20, "token_counts": 105, "n_ast_nodes": 161, "n_identifiers": 17, "random_cut": "def test_in_query_events_stack(self):\n test_js = self.store_event(\n self.load_data(\n platform=\"javascript\",\n timestamp=before_now(minutes=10),\n duration=timedelta(seconds=5),\n ),\n project_id=self.project.id,\n )\n test_java = self.store_event(\n self.load_data(\n platform=\"java\",\n timestamp=before_now(minutes=10),\n duration=timedelt" }, { "id": 169899, "commit_id": "90b4add77859d1349530fff3c8cadeef95f36f39", "repo": "pandas", "path": "pandas/tests/arrays/test_datetimes.py", "file_name": "test_datetimes.py", "fun_name": "test_add_timedeltalike_scalar_mismatched_reso", "commit_message": "REF: _reso->_creso (#49107)", "code": "def test_add_timedeltalike_scalar_mismatched_reso(self, dta_dti, scalar):\n dta, dti = dta_dti\n\n td = pd.Timedelta(scalar)\n exp_reso = max(dta._creso, td._creso)\n exp_unit = npy_unit_to_abbrev(exp_reso)\n\n expected = (dti + td)._data._as_unit(exp_unit)\n result = dta + scalar\n tm.assert_extension_array_equal(result, expected)\n\n result = scalar + dta\n tm.assert_extension_array_equal(result, expected)\n\n expected = (dti - td)._data._as_unit(exp_unit)\n result = dta - scalar\n tm.assert_extension_array_equal(result, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 132, "n_words": 49, "vocab_size": 25, "complexity": 1, "nloc": 13, "token_counts": 107, "n_ast_nodes": 166, "n_identifiers": 20, "random_cut": "def test_add_timedeltalike_scalar_mismatched_reso(self, dta_dti, scalar):\n dta, dti = dta_dti\n\n td = pd.Timedelta(scalar)\n exp_reso = max(dta._creso, td._creso)\n exp_unit = npy_un" }, { "id": 23691, "commit_id": "8b228a1f9b011aba935963431cadb81c7fe361d5", "repo": "PaddleOCR", "path": "PPOCRLabel/PPOCRLabel.py", "file_name": "PPOCRLabel.py", "fun_name": "cellreRecognition", "commit_message": "new", "code": "def cellreRecognition(self):\n \n img = cv2.imread(self.filePath)\n for shape in self.canvas.selectedShapes:\n box = [[int(p.x()), int(p.y())] for p in shape.points]\n\n if len(box) > 4:\n box = self.gen_quad_from_poly(np.array(box))\n assert len(box) == 4\n\n # pad around bbox for better text recognition accuracy\n _box = boxPad(box, img.shape, 6)\n img_crop = get_rotate_crop_image(img, np.array(_box, np.float32))\n if img_crop is None:\n msg = 'Can not recognise the detection box in ' + self.filePath + '. Please change manually'\n QMessageBox.information(self, \"Information\", msg)\n return\n\n # merge the text result in the cell\n texts = ''\n probs = 0. 
# the probability of the cell is avgerage prob of every text box in the cell\n bboxes = self.ocr.ocr(img_crop, det=True, rec=False, cls=False)\n if len(bboxes) > 0:\n bboxes.reverse() # top row text at first\n for _bbox in bboxes:\n patch = get_rotate_crop_image(img_crop, np.array(_bbox, np.float32))\n rec_res = self.ocr.ocr(patch, det=False, rec=True, cls=False)\n text = rec_res[0][0]\n if text != '':\n texts += text + (' ' if text[0].isalpha() else '') # add space between english word\n probs += rec_res[0][1]\n probs = probs / len(bboxes)\n result = [(texts.strip(), probs)]\n\n if result[0][0] != '':\n result.insert(0, box)\n print('result in reRec is ', result)\n if result[1][0] == shape.label:\n print('label no change')\n else:\n shape.label = result[1][0]\n else:\n print('Can not recognise the box')\n if self.noLabelText == shape.label:\n print('label no change')\n else:\n shape.label = self.noLabelText\n self.singleLabel(shape)\n self.setDirty()\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 827, "n_words": 211, "vocab_size": 126, "complexity": 12, "nloc": 42, "token_counts": 378, "n_ast_nodes": 610, "n_identifiers": 48, "random_cut": "def cellreRecognition(self):\n \n img = cv2.imread(self.filePath)\n for shape in self.canvas.selectedShapes:\n box = [[int(p.x()), int(p.y())] for p in shape.points]\n\n if len(box) > 4:\n box = self.gen_quad_from_poly(np.array(box))\n assert len(box) == 4\n\n # pad around bbox for better text recognition accuracy\n _box = boxPad(box, img.shape, 6)\n img_crop = get_rotate_crop_image(img, np.array(_box, np.float32))\n if img_crop is None:\n msg = 'Can not recognise the detection box in ' + self.filePath + '. Please change manually'\n QMessageBox.information(self, \"Information\", msg)\n return\n\n # merge the text result in the cell\n texts = ''\n probs = 0. 
# the probability of the cell is avgerage prob of every text box in the cell\n bboxes = self.ocr.ocr(img_crop, det=True, rec=False, cls=False)\n if len(bboxes) > 0:\n bboxes.reverse() # top row text at first\n for _bbox in bboxes:\n patch = get_rotate_crop_image(img_crop, np.array(_bbox, np.float32))\n rec_res = self.ocr.ocr(patch, det=False, rec=True, cls=False)\n text = rec_res[0][0]\n if text != '':\n texts += text + (' ' if text[0].isalpha() else '') # add space between english word\n probs += rec_res[0][1]\n probs = probs / len(bboxes)\n result = [(texts.strip(), probs)]\n\n if result[0][0] != '':\n result.insert(0, box)\n print('resu" }, { "id": 266413, "commit_id": "76220c4a7bf90c97113fe104ea33957a9881b8a9", "repo": "ansible", "path": "test/units/galaxy/test_collection.py", "file_name": "test_collection.py", "fun_name": "test_validate_certs", "commit_message": "ansible-galaxy - fix the --ignore-certs flag for the implicit galaxy server (#76735)\n\n* ansible-galaxy - fix the --ignore-certs flag for the implicit galaxy server\r\n\r\n* changelog\r\n\r\n* Add a test without the server config\r\n\r\n* Fix respecting --ignore-certs for individual --server URLs also\r\n\r\n* Update changelogs/fragments/76735-ansible-galaxy-fix-ignore-certs.yaml", "code": "def test_validate_certs(global_ignore_certs, monkeypatch):\n cli_args = [\n 'ansible-galaxy',\n 'collection',\n 'install',\n 'namespace.collection:1.0.0',\n ]\n if global_ignore_certs:\n cli_args.append('--ignore-certs')\n\n galaxy_cli = GalaxyCLI(args=cli_args)\n mock_execute_install = MagicMock()\n monkeypatch.setattr(galaxy_cli, '_execute_install_collection', mock_execute_install)\n galaxy_cli.run()\n\n assert len(galaxy_cli.api_servers) == 1\n assert galaxy_cli.api_servers[0].validate_certs is not global_ignore_certs\n\n\n@pytest.mark.parametrize('global_ignore_certs', [True, False])", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize('global_ignore_certs', [True, False])", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 96, "n_words": 36, "vocab_size": 33, "complexity": 2, "nloc": 15, "token_counts": 77, "n_ast_nodes": 153, "n_identifiers": 18, "random_cut": "def test_validate_certs(global_ignore_certs, monkeypatch):\n cli_args = [\n 'ansible-galaxy',\n 'collection',\n 'install',\n 'namespace.collection:1.0.0',\n ]\n if global_ignore_certs:\n cli_args.append('--ignore-certs')\n\n galaxy_cli = Galaxy" }, { "id": 60539, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/cli/parser.py", "file_name": "parser.py", "fun_name": "_update_defaults", "commit_message": "upd; format", "code": "def _update_defaults(self, defaults):\n # type: (Dict[str, Any]) -> Dict[str, Any]\n \n\n # Accumulate complex default state.\n self.values = optparse.Values(self.defaults)\n late_eval = set()\n # Then set the options with those values\n for key, val in self._get_ordered_configuration_items():\n # '--' because configuration supports only long names\n option = self.get_option(\"--\" + key)\n\n # Ignore options not present in this parser. E.g. 
non-globals put\n # in [global] by users that want them to apply to all applicable\n # commands.\n if option is None:\n continue\n\n assert option.dest is not None\n\n if option.action in (\"store_true\", \"store_false\"):\n try:\n val = strtobool(val)\n except ValueError:\n self.error(\n \"{} is not a valid value for {} option, \" # noqa\n \"please specify a boolean value like yes/no, \"\n \"true/false or 1/0 instead.\".format(val, key)\n )\n elif option.action == \"count\":\n with suppress(ValueError):\n val = strtobool(val)\n with suppress(ValueError):\n val = int(val)\n if not isinstance(val, int) or val < 0:\n self.error(\n \"{} is not a valid value for {} option, \" # noqa\n \"please instead specify either a non-negative integer \"\n \"or a boolean value like yes/no or false/true \"\n \"which is equivalent to 1/0.\".format(val, key)\n )\n elif option.action == \"append\":\n val = val.split()\n val = [self.check_default(option, key, v) for v in val]\n elif option.action == \"callback\":\n assert option.callback is not None\n late_eval.add(option.dest)\n opt_str = option.get_opt_string()\n val = option.convert_value(opt_str, val)\n # From take_action\n args = option.callback_args or ()\n kwargs = option.callback_kwargs or {}\n option.callback(option, opt_str, val, self, *args, **kwargs)\n else:\n val = self.check_default(option, key, val)\n\n defaults[option.dest] = val\n\n for key in late_eval:\n defaults[key] = getattr(self.values, key)\n self.values = None\n return defaults\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1029, "n_words": 254, "vocab_size": 148, "complexity": 14, "nloc": 47, "token_counts": 308, "n_ast_nodes": 518, "n_identifiers": 35, "random_cut": "def _update_defaults(self, defaults):\n # type: (Dict[str, Any]) -> Dict[str, Any]\n \n\n # Accumulate complex default state.\n self.values = optparse.Values(self.defaults)\n late_eval = set()\n " }, { "id": 210982, "commit_id": "6d91289fc71f4b7440515c7eed4302066a524a22", "repo": "PaddleDetection", "path": "ppdet/modeling/backbones/mobileone.py", "file_name": "mobileone.py", "fun_name": "_pad_1x1_to_3x3_tensor", "commit_message": "Add SIoU and MobileOne block (#6312)\n\n* Add SIoU and MobileOne block\r\n\r\n* add paddle copyright\r\n\r\n* mobileone block k>1 bugfix\r\n\r\n* format code style", "code": "def _pad_1x1_to_3x3_tensor(self, kernel1x1):\n if kernel1x1 is None:\n return 0\n else:\n padding_size = (self.kernel_size - 1) // 2\n return nn.functional.pad(\n kernel1x1,\n [padding_size, padding_size, padding_size, padding_size])\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 100, "n_words": 24, "vocab_size": 22, "complexity": 2, "nloc": 8, "token_counts": 45, "n_ast_nodes": 68, "n_identifiers": 8, "random_cut": "def _pad_1x1_to_3x3_tensor(self, kernel1x1):\n if kernel1x1 is None:\n return 0\n else:\n padding_size = (self.kernel_size - 1) // 2\n return nn.functional.pad(\n kernel1x1,\n " }, { "id": 261256, "commit_id": "5ceb8a6a031ddff26a7ede413db1b53edb64166a", "repo": "scikit-learn", "path": "sklearn/ensemble/_hist_gradient_boosting/grower.py", "file_name": "grower.py", "fun_name": "split_next", "commit_message": "ENH FEA add interaction constraints to HGBT (#21020)\n\nCo-authored-by: Loïc Estève ", "code": "def split_next(self):\n \n # Consider the node with the highest loss reduction (a.k.a. 
gain)\n node = heappop(self.splittable_nodes)\n\n tic = time()\n (\n sample_indices_left,\n sample_indices_right,\n right_child_pos,\n ) = self.splitter.split_indices(node.split_info, node.sample_indices)\n self.total_apply_split_time += time() - tic\n\n depth = node.depth + 1\n n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)\n n_leaf_nodes += 2\n\n left_child_node = TreeNode(\n depth,\n sample_indices_left,\n node.split_info.sum_gradient_left,\n node.split_info.sum_hessian_left,\n value=node.split_info.value_left,\n )\n right_child_node = TreeNode(\n depth,\n sample_indices_right,\n node.split_info.sum_gradient_right,\n node.split_info.sum_hessian_right,\n value=node.split_info.value_right,\n )\n\n node.right_child = right_child_node\n node.left_child = left_child_node\n\n # set start and stop indices\n left_child_node.partition_start = node.partition_start\n left_child_node.partition_stop = node.partition_start + right_child_pos\n right_child_node.partition_start = left_child_node.partition_stop\n right_child_node.partition_stop = node.partition_stop\n\n # set interaction constraints (the indices of the constraints sets)\n if self.interaction_cst is not None:\n # Calculate allowed_features and interaction_cst_indices only once. Child\n # nodes inherit them before they get split.\n (\n left_child_node.allowed_features,\n left_child_node.interaction_cst_indices,\n ) = self._compute_interactions(node)\n right_child_node.interaction_cst_indices = (\n left_child_node.interaction_cst_indices\n )\n right_child_node.allowed_features = left_child_node.allowed_features\n\n if not self.has_missing_values[node.split_info.feature_idx]:\n # If no missing values are encountered at fit time, then samples\n # with missing values during predict() will go to whichever child\n # has the most samples.\n node.split_info.missing_go_to_left = (\n left_child_node.n_samples > right_child_node.n_samples\n )\n\n self.n_nodes += 2\n self.n_categorical_splits += node.split_info.is_categorical\n\n if self.max_leaf_nodes is not None and n_leaf_nodes == self.max_leaf_nodes:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n self._finalize_splittable_nodes()\n return left_child_node, right_child_node\n\n if self.max_depth is not None and depth == self.max_depth:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n return left_child_node, right_child_node\n\n if left_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(left_child_node)\n if right_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(right_child_node)\n\n if self.with_monotonic_cst:\n # Set value bounds for respecting monotonic constraints\n # See test_nodes_values() for details\n if (\n self.monotonic_cst[node.split_info.feature_idx]\n == MonotonicConstraint.NO_CST\n ):\n lower_left = lower_right = node.children_lower_bound\n upper_left = upper_right = node.children_upper_bound\n else:\n mid = (left_child_node.value + right_child_node.value) / 2\n if (\n self.monotonic_cst[node.split_info.feature_idx]\n == MonotonicConstraint.POS\n ):\n lower_left, upper_left = node.children_lower_bound, mid\n lower_right, upper_right = mid, node.children_upper_bound\n else: # NEG\n lower_left, upper_left = mid, node.children_upper_bound\n lower_right, upper_right = node.children_lower_bound, mid\n left_child_node.set_children_bounds(lower_left, upper_left)\n right_child_node.set_children_bounds(lower_right, upper_right)\n\n # Compute histograms of 
children, and compute their best possible split\n # (if needed)\n should_split_left = not left_child_node.is_leaf\n should_split_right = not right_child_node.is_leaf\n if should_split_left or should_split_right:\n\n # We will compute the histograms of both nodes even if one of them\n # is a leaf, since computing the second histogram is very cheap\n # (using histogram subtraction).\n n_samples_left = left_child_node.sample_indices.shape[0]\n n_samples_right = right_child_node.sample_indices.shape[0]\n if n_samples_left < n_samples_right:\n smallest_child = left_child_node\n largest_child = right_child_node\n else:\n smallest_child = right_child_node\n largest_child = left_child_node\n\n # We use the brute O(n_samples) method on the child that has the\n # smallest number of samples, and the subtraction trick O(n_bins)\n # on the other one.\n tic = time()\n smallest_child.histograms = self.histogram_builder.compute_histograms_brute(\n smallest_child.sample_indices\n )\n largest_child.histograms = (\n self.histogram_builder.compute_histograms_subtraction(\n node.histograms, smallest_child.histograms\n )\n )\n self.total_compute_hist_time += time() - tic\n\n tic = time()\n if should_split_left:\n self._compute_best_split_and_push(left_child_node)\n if should_split_right:\n self._compute_best_split_and_push(right_child_node)\n self.total_find_split_time += time() - tic\n\n # Release memory used by histograms as they are no longer needed\n # for leaf nodes since they won't be split.\n for child in (left_child_node, right_child_node):\n if child.is_leaf:\n del child.histograms\n\n # Release memory used by histograms as they are no longer needed for\n # internal nodes once children histograms have been computed.\n del node.histograms\n\n return left_child_node, right_child_node\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1959, "n_words": 492, "vocab_size": 251, "complexity": 19, "nloc": 112, "token_counts": 642, "n_ast_nodes": 1022, "n_identifiers": 78, "random_cut": "def split_next(self):\n \n # Consider the node with the highest loss reduction (a.k.a. gain)\n node = heappop(self.splittable_nodes)\n\n tic = time()\n (\n sample_indices_left,\n sample_indices_right,\n right_child_pos,\n ) = self.splitter.split_indices(node.split_info, node.sample_indices)\n self.total_apply_split_time += time() - tic\n\n depth = node.depth + 1\n n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)\n n_leaf_nodes += 2\n\n left_child_node = TreeNode(\n depth,\n sample_indices_left,\n node.split_info.sum_gradient_left,\n node.split_info.sum_hessian_left,\n value=node.split_info.value_left,\n )\n right_child_node = TreeNode(\n depth,\n sample_indices_right,\n node.split_info.sum_gradient_right,\n node.split_info.sum_hessian_right,\n value=node.split_info.value_right,\n )\n\n node.right_child = right_child_node\n node.left_child = left_child_node\n\n # set start and stop indices\n left_child_node.partition_start = node.partition_start\n left_child_node.partition_stop = node.partition_start + right_child_pos\n right_child_node.partition_start = left_child_node.partition_stop\n right_child_node.partition_stop = node.partition_stop\n\n # set interaction constraints (the indices of the constraints sets)\n if self.interaction_cst is not None:\n # Calculate allowed_features and interaction_cst_indices only once. 
Child\n # nodes inherit them before they get split.\n (\n left_child_node.allowed_features,\n left_child_node.interaction_cst_indices,\n ) = self._compute_interactions(node)\n right_child_node.interaction_cst_indices = (\n left_child_node.interaction_cst_indices\n )\n right_child_node.allowed_features = left_child_node.allowed_features\n\n if not self.has_missing_values[node.split_info.feature_idx]:\n # If no missing values are encountered at fit time, then samples\n # with missing values during predict() will go to whichever child\n # has the most samples.\n node.split_info.missing_go_to_left = (\n left_child_node.n_samples > right_child_node.n_samples\n )\n\n self.n_nodes += 2\n self.n_categorical_splits += node.split_info.is_categorical\n\n if self.max_leaf_nodes is not None and n_leaf_nodes == self.max_leaf_nodes:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n self._finalize_splittable_nodes()\n return left_child_node, right_child_node\n\n if self.max_depth is not None and depth == self.max_depth:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n return left_child_node, right_child_node\n\n if left_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(left_child_node)\n if right_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(right_child_node)\n\n if self.with_monotonic_cst:\n # Set value bounds for respecting monotonic constraints\n # See test_nodes_values() for details\n if (\n self.monotonic_cst[node.split_info.feature_idx]\n == MonotonicConstraint.NO_CST\n ):\n lower_left = lower_right = node.children_lower_bound\n upper_left = upper_right = node.children_upper_bound\n else:\n mid = (left_child_node.value + right_child_node.value) / 2\n if (\n self.monotonic_cst[node.split_info.feature_idx]\n == MonotonicConstraint.POS\n ):\n lower_left, upper_left = node.children_lower_bound, mid\n lower_right, upper_right = mid, node.children_upper_bound\n else: # NEG\n lower_left, upper_left = mid, node.children_upper_bound\n lower_right, upper_right = node.children_lower_bound, mid\n left_child_node.set_children_bounds(lower_left, upp" }, { "id": 48688, "commit_id": "78cdae69997c9fd95211ec15fb4e21f4cd45e30a", "repo": "django-rest-framework", "path": "tests/test_relations.py", "file_name": "test_relations.py", "fun_name": "setup_method", "commit_message": "Fix Pytest's deprecation warnings about nose usage (#8758)\n\nPytest 7.2.0 deprecated plain `setup` and `teardown` functions and\r\nmethods as nose idioms:\r\nhttps://docs.pytest.org/en/latest/changelog.html#pytest-7-2-0-2022-10-23\r\n\r\n`setup` can be safely replaced with `setup_method`:\r\nhttps://docs.pytest.org/en/stable/deprecations.html#setup-teardown\r\n\r\nFixes: https://github.com/encode/django-rest-framework/issues/8757\r\nSigned-off-by: Stanislav Levin \r\n\r\nSigned-off-by: Stanislav Levin ", "code": "def setup_method(self):\n self.default_hyperlink = serializers.Hyperlink('http://example.com', 'test')\n", "url": "https://github.com/encode/django-rest-framework.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 31, "n_identifiers": 5, "random_cut": "def setup_method(self):\n self.defaul" }, { "id": 307676, "commit_id": "efb482fb1dcf29468e50fca98f046d551d6355c7", "repo": "core", "path": "homeassistant/components/demo/mailbox.py", "file_name": "mailbox.py", "fun_name": 
"async_get_messages", "commit_message": "Add demo to strict-typing (#77596)\n\n* Add demo to strict-typing\r\n\r\n* Adjust component\r\n\r\n* Adjust PR\r\n\r\n* Update homeassistant/components/demo/mailbox.py\r\n\r\nCo-authored-by: Marc Mueller <30130371+cdce8p@users.noreply.github.com>", "code": "async def async_get_messages(self) -> list[dict[str, Any]]:\n \n return sorted(\n self._messages.values(),\n key=lambda item: item[\"info\"][\"origtime\"], # type: ignore[no-any-return]\n reverse=True,\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 72, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 7, "token_counts": 45, "n_ast_nodes": 74, "n_identifiers": 12, "random_cut": "async def async_get_messages(self) -> list[dict[str, Any]]:\n \n " }, { "id": 126931, "commit_id": "4607e788c1277f9396d7f45ea112b2d551383499", "repo": "ray", "path": "rllib/offline/estimators/tests/test_dr_learning.py", "file_name": "test_dr_learning.py", "fun_name": "test_dr_expert_policy_mixed_data", "commit_message": "[RLlib] Fix test_ope flakiness (#27676)", "code": "def test_dr_expert_policy_mixed_data(self):\n print(\"Test DoublyRobust on expert policy on mixed dataset\")\n check_estimate(\n estimator_cls=DoublyRobust,\n gamma=self.gamma,\n q_model_config=self.q_model_config,\n policy=self.expert_policy,\n batch=self.mixed_batch,\n mean_ret=self.expert_reward,\n std_ret=self.expert_std,\n seed=SEED,\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 128, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 12, "token_counts": 56, "n_ast_nodes": 82, "n_identifiers": 18, "random_cut": "def test_dr_expert_policy_mixed_data(self):\n print(\"Test DoublyRobust on expert policy on mixed dataset\")\n check_estimate(\n estimator_cls=DoublyRobust,\n gamma=self.gamma,\n q_model_config=self.q_mo" }, { "id": 65808, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/e_commerce/shopping_cart/cart.py", "file_name": "cart.py", "fun_name": "get_shipping_addresses", "commit_message": "style: format code with black", "code": "def get_shipping_addresses(party=None):\n\tif not party:\n\t\tparty = get_party()\n\taddresses = get_address_docs(party=party)\n\treturn [\n\t\t{\"name\": address.name, \"title\": address.address_title, \"display\": address.display}\n\t\tfor address in addresses\n\t\tif address.address_type == \"Shipping\"\n\t]\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 19, "n_words": 29, "vocab_size": 26, "complexity": 4, "nloc": 9, "token_counts": 56, "n_ast_nodes": 105, "n_identifiers": 12, "random_cut": "def get_shipping_addresses(party=None):\n\tif not party:\n\t\tparty = get_party()\n\taddresses = get_address_docs(party=party)\n\t" }, { "id": 172096, "commit_id": "1d5f05c33c613508727ee7b971ad56723d474446", "repo": "pandas", "path": "pandas/tests/tools/test_to_datetime.py", "file_name": "test_to_datetime.py", "fun_name": "test_parsers_timestring", "commit_message": "PDEP0004: implementation (#49024)\n\n* :wastebasket: deprecate infer_datetime_format, make strict\r\n\r\n* :rotating_light: add warning about dayfirst\r\n\r\n* :white_check_mark: add/update tests\r\n\r\n* :rotating_light: add warning if format cant be 
guessed\r\n\r\n* :goal_net: catch warnings\r\n\r\n* :memo: update docs\r\n\r\n* :memo: add example of reading csv file with mixed formats\r\n\r\n* :wastebasket: removed now outdated tests / clean inputs\r\n\r\n* :memo: clarify whatsnew and user-guide\r\n\r\n* :art:\r\n\r\n* guess %Y-%m format\r\n\r\n* Detect format from first non-na, but also exclude now and today\r\n\r\n* :white_check_mark: fixup tests based on now and today parsing\r\n\r\n* fixup after merge\r\n\r\n* fixup after merge\r\n\r\n* fixup test\r\n\r\n* remove outdated doctest\r\n\r\n* xfail test based on issue 49767\r\n\r\n* wip\r\n\r\n* add back examples of formats which can be guessed\r\n\r\n* start fixing up\r\n\r\n* fixups from reviews\r\n\r\n* lint\r\n\r\n* put tests back\r\n\r\n* shorten diff\r\n\r\n* add example of string which cannot be guessed\r\n\r\n* add deprecated directive, construct expected explicitly, explicit UserWarning, reword row-wise and column-wise\r\n\r\n* remove redundant example\r\n\r\n* restore newline\r\n\r\n* double backticks around False, explicitly raise UserWarning\r\n\r\n* reword warning\r\n\r\n* test both dayfirst True and False\r\n\r\n* postmerge fixup\r\n\r\n* unimportant typo to restart CI\r\n\r\nCo-authored-by: MarcoGorelli <>", "code": "def test_parsers_timestring(self, date_str, exp_def):\n # must be the same as dateutil result\n exp_now = parse(date_str)\n\n result1, _ = parsing.parse_time_string(date_str)\n with tm.assert_produces_warning(UserWarning, match=\"Could not infer format\"):\n result2 = to_datetime(date_str)\n result3 = to_datetime([date_str])\n result4 = Timestamp(date_str)\n result5 = DatetimeIndex([date_str])[0]\n # parse time string return time string based on default date\n # others are not, and can't be changed because it is used in\n # time series plot\n assert result1 == exp_def\n assert result2 == exp_now\n assert result3 == exp_now\n assert result4 == exp_now\n assert result5 == exp_now\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 204, "n_words": 85, "vocab_size": 57, "complexity": 1, "nloc": 13, "token_counts": 88, "n_ast_nodes": 145, "n_identifiers": 21, "random_cut": "def test_parsers_timestring(self, date_str, exp_def):\n # must be the same as dateutil result\n exp_now = parse(date_str)\n\n result1, _ = parsing.parse_time_string(date_str)\n with tm.assert_produces_warning(UserWarning, match=\"Could not infer format\"):\n result2 = to_datetime(date_str)\n result3 = to_datetime([date_str])\n result4 = Timestamp(date_str)\n result5 = DatetimeIndex([date_str])[0]\n # parse time string return time string based on default date\n # others are not, and can't be changed because it is used in\n # time series plot\n assert result1 == exp_def\n assert result2 == exp_now\n " }, { "id": 270583, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/dtensor/layers_test.py", "file_name": "layers_test.py", "fun_name": "setUp", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def setUp(self):\n super().setUp()\n backend.enable_tf_random_generator()\n tf_utils.set_random_seed(1337)\n global_ids = test_util.create_device_ids_array((2, 2))\n local_device_ids = np.ravel(global_ids).tolist()\n mesh_dict = {\n \"CPU\": dtensor.Mesh(\n [\"X\", \"Y\"],\n global_ids,\n local_device_ids,\n test_util.create_device_list((2, 2), \"CPU\"),\n )\n }\n self.mesh = self.configTestMesh(mesh_dict)\n", "url": 
"https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 166, "n_words": 29, "vocab_size": 26, "complexity": 1, "nloc": 15, "token_counts": 91, "n_ast_nodes": 149, "n_identifiers": 20, "random_cut": "def setUp(self):\n super()." }, { "id": 174577, "commit_id": "1fbfdc44233486299db4d4364cf8cc8ef98ceacb", "repo": "pip", "path": "src/pip/_internal/models/installation_report.py", "file_name": "installation_report.py", "fun_name": "to_dict", "commit_message": "install report: add version field\n\nAlso, affirm the experimental status of the feature.", "code": "def to_dict(self) -> Dict[str, Any]:\n return {\n \"version\": \"0\",\n \"pip_version\": __version__,\n \"install\": {\n canonicalize_name(ireq.metadata[\"Name\"]): self._install_req_to_dict(\n ireq\n )\n for ireq in self._install_requirements\n },\n # https://peps.python.org/pep-0508/#environment-markers\n # TODO: currently, the resolver uses the default environment to evaluate\n # environment markers, so that is what we report here. In the future, it\n # should also take into account options such as --python-version or\n # --platform, perhaps under the form of an environment_override field?\n # https://github.com/pypa/pip/issues/11198\n \"environment\": default_environment(),\n }\n", "url": "https://github.com/pypa/pip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 273, "n_words": 75, "vocab_size": 64, "complexity": 2, "nloc": 12, "token_counts": 56, "n_ast_nodes": 99, "n_identifiers": 12, "random_cut": "def to_dict(self) -> Dict[str, Any]:\n " }, { "id": 88065, "commit_id": "b38f59d9f6d9eedd7ce0606805df7c072addb000", "repo": "sentry", "path": "src/sentry/api/fields/actor.py", "file_name": "actor.py", "fun_name": "to_internal_value", "commit_message": "ref(hybrid-cloud): Add user services. Start tagging some model tests as stable (#40614)\n\nNotifications uses new hybrid cloud APIUser\r\n\r\nCo-authored-by: Mike Ihbe \r\nCo-authored-by: Zachary Collins \r\nCo-authored-by: Zach Collins ", "code": "def to_internal_value(self, data):\n if not data:\n return None\n\n try:\n actor = ActorTuple.from_actor_identifier(data)\n except Exception:\n raise serializers.ValidationError(\n \"Could not parse actor. 
Format should be `type:id` where type is `team` or `user`.\"\n )\n try:\n obj: APIUser | Team = actor.resolve()\n except (Team.DoesNotExist, User.DoesNotExist):\n raise serializers.ValidationError(f\"{actor.type.__name__} does not exist\")\n\n if actor.type == Team:\n if obj.organization != self.context[\"organization\"]:\n raise serializers.ValidationError(\"Team is not a member of this organization\")\n elif actor.type == User:\n if not OrganizationMember.objects.filter(\n organization=self.context[\"organization\"], user_id=obj.id\n ).exists():\n raise serializers.ValidationError(\"User is not a member of this organization\")\n return actor\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 298, "n_words": 84, "vocab_size": 59, "complexity": 8, "nloc": 22, "token_counts": 135, "n_ast_nodes": 233, "n_identifiers": 25, "random_cut": "def to_internal_value(self, data):\n if not data:\n return None\n\n try:\n actor = Act" }, { "id": 68999, "commit_id": "930e557fc6e6bdd515984e2f66ab5cea29101bae", "repo": "erpnext", "path": "erpnext/patches/v8_7/sync_india_custom_fields.py", "file_name": "sync_india_custom_fields.py", "fun_name": "execute", "commit_message": "fix: remove HR/Payroll patches", "code": "def execute():\n\tcompany = frappe.get_all(\"Company\", filters={\"country\": \"India\"})\n\tif not company:\n\t\treturn\n\n\tfrappe.reload_doc(\"accounts\", \"doctype\", \"tax_category\")\n\n\tfor doctype in [\"Sales Invoice\", \"Delivery Note\", \"Purchase Invoice\"]:\n\t\tfrappe.db.sql(\n\t\t\t,\n\t\t\tdoctype,\n\t\t)\n\n\tmake_custom_fields()\n\n\tfrappe.db.sql(\n\t\t\n\t)\n\n\tfrappe.db.sql(\n\t\t\n\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 17, "n_words": 32, "vocab_size": 28, "complexity": 3, "nloc": 31, "token_counts": 126, "n_ast_nodes": 141, "n_identifiers": 10, "random_cut": "def execute():\n\tcompany = frappe.get_all(\"Company\", filters={\"country\": \"India\"})\n\tif not company:\n\t\t" }, { "id": 248164, "commit_id": "116a4c8340b729ffde43be33df24d417384cb28b", "repo": "synapse", "path": "tests/rest/client/test_sync.py", "file_name": "test_sync.py", "fun_name": "test_knock_room_state", "commit_message": "Implement changes to MSC2285 (hidden read receipts) (#12168)\n\n* Changes hidden read receipts to be a separate receipt type\r\n (instead of a field on `m.read`).\r\n* Updates the `/receipts` endpoint to accept `m.fully_read`.", "code": "def test_knock_room_state(self) -> None:\n \n # Knock on a room\n channel = self.make_request(\n \"POST\",\n f\"/_matrix/client/r0/knock/{self.room_id}\",\n b\"{}\",\n self.knocker_tok,\n )\n self.assertEqual(200, channel.code, channel.result)\n\n # We expect to see the knock event in the stripped room state later\n self.expected_room_state[EventTypes.Member] = {\n \"content\": {\"membership\": \"knock\", \"displayname\": \"knocker\"},\n \"state_key\": \"@knocker:test\",\n }\n\n # Check that /sync includes stripped state from the room\n channel = self.make_request(\n \"GET\",\n self.url % self.next_batch,\n access_token=self.knocker_tok,\n )\n self.assertEqual(channel.code, 200, channel.json_body)\n\n # Extract the stripped room state events from /sync\n knock_entry = channel.json_body[\"rooms\"][\"knock\"]\n room_state_events = knock_entry[self.room_id][\"knock_state\"][\"events\"]\n\n # Validate that the knock membership event came last\n 
self.assertEqual(room_state_events[-1][\"type\"], EventTypes.Member)\n\n # Validate the stripped room state events\n self.check_knock_room_state_against_room_state(\n room_state_events, self.expected_room_state\n )\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 354, "n_words": 104, "vocab_size": 69, "complexity": 1, "nloc": 25, "token_counts": 157, "n_ast_nodes": 271, "n_identifiers": 19, "random_cut": "def test_knock_room_state(self) -> None:\n \n # Knock on a room\n channel = self.make_request(\n \"POST\",\n f\"/_matrix/client/r0/knock/{self.room_id}\",\n b\"{}\",\n self.knocker_tok,\n )\n self.assertEqual(200, channel.code, channel.result)\n\n # We expect to see the knock event in the stripped room state later\n self.expected_room_state[EventTypes.Member] = {\n \"content\": {\"membership\": \"knock\", \"displayname\": \"knocker\"},\n \"state_key\": \"@knocker:te" }, { "id": 85939, "commit_id": "04077133ca6e56647aca948e5ac21d3260b81f3f", "repo": "sentry", "path": "tests/sentry/snuba/metrics/test_query.py", "file_name": "test_query.py", "fun_name": "test_validate_distribution_functions_in_orderby", "commit_message": "feat(metrics): Make metrics layer accept MRI directly [TET-321] (#39003)\n\nThe metrics layer entrypoint which is the `MetricsQuery` object used to\r\naccept public names. As public names is not the naming contract we\r\nguarantee not to change, this PR allows `MetricQuery` object to directly\r\naccept MRI as that is the naming contract we guarantee", "code": "def test_validate_distribution_functions_in_orderby():\n # Validate no exception is raised when all orderBy fields are presented the select\n metric_field_1 = MetricField(op=\"avg\", metric_mri=TransactionMRI.DURATION.value)\n metric_field_2 = MetricField(op=\"p50\", metric_mri=TransactionMRI.DURATION.value)\n\n metrics_query_dict = (\n MetricsQueryBuilder()\n .with_select([metric_field_1, metric_field_2])\n .with_orderby(\n [\n OrderBy(field=metric_field_1, direction=Direction.ASC),\n OrderBy(field=metric_field_2, direction=Direction.ASC),\n ]\n )\n .to_metrics_query_dict()\n )\n MetricsQuery(**metrics_query_dict)\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 145, "n_words": 41, "vocab_size": 36, "complexity": 1, "nloc": 15, "token_counts": 93, "n_ast_nodes": 148, "n_identifiers": 20, "random_cut": "def test_validate_distribution_functions_in_orderby():\n # Validate no exception is raised when all orderBy fields are presented the select\n metric_field_1 = MetricField(op=\"avg\", metric_mri=TransactionMRI.DURATION.value)\n metric_field_2 = MetricField(op=\"p50\", metric_mri=TransactionMRI.DURATION.value)\n\n metrics_query_dict = (\n MetricsQueryBuilder()\n .with_select([metri" }, { "id": 266557, "commit_id": "b493c590bcee9b64e8ae02c17d4fde2331e0598b", "repo": "ansible", "path": "lib/ansible/modules/git.py", "file_name": "git.py", "fun_name": "write_ssh_wrapper", "commit_message": "Bypass fragile git ssh wrapper (#73404)\n\ngit module now uses env vars exclusively\r\n\r\n - updated docs to clarify usage\r\n - now env vars append instead of overwrite to allow existing custom setups to keep working\r\n fixes #38104, #64673, #64674\r\n - added note for hostkeychecking more securely\r\n fixes #69846\r\n - keep script cause old versions still choke on env\r\n - env var cannot hold more than 'command' for older 
versions\r\n - all ssh_opts in one place", "code": "def write_ssh_wrapper(module):\n \n try:\n # make sure we have full permission to the module_dir, which\n # may not be the case if we're sudo'ing to a non-root user\n if os.access(module.tmpdir, os.W_OK | os.R_OK | os.X_OK):\n fd, wrapper_path = tempfile.mkstemp(prefix=module.tmpdir + '/')\n else:\n raise OSError\n except (IOError, OSError):\n fd, wrapper_path = tempfile.mkstemp()\n\n # use existing git_ssh/ssh_command, fallback to 'ssh'\n template = b( % os.environ.get('GIT_SSH', os.environ.get('GIT_SSH_COMMAND', 'ssh')))\n\n # write it\n with os.fdopen(fd, 'w+b') as fh:\n fh.write(template)\n\n # set execute\n st = os.stat(wrapper_path)\n os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC)\n\n module.debug('Wrote temp git ssh wrapper (%s): %s' % (wrapper_path, template))\n\n # ensure we cleanup after ourselves\n module.add_cleanup_file(path=wrapper_path)\n\n return wrapper_path\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 208, "n_words": 102, "vocab_size": 83, "complexity": 3, "nloc": 18, "token_counts": 154, "n_ast_nodes": 265, "n_identifiers": 30, "random_cut": "def write_ssh_wrapper(module):\n \n try:\n # make sure we have full permission to the module_dir, which\n # may not be the case if we're sudo'ing to a non-root user\n if os.access(module.tmpdir, os.W_OK | os.R_OK | os.X_OK):\n fd, wrapper_path = tempfile.mkstemp(prefix=module.tmpdir + '/')\n else:\n raise OSError\n except (IOError, OSError):\n fd, wrapper_path = tempfile.mkstemp()\n\n # use existing git_ssh/ssh_command, fallback to 'ssh'\n template = b( % os.environ.get('GIT_SSH', os.environ.get('GIT_SSH_COMMAND', 'ss" }, { "id": 198654, "commit_id": "790c4cef5e61644bbb6c467db1b902a8c482ee4b", "repo": "sympy", "path": "sympy/integrals/tests/test_integrals.py", "file_name": "test_integrals.py", "fun_name": "test_issue_23718", "commit_message": "fix(integrals): fix degeneracy checking in heurisch\n\nPreviously heurisch used solve with a single equation rather than a list\ncontaining that equation i.e. solve(eq) rather than solve([eq]). 
This\ntakes different codepaths in solve and the [eq] codepath is more robust.\nThis commit changes heurisch to use [eq] and also changes the Piecewise\nhandling routine to produce deterministic output when there are multiple\ndegenerate cases to handle.", "code": "def test_issue_23718():\n f = 1/(b*cos(x) + a*sin(x))\n Fpos = (-log(-a/b + tan(x/2) - sqrt(a**2 + b**2)/b)/sqrt(a**2 + b**2)\n +log(-a/b + tan(x/2) + sqrt(a**2 + b**2)/b)/sqrt(a**2 + b**2))\n F = Piecewise(\n # XXX: The zoo case here is for a=b=0 so it should just be zoo or maybe\n # it doesn't really need to be included at all given that the original\n # integrand is really undefined in that case anyway.\n (zoo*(-log(tan(x/2) - 1) + log(tan(x/2) + 1)), Eq(a, 0) & Eq(b, 0)),\n (log(tan(x/2))/a, Eq(b, 0)),\n (-I/(-I*b*sin(x) + b*cos(x)), Eq(a, -I*b)),\n (I/(I*b*sin(x) + b*cos(x)), Eq(a, I*b)),\n (Fpos, True),\n )\n assert integrate(f, x) == F\n\n ap, bp = symbols('a, b', positive=True)\n rep = {a: ap, b: bp}\n assert integrate(f.subs(rep), x) == Fpos.subs(rep)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 321, "n_words": 120, "vocab_size": 82, "complexity": 1, "nloc": 15, "token_counts": 298, "n_ast_nodes": 457, "n_identifiers": 23, "random_cut": "def test_issue_23718():\n f = 1/(b*cos(x) + a*sin(x))\n Fpos = (-log(-a/b + tan(x/2) - sqrt(a**2 + b**2)/b)/sqrt(a**2 + b**2)\n +log(-a/b + tan(x/2) + sqrt(a**2 + b**2)/b)/sqrt(a**2 + b**2))\n F = Piecewise(\n # XXX: The zoo case here is for a=b=0 so it should just be zoo or maybe\n # it doesn't really need to be included at all given that the original\n # integrand is really undefined in that case anyway.\n (zoo*(-log(tan(x/2) - 1) + log(tan(x/2) + 1)), Eq(a, 0) & Eq(b, 0)),\n (log(tan(x/2))/a, Eq(b, 0)),\n (-I/(-I*b*sin(" }, { "id": 135811, "commit_id": "2ed09c54459cc3f74e2dab13406018698559856c", "repo": "ray", "path": "rllib/algorithms/pg/pg.py", "file_name": "pg.py", "fun_name": "validate", "commit_message": "[RLlib] Move all config validation logic into AlgorithmConfig classes. (#29854)", "code": "def validate(self) -> None:\n # Call super's validation method.\n super().validate()\n\n # Check for mismatches between `train_batch_size` and\n # `rollout_fragment_length` (if not \"auto\")..\n # Note: Only check this if `train_batch_size` > 0 (DDPPO sets this\n # to -1 to auto-calculate the actual batch size later).\n if (\n self.rollout_fragment_length != \"auto\"\n and not self.in_evaluation\n and self.train_batch_size > 0\n ):\n min_batch_size = (\n max(self.num_rollout_workers, 1)\n * self.num_envs_per_worker\n * self.rollout_fragment_length\n )\n batch_size = min_batch_size\n while batch_size < self.train_batch_size:\n batch_size += min_batch_size\n if (\n batch_size - self.train_batch_size > 0.1 * self.train_batch_size\n or batch_size - min_batch_size - self.train_batch_size\n > (0.1 * self.train_batch_size)\n ):\n suggested_rollout_fragment_length = self.train_batch_size // (\n self.num_envs_per_worker * (self.num_rollout_workers or 1)\n )\n raise ValueError(\n f\"Your desired `train_batch_size` ({self.train_batch_size}) or a \"\n \"value 10% off of that cannot be achieved with your other \"\n f\"settings (num_rollout_workers={self.num_rollout_workers}; \"\n f\"num_envs_per_worker={self.num_envs_per_worker}; \"\n f\"rollout_fragment_length={self.rollout_fragment_length})! 
\"\n \"Try setting `rollout_fragment_length` to 'auto' OR \"\n f\"{suggested_rollout_fragment_length}.\"\n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 616, "n_words": 145, "vocab_size": 91, "complexity": 8, "nloc": 32, "token_counts": 136, "n_ast_nodes": 251, "n_identifiers": 13, "random_cut": "def validate(self) -> None:\n # Call super's validation method.\n super().validate()\n\n # Check for mismatches between `train_batch_size` and\n # `rollout_fragment_length` (if not \"auto\")..\n # Note: Only check t" }, { "id": 271688, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_generator_test.py", "file_name": "training_generator_test.py", "fun_name": "test_evaluate_generator_method", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_evaluate_generator_method(self):\n model = test_utils.get_small_mlp(\n num_hidden=3, num_classes=4, input_dim=2\n )\n model.compile(\n loss=\"mse\",\n optimizer=rmsprop.RMSprop(1e-3),\n metrics=[\"mae\", metrics_module.CategoricalAccuracy()],\n run_eagerly=test_utils.should_run_eagerly(),\n )\n\n model.evaluate_generator(\n custom_generator_threads(),\n steps=5,\n max_queue_size=10,\n workers=2,\n verbose=1,\n use_multiprocessing=True,\n )\n model.evaluate_generator(\n custom_generator(),\n steps=5,\n max_queue_size=10,\n use_multiprocessing=False,\n )\n model.evaluate_generator(\n custom_generator(),\n steps=5,\n max_queue_size=10,\n use_multiprocessing=False,\n workers=0,\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 326, "n_words": 37, "vocab_size": 25, "complexity": 1, "nloc": 31, "token_counts": 138, "n_ast_nodes": 200, "n_identifiers": 26, "random_cut": "def test_evaluate_generator_method(self):\n model = test_utils.get_small_mlp(\n num_hidden=3, num_classes=4, input_dim=2\n )\n model.compile(\n loss=\"mse\",\n optimizer=rmsprop.RMSprop(1e-3),\n metrics=[\"mae\", metrics_module.CategoricalAccuracy()],\n run_eagerly=test_utils.should_run_eagerly(),\n )\n\n model.evaluate_generator(\n custom_generator_threads(),\n steps=5,\n max_queue_size=10,\n workers=2,\n verbose=1,\n use_multiprocessing=True,\n )" }, { "id": 196442, "commit_id": "8fe2c879fe862d9ab6547130e4ff65010eecb549", "repo": "sympy", "path": "sympy/printing/tests/test_pycode.py", "file_name": "test_pycode.py", "fun_name": "test_array_printer", "commit_message": "printing: ArrayExpr support\n\nBetter support for numpy-style arrays in `TensorflowPrinter` and\n`NumPyPrinter`. 
Printing methods are now collected in the\n`ArrayPrinter` class to avoid code duplications/maintainance errors.\nPrinting for `ZeroArray` and `OneArray` has been added.\n`ArrayDiagonal` printing now also works for multiple diagonals and\ndiagonals spanning more than two indices.\n`ArrayContractiong` printing now also works when its base is not a\n`ArrayTensorProduct`.", "code": "def test_array_printer():\n A = ArraySymbol('A', (4,4,6,6,6))\n I = IndexedBase('I')\n\n prntr = NumPyPrinter()\n assert prntr.doprint(ZeroArray(5)) == 'numpy.zeros((5,))'\n assert prntr.doprint(OneArray(5)) == 'numpy.ones((5,))'\n assert prntr.doprint(ArrayContraction(A, [2,3])) == 'numpy.einsum(\"abccd->abd\", A)'\n assert prntr.doprint(I) == 'I'\n assert prntr.doprint(ArrayDiagonal(A, [2,3,4])) == 'numpy.einsum(\"abccc->abc\", A)'\n assert prntr.doprint(ArrayDiagonal(A, [0,1], [2,3])) == 'numpy.einsum(\"aabbc->cab\", A)'\n assert prntr.doprint(ArrayContraction(A, [2], [3])) == 'numpy.einsum(\"abcde->abe\", A)'\n\n prntr = TensorflowPrinter()\n assert prntr.doprint(ZeroArray(5)) == 'tensorflow.zeros((5,))'\n assert prntr.doprint(OneArray(5)) == 'tensorflow.ones((5,))'\n assert prntr.doprint(ArrayContraction(A, [2,3])) == 'tensorflow.linalg.einsum(\"abccd->abd\", A)'\n assert prntr.doprint(I) == 'I'\n assert prntr.doprint(ArrayDiagonal(A, [2,3,4])) == 'tensorflow.linalg.einsum(\"abccc->abc\", A)'\n assert prntr.doprint(ArrayDiagonal(A, [0,1], [2,3])) == 'tensorflow.linalg.einsum(\"aabbc->cab\", A)'\n assert prntr.doprint(ArrayContraction(A, [2], [3])) == 'tensorflow.linalg.einsum(\"abcde->abe\", A)'\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 144, "n_words": 91, "vocab_size": 37, "complexity": 1, "nloc": 19, "token_counts": 268, "n_ast_nodes": 427, "n_identifiers": 13, "random_cut": "def test_array_printer():\n A = ArraySymbol('A', (4,4,6,6,6))\n I = IndexedBase('I')\n\n prntr = NumPyPrinter()\n assert prntr.doprint(ZeroArray(5)) == 'numpy" }, { "id": 271122, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/data_adapter.py", "file_name": "data_adapter.py", "fun_name": "_is_list_of_scalars", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _is_list_of_scalars(inp):\n if isinstance(inp, (float, int, str, bytes, bytearray)):\n return True\n if isinstance(inp, (list, tuple)) and inp:\n return ListsOfScalarsDataAdapter._is_list_of_scalars(inp[0])\n return False\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 63, "n_words": 21, "vocab_size": 17, "complexity": 4, "nloc": 6, "token_counts": 51, "n_ast_nodes": 73, "n_identifiers": 11, "random_cut": "def _is_list_of_scalars(inp):\n if isinstance(inp, (float, " }, { "id": 8778, "commit_id": "51e763580a130801e4af64221614777761d8b364", "repo": "ludwig", "path": "tests/integration_tests/test_torchscript.py", "file_name": "test_torchscript.py", "fun_name": "test_torchscript_e2e_text_hf_tokenizer_truncated_sequence", "commit_message": "Fix TorchText version in tokenizers ahead of torch 1.13.0 upgrade (#2838)\n\n* fix torchtext version in tokenizers ahead of torch 1.13.0 upgrade\r\n\r\n* add truncation test to torchscript\r\n\r\n* check version before adding hf tokenizer to triton test\r\n\r\n* revert triton in case the changes affected tests?\r\n\r\n* cleanup", "code": "def 
test_torchscript_e2e_text_hf_tokenizer_truncated_sequence(tmpdir, csv_filename):\n data_csv_path = os.path.join(tmpdir, csv_filename)\n input_features = [text_feature(encoder={\"vocab_size\": 3, \"type\": \"bert\"}, preprocessing={\"max_sequence_length\": 3})]\n output_features = [\n text_feature(decoder={\"vocab_size\": 3}),\n ]\n backend = LocalTestBackend()\n config = {\"input_features\": input_features, \"output_features\": output_features, TRAINER: {\"epochs\": 2}}\n training_data_csv_path = generate_data(input_features, output_features, data_csv_path)\n\n validate_torchscript_outputs(tmpdir, config, backend, training_data_csv_path)\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 72, "n_words": 42, "vocab_size": 36, "complexity": 1, "nloc": 10, "token_counts": 104, "n_ast_nodes": 169, "n_identifiers": 20, "random_cut": "def test_torchscript_e2e_text_hf_tokenizer_truncated_sequence(tmpdir, csv_filename):\n data_csv_path = os.path.join(tmpdir, csv_filename)\n input_features = [text_feature(encoder={\"vocab_size\": 3, \"type\": \"bert\"}, preprocessing={\"max_sequence_length\": 3})]\n output_features = [\n text_feature(decoder={\"vocab_size\": 3}),\n ]\n backend = LocalTestBackend()\n config = {\"input_features\": input_features, \"output_features\": output_features, TRAINER: {\"epochs\": 2}}\n training_data_csv_path = generate_data(input_features, output_features, d" }, { "id": 270083, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/callbacks_test.py", "file_name": "callbacks_test.py", "fun_name": "test_default_callbacks_no_warning", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_default_callbacks_no_warning(self):\n # Test that without the callback no warning is raised\n model = sequential.Sequential()\n model.add(keras.layers.Dense(1))\n model.compile(\n \"sgd\", loss=\"mse\", run_eagerly=test_utils.should_run_eagerly()\n )\n\n warning_messages = []\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 76, "n_words": 24, "vocab_size": 23, "complexity": 1, "nloc": 16, "token_counts": 119, "n_ast_nodes": 81, "n_identifiers": 15, "random_cut": "def test_default_callbacks_no_warning(self):\n # Test that without the callback no warning is raised\n model = seq" }, { "id": 83907, "commit_id": "c34ac1fcd428b469e85bcd3070938e4f59e60b18", "repo": "zulip", "path": "corporate/tests/test_stripe.py", "file_name": "test_stripe.py", "fun_name": "test_redirect_for_billing_home", "commit_message": "typing: Access url via key \"Location\" instead of attribute \"url\".\n\nThis is a part of #18777.\n\nSigned-off-by: Zixuan James Li <359101898@qq.com>", "code": "def test_redirect_for_billing_home(self) -> None:\n user = self.example_user(\"iago\")\n self.login_user(user)\n response = self.client_get(\"/billing/\")\n self.assertEqual(response.status_code, 302)\n self.assertEqual(\"/upgrade/\", response[\"Location\"])\n\n user.realm.plan_type = Realm.PLAN_TYPE_STANDARD_FREE\n user.realm.save()\n response = self.client_get(\"/billing/\")\n self.assertEqual(response.status_code, 200)\n\n user.realm.plan_type = Realm.PLAN_TYPE_LIMITED\n user.realm.save()\n Customer.objects.create(realm=user.realm, stripe_customer_id=\"cus_123\")\n response = self.client_get(\"/billing/\")\n self.assertEqual(response.status_code, 
302)\n self.assertEqual(\"/upgrade/\", response[\"Location\"])\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 141, "n_words": 37, "vocab_size": 21, "complexity": 1, "nloc": 16, "token_counts": 145, "n_ast_nodes": 245, "n_identifiers": 19, "random_cut": "def test_redirect_for_billing_home(self) -> None:\n user = self.example_user(\"iago\")\n self.login_u" }, { "id": 310599, "commit_id": "7781e308cd7b28c67b6cf339f9b115c7190456fe", "repo": "core", "path": "homeassistant/components/amcrest/camera.py", "file_name": "camera.py", "fun_name": "_async_get_motion_recording", "commit_message": "Migrate amcrest integration to new async API (#56294)", "code": "async def _async_get_motion_recording(self) -> bool:\n return await self._api.async_is_record_on_motion_detection()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 28, "n_identifiers": 5, "random_cut": "async def _async_get_motion_recording(self) -> bool:\n return await self._api.async_is_record_on_motion_detection()\n" }, { "id": 164083, "commit_id": "f46df091df3afea25a273f491d1f6b2c7d20b32c", "repo": "pandas", "path": "pandas/tests/io/parser/dtypes/test_dtypes_basic.py", "file_name": "test_dtypes_basic.py", "fun_name": "test_decimal_and_exponential", "commit_message": "TST: Remove unused fixtures (#45692)\n\n* TST: Remove unused fixtures\r\n\r\n* Undo a removed fixture\r\n\r\n* Add back other fixtures\r\n\r\n* Undo a file\r\n\r\n* Try undoing this?\r\n\r\n* Revert \"Try undoing this?\"\r\n\r\nThis reverts commit 0e56cb04f5e8cb1f7b2ac4c5e6191485bb2fe1ab.", "code": "def test_decimal_and_exponential(python_parser_only, numeric_decimal, thousands):\n # GH#31920\n decimal_number_check(python_parser_only, numeric_decimal, thousands)\n\n\n@pytest.mark.parametrize(\"thousands\", [\"_\", None])\n@pytest.mark.parametrize(\"float_precision\", [None, \"legacy\", \"high\", \"round_trip\"])", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"thousands\", [\"_\", None])\n@pytest.mark.parametrize(\"float_precision\", [None, \"legacy\", \"high\", \"round_trip\"])", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 20, "n_words": 17, "vocab_size": 16, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 84, "n_identifiers": 8, "random_cut": "def test_decimal_and_exponential(python_parser_only, numeric_decimal, thousands):\n" }, { "id": 201908, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/builtin_server/tests.py", "file_name": "tests.py", "fun_name": "test_file_wrapper_uses_sendfile", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_file_wrapper_uses_sendfile(self):\n env = {\"SERVER_PROTOCOL\": \"HTTP/1.0\"}\n handler = FileWrapperHandler(None, BytesIO(), BytesIO(), env)\n handler.run(wsgi_app_file_wrapper)\n self.assertTrue(handler._used_sendfile)\n self.assertEqual(handler.stdout.getvalue(), b\"\")\n self.assertEqual(handler.stderr.getvalue(), b\"\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 59, "n_words": 18, "vocab_size": 15, "complexity": 1, "nloc": 7, "token_counts": 72, "n_ast_nodes": 119, "n_identifiers": 14, 
"random_cut": "def test_file_wrapper_uses_sendfile(self):\n env = {\"SERVER_PROTOCOL\": \"HTTP/1.0\"}\n ha" }, { "id": 264011, "commit_id": "8bd9c6726280aa0094c5e83ffcf31a0dbc7a0336", "repo": "pyinstaller", "path": "PyInstaller/building/api.py", "file_name": "api.py", "fun_name": "_process_toc", "commit_message": "building: delay merging of reference path and name in DEPENDENCY TOC entry\n\nWithin MERGE, do not combine the reference path and target file\nname into a single string and store it as the destination name\n(the first TOC element). Instead, store the target file name as\ndestination name (the first TOC element) and the reference path\ninto the source name (the second TOC element, which is otherwise\nleft unused for DEPENDENCY TOC entries).\n\nHave the CArchive writer perform the final merge, before writing\nthe entry to the PKG file.\n\nThis ensures that the target name remains unchanged within the\nTOC, making it subject of de-duplication codepaths and duplication\nchecks. Previously, an entry for DEPENDENCY may end up duplicating\nanother entry (e.g., EXTENSION) at run-time, due to target name\ncontaining the reference path prefix.\n\nWe can also get rid of DEPENDENCY-specific handling in `checkCache`\n(which returns without any processing if `fnm` contains a colon);\nthis crutch was needed because `PKG.assemble` incorrectly handled\nDEPENDENCY entries and unnecessarily tried running them through\n`checkCache`. So we rework that part of `PKG.assemble` to process\nDEPENDENCY entries as part of general entry handling. At this point,\nthis becomes necessary, because even if we kept the hack in\n`checkCache`, there is no colon in the `fnm` anymore, so the check\nwould fail, leading to error...", "code": "def _process_toc(self, toc, path):\n \n # NOTE: unfortunately, these need to keep two separate lists. See the comment in `_merge_dependencies` on why\n # this is so.\n toc_keep = []\n toc_refs = []\n for i, tpl in enumerate(toc):\n if not tpl[1] in self._dependencies:\n logger.debug(\"Adding dependency %s located in %s\", tpl[1], path)\n self._dependencies[tpl[1]] = path\n # Add entry to list of kept TOC entries\n toc_keep.append(tpl)\n else:\n dep_path = self._get_relative_path(path, self._dependencies[tpl[1]])\n # Ignore references that point to the origin package. This can happen if the same resource is listed\n # multiple times in TOCs (e.g., once as binary and once as data).\n if dep_path.endswith(path):\n logger.debug(\n \"Ignoring self-reference of %s for %s, located in %s - duplicated TOC entry?\", tpl[1], path,\n dep_path\n )\n # The entry is a duplicate, and should be ignored (i.e., do not add it to either of output TOCs).\n continue\n logger.debug(\"Referencing %s to be a dependency for %s, located in %s\", tpl[1], path, dep_path)\n # Create new DEPENDENCY entry; under destination path (first element), we store the original destination\n # path, while source path contains the relative reference path.\n toc_refs.append((tpl[0], dep_path, \"DEPENDENCY\"))\n\n return toc_keep, toc_refs\n\n # TODO: use pathlib.Path.relative_to() instead.", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 565, "n_words": 189, "vocab_size": 130, "complexity": 4, "nloc": 19, "token_counts": 147, "n_ast_nodes": 236, "n_identifiers": 16, "random_cut": "def _process_toc(self, toc, path):\n \n # NOTE: unfortunately, these need to keep two separate lists. 
See the comment in `_merge_dependencies` on why\n # this is so.\n toc_keep = []\n toc_refs = []\n for i, tpl in enumerate(toc):\n if not tpl[1] in self._dependencies:\n logger.debug(\"Adding dependency %s located in %s\", tpl[1], path)\n self._dependencies[tpl[1]] = path\n # Add entry to list of kept TOC entries\n toc_keep.append(tpl)\n else:\n dep_path = self._get_relative_path(path, self._dependencies[tpl[1]])\n # Ignore references that point to the origin package. This can happen if the same resource is listed\n # multiple times in TOCs (e.g., once as binary and once as data).\n if dep_path.endswith(path):\n logger.debug(\n \"Ignoring self-reference of %s for %s, located in %s - duplicated TOC entry?\", tpl[1], path,\n dep_path\n )\n # The entry is a duplicate, and should be ignored (i.e., do not add it to either of output TOCs).\n continue\n logger.debug(\"Referencing %s to be a dependency for %s, located in %s\", tpl[1], path, dep_path)\n # Create new DEPENDENCY entry; under destination path (first element), we store the original destination\n # path, while source path contains the relative reference path.\n toc_refs.append((tpl[0], dep_path, \"DEPENDENCY\"))\n\n return toc_keep, toc_refs\n\n # TODO: use pathlib.Path.relative_to() instead." }, { "id": 66109, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/exit_interview/exit_interview.py", "file_name": "exit_interview.py", "fun_name": "get_interviews", "commit_message": "style: format code with black", "code": "def get_interviews(interviews):\n\timport json\n\n\tif isinstance(interviews, str):\n\t\tinterviews = json.loads(interviews)\n\n\tif not len(interviews):\n\t\tfrappe.throw(_(\"Atleast one interview has to be selected.\"))\n\n\treturn interviews\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 15, "n_words": 22, "vocab_size": 20, "complexity": 3, "nloc": 7, "token_counts": 41, "n_ast_nodes": 70, "n_identifiers": 10, "random_cut": "def get_interviews(interviews):\n\timport json\n\n\tif isinstance(interviews, str):\n\t\tinterviews = json.loads(interviews)\n\n\tif not len(interviews):\n\t\tfrappe.throw(_(\"Atleast one interview has to be" }, { "id": 163444, "commit_id": "51675d0839480ba7ada44cc93ba8a8df94d33de0", "repo": "pandas", "path": "pandas/tests/frame/indexing/test_indexing.py", "file_name": "test_indexing.py", "fun_name": "test_iloc_row_slice_view", "commit_message": "DEPR: inconsistent series[i:j] slicing with Int64Index GH#45162 (#45324)", "code": "def test_iloc_row_slice_view(self, using_array_manager):\n df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))\n original = df.copy()\n\n # verify slice is view\n # setting it makes it raise/warn\n subset = df.iloc[slice(4, 8)]\n\n assert np.shares_memory(df[2], subset[2])\n\n msg = r\"\\nA value is trying to be set on a copy of a slice from a DataFrame\"\n with pytest.raises(com.SettingWithCopyError, match=msg):\n subset.loc[:, 2] = 0.0\n\n exp_col = original[2].copy()\n # TODO(ArrayManager) verify it is expected that the original didn't change\n if not using_array_manager:\n exp_col._values[4:8] = 0.0\n tm.assert_series_equal(df[2], exp_col)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 183, "n_words": 78, "vocab_size": 60, "complexity": 2, "nloc": 12, "token_counts": 135, "n_ast_nodes": 202, "n_identifiers": 27, 
"random_cut": "def test_iloc_row_slice_view(self, using_array_manager):\n df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))\n original = df.copy()\n\n # verify slice is view\n # setting it makes it raise/warn\n subset = df.iloc[slice(4, 8)]\n\n assert np.shares_memory(df[2], subset[2])\n\n msg = r\"\\nA value is tryi" }, { "id": 205821, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/sql/compiler.py", "file_name": "compiler.py", "fun_name": "_expr_refs_base_model", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _expr_refs_base_model(cls, expr, base_model):\n if isinstance(expr, Query):\n return expr.model == base_model\n if not hasattr(expr, \"get_source_expressions\"):\n return False\n return any(\n cls._expr_refs_base_model(source_expr, base_model)\n for source_expr in expr.get_source_expressions()\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 97, "n_words": 26, "vocab_size": 23, "complexity": 4, "nloc": 9, "token_counts": 54, "n_ast_nodes": 83, "n_identifiers": 11, "random_cut": "def _expr_refs_base_model(cls, expr, base_model):\n if isinstance(expr, Query):" }, { "id": 68076, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/templates/pages/partners.py", "file_name": "partners.py", "fun_name": "get_context", "commit_message": "style: format code with black", "code": "def get_context(context):\n\tpartners = frappe.db.sql(\n\t\t,\n\t\tas_dict=True,\n\t)\n\n\treturn {\"partners\": partners, \"title\": page_title}\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 7, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 7, "token_counts": 30, "n_ast_nodes": 51, "n_identifiers": 8, "random_cut": "def get_context(context):\n\tpartners = frappe.db.sql(\n\t\t,\n\t\tas_dict=True,\n\t)\n\n\treturn {\"partners\": partners, \"title\": p" }, { "id": 155242, "commit_id": "7c009c747caa90554607e30b9ac2bd1b190b8c7d", "repo": "modin", "path": "asv_bench/benchmarks/benchmarks.py", "file_name": "benchmarks.py", "fun_name": "time_getitem_slice", "commit_message": "TEST-#5261: port indexing, reindex and fillna benchmarks from pandas github (#5244)\n\nSigned-off-by: arunjose696 \r\nCo-authored-by: Anatoly Myachev ", "code": "def time_getitem_slice(self, shape, index, index_structure):\n execute(self.data[: self.index_to_query])\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 34, "n_identifiers": 8, "random_cut": "def time_getitem_slice(self, shape, index, index_structure):\n execute(self.data[: self.index_to_query])\n" }, { "id": 217166, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/encodings/punycode.py", "file_name": "punycode.py", "fun_name": "decode", "commit_message": "add python 3.10.4 for windows", "code": "def decode(self, input, final=False):\n if self.errors not in ('strict', 'replace', 'ignore'):\n raise UnicodeError(\"Unsupported error handling \"+self.errors)\n return punycode_decode(input, self.errors)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, 
"ast_levels": 11, "n_whitespaces": 43, "n_words": 19, "vocab_size": 19, "complexity": 2, "nloc": 4, "token_counts": 43, "n_ast_nodes": 70, "n_identifiers": 7, "random_cut": "def decode(self, input, final=False):\n if self.errors not in ('strict', 'replace', 'ignore'):\n raise UnicodeError(\"Unsupported error handlin" }, { "id": 53811, "commit_id": "60e203e0eef82f49853fca133ed457f600044e8e", "repo": "prefect", "path": "src/prefect/settings.py", "file_name": "settings.py", "fun_name": "unreduce_settings", "commit_message": "Fix display of settings in api reference", "code": "def unreduce_settings(json):\n \n return Settings.parse_raw(json)\n\n\n# Dynamically create a pydantic model that includes all of our settings\n\nSettingsFieldsMixin = create_model(\n \"SettingsFieldsMixin\",\n __base__=BaseSettings,\n **{setting.name: (setting.type, setting.field) for setting in SETTINGS.values()},\n)\n\n\n# Defining a class after this that inherits the dynamic class rather than setting\n# __base__ to the following class ensures that mkdocstrings properly generates\n# reference documentation. It does support module-level variables, even if they have\n# __doc__ set.\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 77, "n_words": 69, "vocab_size": 58, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 83, "n_identifiers": 14, "random_cut": "def unreduce_settings(json):\n \n return Settings.parse_raw(json)\n\n\n# Dynamically create a pydantic model that includes all of our settings\n\nSettingsFieldsMixin = create_model(\n \"SettingsFieldsMixin\",\n __base__=BaseSettings,\n **{setting.name: (setting.type, setting.field) f" }, { "id": 188451, "commit_id": "03afa4f9743fb8e6892be62a44b19dc48e0ed7f0", "repo": "jumpserver", "path": "apps/settings/models.py", "file_name": "models.py", "fun_name": "refresh_setting", "commit_message": "Fix rbac (#7713)\n\n* fix: token 系统用户增加 protocol\r\n\r\n* fix: 修复清除orphan session时同时清除对应的 session_task\r\n\r\n* perf: 修改 connection token api\r\n\r\n* fix: 修复无法获取系统角色绑定的问题\r\n\r\n* perf: 增加 db terminal 及 magnus 组件\r\n\r\n* perf: 修改 migrations\r\n\r\n* fix: 修复AUTHENTICATION_BACKENDS相关的逻辑\r\n\r\n* fix: 修改判断backend认证逻辑\r\n\r\n* fix: 修复资产账号查看密码跳过mfa\r\n\r\n* fix: 修复用户组授权权限错误\r\n\r\n* feat: 支持COS对象存储\r\n\r\n* feat: 升级依赖 jms_storage==0.0.42\r\n\r\n* fix: 修复 koko api 问题\r\n\r\n* feat: 修改存储翻译信息\r\n\r\n* perf: 修改 ticket 权限\r\n\r\n* fix: 修复获取资产授权系统用户 get_queryset\r\n\r\n* perf: 抽取 ticket\r\n\r\n* perf: 修改 cmd filter 的权限\r\n\r\n* fix: 修改 ticket perm\r\n\r\n* fix: 修复oidc依赖问题\r\n\r\nCo-authored-by: Eric \r\nCo-authored-by: ibuler \r\nCo-authored-by: 小冯 \r\nCo-authored-by: feng626 <1304903146@qq.com>", "code": "def refresh_setting(self):\n setattr(settings, self.name, self.cleaned_value)\n self.refresh_keycloak_to_openid_if_need()\n", "url": "https://github.com/jumpserver/jumpserver.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 19, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 35, "n_identifiers": 7, "random_cut": "def refresh_setting(self):\n setattr(settings, self.name, self.cleaned_value)\n self.refresh_keycloak_to_openid" }, { "id": 55904, "commit_id": "168483e9cf038a3629f880f838b5aa9291a48411", "repo": "prefect", "path": "src/prefect/client.py", "file_name": "client.py", "fun_name": "read_block_schemas", "commit_message": "Block capabilities (PrefectHQ/orion#1898)\n\n* Add 
capabilities to BlockSchemas\r\n\r\n* Remove type field from BlockSchemas\r\n\r\n* Create postgres migration, bump API version", "code": "async def read_block_schemas(self) -> List[schemas.core.BlockSchema]:\n \n response = await self._client.post(f\"/block_schemas/filter\", json={})\n return pydantic.parse_obj_as(List[schemas.core.BlockSchema], response.json())\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 34, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 11, "token_counts": 52, "n_ast_nodes": 84, "n_identifiers": 12, "random_cut": "async def read_block_schemas(self) -> List[schemas.core.BlockSchema]:\n \n response = await self._client.post(f\"/block_sc" }, { "id": 77938, "commit_id": "e0a604e227efbaed6b072d17132e7ca806ef4948", "repo": "wagtail", "path": "wagtail/snippets/views/snippets.py", "file_name": "snippets.py", "fun_name": "history_label", "commit_message": "Add RevisionsCompare view in snippets", "code": "def history_label(self):\n return _(\"{model_name} history\").format(\n model_name=self.model._meta.verbose_name\n )\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 31, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 4, "token_counts": 23, "n_ast_nodes": 39, "n_identifiers": 8, "random_cut": "def history_label(self):\n return _(\"{model_name} history\").format(\n model_name=self.model._meta.ver" }, { "id": 127014, "commit_id": "786c7f45cfb3495527894f81097712eb76f77e63", "repo": "ray", "path": "doc/source/serve/doc_code/deploying_serve_example.py", "file_name": "deploying_serve_example.py", "fun_name": "hello", "commit_message": "[Serve][Doc] Update the doc code to use new api (#27689)\n\nCo-authored-by: Archit Kulkarni ", "code": "def hello(request):\n return \"hello world\"\n\n\nserve.run(hello.bind())\n# __deploy_in_k8s_end__\n\nsubprocess.check_output([\"ray\", \"stop\", \"--force\"])\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 10, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 7, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def hello(request):\n return \"hello world\"\n\n\nserve.run(hello.bind" }, { "id": 126321, "commit_id": "df124d0ad58ea7189e88f9fe42c1ee377ade9c8d", "repo": "ray", "path": "python/ray/train/tests/test_predictor.py", "file_name": "test_predictor.py", "fun_name": "test_predict", "commit_message": "[AIR - Datasets] Hide tensor extension from UDFs. (#27019)\n\nWe previously added automatic tensor extension casting on Datasets transformation outputs to allow the user to not have to worry about tensor column casting; however, this current state creates several issues:\r\n\r\n1. Not all tensors are supported, which means that we’ll need to have an opaque object dtype (i.e. ndarray of ndarray pointers) fallback for the Pandas-only case. Known unsupported tensor use cases:\r\na. Heterogeneous-shaped (i.e. ragged) tensors\r\nb. Struct arrays\r\n2. UDFs will expect a NumPy column and won’t know what to do with our TensorArray type. 
E.g., torchvision transforms don’t respect the array protocol (which they should), and instead only support Torch tensors and NumPy ndarrays; passing a TensorArray column or a TensorArrayElement (a single item in the TensorArray column) fails.\r\nImplicit casting with object dtype fallback on UDF outputs can make the input type to downstream UDFs nondeterministic, where the user won’t know if they’ll get a TensorArray column or an object dtype column.\r\n3. The tensor extension cast fallback warning spams the logs.\r\n\r\nThis PR:\r\n\r\n1. Adds automatic casting of tensor extension columns to NumPy ndarray columns for Datasets UDF inputs, meaning the UDFs will never have to see tensor extensions and that the UDF input column types will be consistent and deterministic; this fixes both (2) and (3).\r\n2. No longer implicitly falls back to an opaque object dtype when TensorArray casting fails (e.g. for ragged tensors), and instead raises an error; this fixes (4) but removes our support for (1).\r\n3. Adds a global enable_tensor_extension_casting config flag, which is True by default, that controls whether we perform this automatic casting. Turning off the implicit casting provides a path for (1), where the tensor extension can be avoided if working with ragged tensors in Pandas land. Turning off this flag also allows the user to explicitly control their tensor extension casting, if they want to work with it in their UDFs in order to reap the benefits of less data copies, more efficient slicing, stronger column typing, etc.", "code": "def test_predict(convert_to_pandas_mock, convert_from_pandas_mock):\n\n input = pd.DataFrame({\"x\": [1, 2, 3]})\n expected_output = input * 4.0\n\n convert_to_pandas_mock.return_value = input\n convert_from_pandas_mock.return_value = expected_output\n\n checkpoint = Checkpoint.from_dict(\n {\"factor\": 2.0, PREPROCESSOR_KEY: DummyPreprocessor()}\n )\n predictor = DummyPredictor.from_checkpoint(checkpoint)\n\n actual_output = predictor.predict(input)\n pd.testing.assert_frame_equal(actual_output, expected_output)\n\n # Ensure the proper conversion functions are called.\n convert_to_pandas_mock.assert_called_once_with(input, False)\n convert_from_pandas_mock.assert_called_once()\n\n pd.testing.assert_frame_equal(\n convert_from_pandas_mock.call_args[0][0], expected_output\n )\n assert convert_from_pandas_mock.call_args[1][\"type\"] == DataType.PANDAS\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 113, "n_words": 55, "vocab_size": 44, "complexity": 1, "nloc": 17, "token_counts": 133, "n_ast_nodes": 204, "n_identifiers": 25, "random_cut": "def test_predict(convert_to_pandas_mock, convert_from_pandas_mock):\n\n input = pd.DataFrame({\"x\": [1, 2, 3]})\n" }, { "id": 247366, "commit_id": "7e91107be1a4287873266e588a3c5b415279f4c8", "repo": "synapse", "path": "tests/rest/media/v1/test_filepath.py", "file_name": "test_filepath.py", "fun_name": "test_remote_media_thumbnail_legacy", "commit_message": "Add type hints to `tests/rest` (#12146)\n\n* Add type hints to `tests/rest`\r\n\r\n* newsfile\r\n\r\n* change import from `SigningKey`", "code": "def test_remote_media_thumbnail_legacy(self) -> None:\n \n self.assertEqual(\n self.filepaths.remote_media_thumbnail_rel_legacy(\n \"example.com\", \"GerZNDnDZVjsOtardLuwfIBg\", 800, 600, \"image/jpeg\"\n ),\n \"remote_thumbnail/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg\",\n )\n", "url": 
"https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 83, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 8, "token_counts": 32, "n_ast_nodes": 56, "n_identifiers": 5, "random_cut": "def test_remote_media_thumbnail_legacy(self) -> None:\n \n self.assertEqual(\n " }, { "id": 3378, "commit_id": "25fb7e7fd744f3852ebe8152db5514513f8a2c9a", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-hubspot/source_hubspot/api.py", "file_name": "api.py", "fun_name": "_update_state", "commit_message": "Source Hubspot: Some incremental CRM objects and engagements (#8887)", "code": "def _update_state(self, latest_cursor):\n if latest_cursor:\n new_state = max(latest_cursor, self._state) if self._state else latest_cursor\n if new_state != self._state:\n logger.info(f\"Advancing bookmark for {self.name} stream from {self._state} to {latest_cursor}\")\n self._state = new_state\n self._start_date = self._state\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 105, "n_words": 32, "vocab_size": 24, "complexity": 4, "nloc": 7, "token_counts": 52, "n_ast_nodes": 99, "n_identifiers": 10, "random_cut": "def _update_state(self, latest_cursor):\n if latest_cursor:\n new_state = max(latest_cursor, self._state) if self._state else latest_cursor\n if new_state != self._state:\n logger.info(f\"Advancing bookmark for {self.name} stream from {self._state} to {latest_cursor}\")\n self._state = new_state\n self._start_date = self._state\n" }, { "id": 146811, "commit_id": "cc1728120f7d49b0016d190971bc8056d3245c5d", "repo": "ray", "path": "python/ray/tune/utils/resource_updater.py", "file_name": "resource_updater.py", "fun_name": "get_num_cpus", "commit_message": "[Tune] Move resource updater out of trial executor (#23178)\n\n* simplify trial executor\r\n\r\n* update test\r\n\r\n* fix: proper resource update before initialization\r\n\r\n* add test to BUILD\r\n\r\n* add doc for resource updater", "code": "def get_num_cpus(self) -> int:\n self.update_avail_resources()\n return self._avail_resources.cpu\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 30, "n_identifiers": 6, "random_cut": "def get_num_cpus(self) -> int:\n self.update_avail_resources()\n return self._avail_resources." 
}, { "id": 100143, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_user_notification_fine_tuning.py", "file_name": "test_user_notification_fine_tuning.py", "fun_name": "test_permissions", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_permissions(self):\n new_user = self.create_user(email=\"b@example.com\")\n new_org = self.create_organization(name=\"New Org\")\n new_team = self.create_team(name=\"New Team\", organization=new_org, members=[new_user])\n new_project = self.create_project(\n organization=new_org, teams=[new_team], name=\"New Project\"\n )\n\n data = {str(new_org.id): 0}\n self.get_error_response(\"me\", \"reports\", status_code=403, **data)\n\n assert not UserOption.objects.filter(\n user=self.user, organization=new_org, key=\"reports\"\n ).exists()\n\n data = {str(new_project.id): 1}\n self.get_error_response(\"me\", \"alerts\", status_code=403, **data)\n\n value = NotificationSetting.objects.get_settings(\n ExternalProviders.EMAIL,\n NotificationSettingTypes.ISSUE_ALERTS,\n user=self.user,\n project=new_project,\n )\n assert value == NotificationSettingOptionValues.DEFAULT\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 221, "n_words": 58, "vocab_size": 42, "complexity": 1, "nloc": 21, "token_counts": 178, "n_ast_nodes": 283, "n_identifiers": 36, "random_cut": "def test_permissions(self):\n new_user = self.create_user(email=\"b@example.com\")\n new_org = self.create_organization(name=\"New Org\")\n new_team = self.create_team(name=\"New Team\", organization=new_org, members=[new_user])\n new_project = self.create_project(\n organization=new_org, teams=[new_team], name=\"New Project\"\n )\n\n data = {str(new_org.id): 0}\n self.get_error_response(\"me\", \"reports\", status_code=403, **data)\n\n assert not UserOption.objects.filter(\n user=s" }, { "id": 213790, "commit_id": "d743336b1f3654cd0315f380f43eed4116997c1d", "repo": "ivy", "path": "ivy_tests/test_core/test_container.py", "file_name": "test_container.py", "fun_name": "test_container_structural_diff", "commit_message": "renamed dev_str arg to dev for all methods.", "code": "def test_container_structural_diff(dev, call):\n # all different keys or shapes\n container_0 = Container({'a': ivy.array([1], dev=dev),\n 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})\n container_1 = Container({'a': ivy.array([[4]], dev=dev),\n 'b': {'c': ivy.array([[[5]]], dev=dev), 'e': ivy.array([3], dev=dev)}})\n container_diff = ivy.Container.structural_diff(container_0, container_1)\n assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))\n assert np.equal(ivy.to_numpy(container_diff.a.diff_1), np.array([[4]]))\n assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))\n assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([[[5]]]))\n assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))\n assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([3]))\n container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only')\n assert container_diff_diff_only.to_dict() == container_diff.to_dict()\n container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only')\n assert container_diff_same_only.to_dict() == {}\n\n # some different shapes\n container_0 = Container({'a': 
ivy.array([1], dev=dev),\n 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})\n container_1 = Container({'a': ivy.array([4], dev=dev),\n 'b': {'c': ivy.array([[5]], dev=dev), 'd': ivy.array([6], dev=dev)}})\n container_diff = ivy.Container.structural_diff(container_0, container_1)\n assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))\n assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))\n assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5]))\n assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))\n container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only')\n assert 'a' not in container_diff_diff_only\n assert 'b' in container_diff_diff_only\n assert 'c' in container_diff_diff_only['b']\n assert 'd' not in container_diff_diff_only['b']\n container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only')\n assert 'a' in container_diff_same_only\n assert 'b' in container_diff_same_only\n assert 'c' not in container_diff_same_only['b']\n assert 'd' in container_diff_same_only['b']\n\n # all different keys\n container_0 = Container({'a': ivy.array([1], dev=dev),\n 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})\n container_1 = Container({'e': ivy.array([4], dev=dev),\n 'f': {'g': ivy.array([5], dev=dev), 'h': ivy.array([6], dev=dev)}})\n container_diff = ivy.Container.structural_diff(container_0, container_1)\n assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))\n assert np.equal(ivy.to_numpy(container_diff.b.diff_0.c), np.array([2]))\n assert np.equal(ivy.to_numpy(container_diff.b.diff_0.d), np.array([3]))\n assert np.equal(ivy.to_numpy(container_diff.e.diff_1), np.array([4]))\n assert np.equal(ivy.to_numpy(container_diff.f.diff_1.g), np.array([5]))\n assert np.equal(ivy.to_numpy(container_diff.f.diff_1.h), np.array([6]))\n container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only')\n assert container_diff_diff_only.to_dict() == container_diff.to_dict()\n container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only')\n assert container_diff_same_only.to_dict() == {}\n\n # some different keys\n container_0 = Container({'a': ivy.array([1], dev=dev),\n 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})\n container_1 = Container({'a': ivy.array([4], dev=dev),\n 'b': {'c': ivy.array([5], dev=dev), 'e': ivy.array([6], dev=dev)}})\n container_diff = ivy.Container.structural_diff(container_0, container_1)\n assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))\n assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))\n assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))\n assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([6]))\n container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only')\n assert 'a' not in container_diff_diff_only\n assert 'b' in container_diff_diff_only\n assert 'c' not in container_diff_diff_only['b']\n assert 'd' in container_diff_diff_only['b']\n assert 'e' in container_diff_diff_only['b']\n container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only')\n assert 'a' in container_diff_same_only\n assert 'b' in container_diff_same_only\n assert 'c' in container_diff_same_only['b']\n assert 'd' not in container_diff_same_only['b']\n assert 'e' not in 
container_diff_same_only['b']\n\n # all same\n container_0 = Container({'a': ivy.array([1], dev=dev),\n 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})\n container_1 = Container({'a': ivy.array([4], dev=dev),\n 'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([6], dev=dev)}})\n container_diff = ivy.Container.structural_diff(container_0, container_1)\n assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))\n assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))\n assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))\n container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only')\n assert container_diff_diff_only.to_dict() == {}\n container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only')\n assert container_diff_same_only.to_dict() == container_diff.to_dict()\n\n", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 896, "n_words": 386, "vocab_size": 79, "complexity": 1, "nloc": 83, "token_counts": 1556, "n_ast_nodes": 2474, "n_identifiers": 27, "random_cut": "def test_container_structural_diff(dev, call):\n # all different keys or shapes\n container_0 = Container({'a': ivy.array([1], dev=dev),\n 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})\n container_1 = Container({'a': ivy.array([[4]], dev=dev),\n 'b': {'c': ivy.array([[[5]]], dev=dev), 'e': ivy.array([3], dev=dev)}})\n container_diff = ivy.Container.structural_diff(container_0, container_1)\n assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))\n assert np.equal(ivy.to_numpy(container_diff.a.diff_1), np.array([[4]]))\n assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))\n assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([[[5]]]))\n assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))\n assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([3]))\n container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only')\n assert container_diff_diff_only.to_dict() == container_diff.to_dict()\n container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only')\n assert container_diff_same_only.to_dict() == {}\n\n # some different shapes\n container_0 = Container({'a': ivy.array([1], dev=dev),\n 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})\n container_1 = Container({'a': ivy.array([4], dev=dev),\n 'b': {'c': ivy.array([[5]], dev=dev), 'd': ivy.array([6], dev=dev)}})\n container_diff = ivy.Container.structural_diff(container_0, container_1)\n assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))\n assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))\n assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5]))\n assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))\n container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only')\n assert 'a' not in container_diff_diff_only\n assert 'b' in container_diff_diff_only\n assert 'c' in container_diff_diff_only['b']\n assert 'd' not in container_diff_diff_only['b']\n container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only')\n assert 'a' in container_diff_same_only\n assert 'b' in container_diff_same_only\n assert 'c' not in container_diff_same_only['b']\n assert 'd' 
in container_diff_same_only['b']\n\n # all different keys\n container_0 = Container({'a': ivy.array([1], dev=dev),\n 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})\n container_1 = Container({'e': ivy.array([4], dev=dev),\n 'f': {'g': ivy.array([5], dev=dev), 'h': ivy.array([6], dev=dev)}})\n container_diff = ivy.Container.structural_diff(container_0, container_1)\n assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))\n assert np.equal(ivy.to_numpy(container_diff.b.diff_0.c), np.array([2]))\n assert np.equal(ivy.to_numpy(container_diff.b.diff_0.d), np.array([3]))\n assert np.equal(ivy.to_numpy(container_diff.e.diff_1), np.array([4]))\n assert np.equal(ivy.to_numpy(container_diff.f.diff_1.g), np.array([5]))\n assert np.equal(ivy.to_numpy(container_diff.f.diff_1.h), np.array([6]))\n container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only')\n assert container_diff_diff_only.to_dict() == container_diff.to_dict()\n container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only')\n assert container_diff_same_only.to_dict() == {}\n\n # some different keys\n container_0 = Container({'a': ivy.array([1], dev=dev),\n " }, { "id": 187151, "commit_id": "3d44da082b3ba202b9d0557bfd8ce747a1d7960c", "repo": "streamlink", "path": "tests/test_api_validate.py", "file_name": "test_api_validate.py", "fun_name": "test_parse_json", "commit_message": "plugin.api.validate: implement ValidationError\n\n- Implement `ValidationError`\n - Inherit from `ValueError` to preserve backwards compatiblity\n - Allow collecting multiple errors (AnySchema)\n - Keep an error stack of parent `ValidationError`s or other exceptions\n - Format error stack when converting error to string\n- Raise `ValidationError` instead of `ValueError`\n - Add error contexts where it makes sense\n - Add schema names to error instances\n- Add and update tests", "code": "def test_parse_json(self):\n assert validate(parse_json(), '{\"a\": [\"b\", true, false, null, 1, 2.3]}') == {\"a\": [\"b\", True, False, None, 1, 2.3]}\n with self.assertRaises(ValueError) as cm:\n validate(parse_json(), \"invalid\")\n assert_validationerror(cm.exception, )\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 58, "n_words": 27, "vocab_size": 24, "complexity": 1, "nloc": 8, "token_counts": 60, "n_ast_nodes": 99, "n_identifiers": 9, "random_cut": "def test_parse_json(self):\n assert validate(parse_json(), '{\"a\": [\"b\", true, false, null, 1, 2.3]}') == {\"a\": [\"b\", True, False, None, 1, 2.3]}\n with self.assertRaises(ValueError) as cm:\n validate(parse_json()," }, { "id": 15128, "commit_id": "9c7c3aab121a5e6be89197156432970625688a70", "repo": "ccxt", "path": "python/ccxt/aax.py", "file_name": "aax.py", "fun_name": "fetch_deposits", "commit_message": "add fetchdeposits", "code": "def fetch_deposits(self, code=None, since=None, limit=None, params={}):\n self.load_markets()\n request = {\n # status Not required - Deposit status, \"1: pending,2: confirmed, 3:failed\"\n # currency: Not required - String Currency\n # startTime Not required Integer Default: 90 days from current timestamp.\n # endTime Not required Integer Default: present timestamp.\n }\n currency = None\n if code is not None:\n currency = self.currency(code)\n request['currency'] = currency['id']\n if since is not None:\n request['startTime'] = since # default 90 days\n response = 
self.privateGetAccountDeposits(self.extend(request, params))\n # { \"code\": 1,\n # \"data\": [{\n # \"currency\": \"USDT\",\n # \"network\": \"USDT\",\n # \"quantity\": \"19.000000000000\",\n # \"txHash\": \"75eb2e5f037b025c535664c49a0f7cc8f601dae218a5f4fe82290ff652c43f3d\",\n # \"address\": \"1GkB7Taf7uttcguKEb2DmmyRTnihskJ9Le\",\n # \"status\": \"2\",\n # \"createdTime\": \"2021-01-08T19:45:01.354Z\",\n # \"updatedTime\": \"2021-01-08T20:03:05.000Z\",\n # }]\n # \"message\": \"success\",\n # \"ts\": 1573561743499\n # }\n deposits = self.safe_value(response, 'data', [])\n return self.parse_transactions(deposits, code, since, limit)\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 451, "n_words": 129, "vocab_size": 84, "complexity": 3, "nloc": 13, "token_counts": 110, "n_ast_nodes": 191, "n_identifiers": 15, "random_cut": "def fetch_deposits(self, code=None, since=None, limit=None, params={}):\n self.load_markets()\n request = {\n # status Not required - Deposit status, \"1: pending,2: confirmed, 3:failed\"\n " }, { "id": 123567, "commit_id": "df4293473d2fb6e887e31522cab5aff95e201581", "repo": "sqlmap", "path": "plugins/dbms/db2/fingerprint.py", "file_name": "fingerprint.py", "fun_name": "checkDbms", "commit_message": "Fixing DeprecationWarning (logger.warn)", "code": "def checkDbms(self):\n if not conf.extensiveFp and Backend.isDbmsWithin(DB2_ALIASES):\n setDbms(DBMS.DB2)\n\n return True\n\n logMsg = \"testing %s\" % DBMS.DB2\n logger.info(logMsg)\n\n result = inject.checkBooleanExpression(\"[RANDNUM]=(SELECT [RANDNUM] FROM SYSIBM.SYSDUMMY1)\")\n\n if result:\n logMsg = \"confirming %s\" % DBMS.DB2\n logger.info(logMsg)\n\n result = inject.checkBooleanExpression(\"JULIAN_DAY(CURRENT DATE) IS NOT NULL\")\n\n if not result:\n warnMsg = \"the back-end DBMS is not %s\" % DBMS.DB2\n logger.warning(warnMsg)\n\n return False\n\n version = self._versionCheck()\n if version:\n Backend.setVersion(version)\n setDbms(\"%s %s\" % (DBMS.DB2, Backend.getVersion()))\n else:\n setDbms(DBMS.DB2)\n\n return True\n else:\n warnMsg = \"the back-end DBMS is not %s\" % DBMS.DB2\n logger.warning(warnMsg)\n\n return False\n", "url": "https://github.com/sqlmapproject/sqlmap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 358, "n_words": 84, "vocab_size": 44, "complexity": 6, "nloc": 26, "token_counts": 149, "n_ast_nodes": 258, "n_identifiers": 22, "random_cut": "def checkDbms(self):\n if not conf.extensiveFp and Backend.isDbmsWithin(DB2_ALIASES):\n setDbms(DBMS.DB2)\n\n return True\n\n logMsg = \"testing %s\" % DBMS.DB2\n logger.info(logMsg)\n\n result = inject.checkBooleanExpression(\"[RANDNUM]=(SELECT [RANDNUM] FROM SYSIBM.SYSDUMMY1)\")\n\n if result:\n " }, { "id": 266185, "commit_id": "d4a231585ac9a25d9739552d8c9e433dbf9398af", "repo": "netbox", "path": "netbox/dcim/tests/test_natural_ordering.py", "file_name": "test_natural_ordering.py", "fun_name": "setUpTestData", "commit_message": "Clean up tests", "code": "def setUpTestData(cls):\n\n site = Site.objects.create(name='Test Site 1', slug='test-site-1')\n manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')\n devicetype = DeviceType.objects.create(\n manufacturer=manufacturer, model='Test Device Type 1', slug='test-device-type-1'\n )\n devicerole = DeviceRole.objects.create(\n name='Test Device Role 1', slug='test-device-role-1', color='ff0000'\n )\n 
Device.objects.create(\n device_type=devicetype, device_role=devicerole, name='Test Device 1', site=site\n )\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 130, "n_words": 42, "vocab_size": 30, "complexity": 1, "nloc": 12, "token_counts": 99, "n_ast_nodes": 166, "n_identifiers": 19, "random_cut": "def setUpTestData(cls):\n\n site = Site.objects.create(name='Test Site 1', slug='test-site-1')\n manufacturer = Manufacturer.objects.create(name='Test Manufacturer 1', slug='test-manufacturer-1')\n devicetype = DeviceType.objects.create(\n manufacturer=manufacturer, model='Test Device Type 1', slug='test-device-type-1'\n )\n devicerole = DeviceRole.objects.create(\n name='Test Devic" }, { "id": 7396, "commit_id": "ae8de108e14111afef08a5e9c429bb19e368c0b3", "repo": "ludwig", "path": "tests/ludwig/benchmarking/test_resource_usage_tracker.py", "file_name": "test_resource_usage_tracker.py", "fun_name": "test_resource_usage_tracker", "commit_message": "adding hardware usage and software packages tracker (#2195)\n\n* adding hardware usage and software packages tracker\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* removed stdout redirection to null during import\r\n\r\n* reverting\r\n\r\n* updated `tracker.py`\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* improved docstring style\r\n\r\n* removing unnecessary `torch.cuda.synchronize()` call\r\n\r\n* using the `multiprocessing` library instead of the `@processify` wrapper to spawn the `Tracker` monitor process\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* style changes\r\n\r\n* adding s3fs to `requirements.txt`\r\n\r\n* name change to `resource_usage_tracker.py`\r\n\r\n* added test\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* tag name validation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* flake8 updates\r\n\r\n* fixed test file\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* update test file\r\n\r\n* fixing empty utilization (due to very short experiment)\r\n\r\n* added # noqa E402\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def test_resource_usage_tracker(tmpdir):\n train_df = pd.DataFrame(np.random.normal(0, 1, size=(100, 3)), columns=[\"input_1\", \"input_2\", \"output_1\"])\n eval_df = pd.DataFrame(np.random.normal(0, 1, size=(20, 3)), columns=[\"input_1\", \"input_2\", \"output_1\"])\n\n config = {\n \"input_features\": [{\"name\": \"input_1\", \"type\": \"number\"}, {\"name\": \"input_2\", \"type\": \"number\"}],\n \"output_features\": [{\"name\": \"output_1\", \"type\": \"number\"}],\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n TRAINER: {\"epochs\": 1},\n }\n\n model = LudwigModel(config=config, backend=\"local\")\n\n with ResourceUsageTracker(tag=\"train\", output_dir=tmpdir, logging_interval=0.05, num_examples=len(train_df)):\n model.train(\n dataset=train_df,\n 
output_directory=tmpdir,\n skip_save_training_description=True,\n skip_save_training_statistics=True,\n skip_save_model=True,\n skip_save_progress=True,\n skip_save_log=True,\n skip_save_processed_input=True,\n )\n\n with ResourceUsageTracker(tag=\"evaluate\", output_dir=tmpdir, logging_interval=0.05, num_examples=len(eval_df)):\n model.evaluate(dataset=eval_df)\n\n assert os.path.exists(os.path.join(tmpdir, \"train_resource_usage_metrics.json\"))\n assert os.path.exists(os.path.join(tmpdir, \"evaluate_resource_usage_metrics.json\"))\n\n shutil.rmtree(tmpdir)\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 244, "n_words": 78, "vocab_size": 59, "complexity": 1, "nloc": 26, "token_counts": 286, "n_ast_nodes": 464, "n_identifiers": 38, "random_cut": "def test_resource_usage_tracker(tmpdir):\n train_df = pd.DataFrame(np.random.normal(0, 1, size=(100, 3)), columns=[\"input_1\", \"input_2\", \"output_1\"])\n eval_df = pd.DataFrame(np.random.normal(0, 1, size=(20, 3)), columns=[\"input_1\", \"input_2\", \"output_1\"])\n\n config = {\n \"input_features\": [{\"name\": \"input_1\", \"type\": \"number\"}, {\"name\": \"input_2\", \"type\": \"number\"}],\n \"output_features\": [{\"name\": \"output_1\", \"typ" }, { "id": 219351, "commit_id": "42dde73cebb1d524b6adfcde69fd947ed9b2440b", "repo": "XX-Net", "path": "code/default/smart_router/local/ip_region.py", "file_name": "ip_region.py", "fun_name": "generate_db", "commit_message": "Roll back 4.6.8 from upgrade", "code": "def generate_db(self):\n keeprange = (\n '0.0.0.0/8', # 本地网络\n '10.0.0.0/8', # 私有网络\n '100.64.0.0/10', # 地址共享(运营商 NAT)\n '127.0.0.0/8', # 环回地址\n '169.254.0.0/16', # 链路本地\n '172.16.0.0/12', # 私有网络\n '192.0.0.0/24', # 保留地址(IANA)\n '192.0.2.0/24', # TEST-NET-1\n '192.88.99.0/24', # 6to4 中继\n '192.168.0.0/16', # 私有网络\n '198.18.0.0/15', # 网络基准测试\n '198.51.100.0/24', # TEST-NET-2\n '203.0.113.0/24', # TEST-NET-3\n # 连续地址直到 IP 结束,特殊处理\n # '224.0.0.0/4', #组播地址(D类)\n # '240.0.0.0/4', #保留地址(E类)\n )\n keeplist = []\n for iprange in keeprange:\n ip, mask = iprange.split('/')\n keeplist.append((utils.ip_string_to_num(ip), 32 - int(mask)))\n\n mask_dict = dict((str(2 ** i), i) for i in range(8, 25))\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 487, "n_words": 83, "vocab_size": 61, "complexity": 11, "nloc": 81, "token_counts": 537, "n_ast_nodes": 182, "n_identifiers": 17, "random_cut": "def generate_db(self):\n keeprange = (\n '0.0.0.0/8', # 本地网络\n '10.0.0.0/8', # 私有网络\n '100.64.0.0/10', # 地址共享(运营商 NAT)\n '127.0.0.0/8', # 环回地址\n '169.254.0.0/16', # 链路本地\n '172.16.0.0/12', # 私有网络\n '192.0." }, { "id": 146321, "commit_id": "0c5440ee724a9f2b0fd94b7e6055c5be71968a84", "repo": "ray", "path": "python/ray/runtime_env.py", "file_name": "runtime_env.py", "fun_name": "py_container_image", "commit_message": "[runtime env] Deletes the proto cache on RuntimeEnv (#22944)\n\nMainly the following things:\r\n- This PR deletes the proto cache on RuntimeEnv, ensuring that the user's modification of RuntimeEnv can take effect in the Proto message.\r\n- validate whole runtime env when serialize runtime_env. 
\r\n- overload method `__setitem__` to parse and validate field when it has to modify.", "code": "def py_container_image(self) -> Optional[str]:\n if not self.has_py_container():\n return None\n return self[\"container\"].get(\"image\", \"\")\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 36, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 4, "token_counts": 32, "n_ast_nodes": 56, "n_identifiers": 6, "random_cut": "def py_container_image(self) -> Optional[str]:\n if not self.has_py_container():\n return None\n return self[\"container\"].get(\"image\", \"\")\n" }, { "id": 260854, "commit_id": "306608e622bb3fb55095a97405b9ef0f1ad901d9", "repo": "scikit-learn", "path": "sklearn/ensemble/tests/test_bagging.py", "file_name": "test_bagging.py", "fun_name": "test_oob_score_classification", "commit_message": "MAINT rename and deprecate `base_estimator` in favor of `estimator` in ensemble classes (#23819)\n\nCo-authored-by: Adrian Trujillo Duron \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def test_oob_score_classification():\n # Check that oob prediction is a good estimation of the generalization\n # error.\n rng = check_random_state(0)\n X_train, X_test, y_train, y_test = train_test_split(\n iris.data, iris.target, random_state=rng\n )\n\n for estimator in [DecisionTreeClassifier(), SVC()]:\n clf = BaggingClassifier(\n estimator=estimator,\n n_estimators=100,\n bootstrap=True,\n oob_score=True,\n random_state=rng,\n ).fit(X_train, y_train)\n\n test_score = clf.score(X_test, y_test)\n\n assert abs(test_score - clf.oob_score_) < 0.1\n\n # Test with few estimators\n warn_msg = (\n \"Some inputs do not have OOB scores. This probably means too few \"\n \"estimators were used to compute any reliable oob estimates.\"\n )\n with pytest.warns(UserWarning, match=warn_msg):\n clf = BaggingClassifier(\n estimator=estimator,\n n_estimators=1,\n bootstrap=True,\n oob_score=True,\n random_state=rng,\n )\n clf.fit(X_train, y_train)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 364, "n_words": 99, "vocab_size": 80, "complexity": 2, "nloc": 28, "token_counts": 151, "n_ast_nodes": 227, "n_identifiers": 30, "random_cut": "def test_oob_score_classification():\n # Check that oob prediction is a good estimation of the generalization\n # error.\n rng = check_random_state(0)\n X_train, X_test, y_train, y_test = train_test_split(\n iris.data, iris.target, random_state=rng\n )\n\n for estimator in [DecisionTreeClassifier(), SVC()]:\n clf = BaggingClassifier(\n estimator=estimator,\n n_estimators=100,\n bootstrap=True,\n oob_score=True,\n random_state=rng,\n ).fit(X_train, y_train)\n\n test_score = clf.score(X_test, y_test)\n\n assert abs(test_score - clf.oob_score_) < 0.1\n\n # Test with few estimators\n warn_msg = (\n \"Some inputs do not have OOB scores. This probably means too few \"\n \"estimators were used to compute any reliable oob estimates.\"\n )\n with pytest." 
}, { "id": 254806, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/backend/test/case/node/if.py", "file_name": "if.py", "fun_name": "export_if", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def export_if() -> None:\n # Given a bool scalar input cond.\n # return constant tensor x if cond is True, otherwise return constant tensor y.\n\n then_out = onnx.helper.make_tensor_value_info('then_out', onnx.TensorProto.FLOAT, [5])\n else_out = onnx.helper.make_tensor_value_info('else_out', onnx.TensorProto.FLOAT, [5])\n\n x = np.array([1, 2, 3, 4, 5]).astype(np.float32)\n y = np.array([5, 4, 3, 2, 1]).astype(np.float32)\n\n then_const_node = onnx.helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['then_out'],\n value=onnx.numpy_helper.from_array(x)\n )\n\n else_const_node = onnx.helper.make_node(\n 'Constant',\n inputs=[],\n outputs=['else_out'],\n value=onnx.numpy_helper.from_array(y)\n )\n\n then_body = onnx.helper.make_graph(\n [then_const_node],\n 'then_body',\n [],\n [then_out]\n )\n\n else_body = onnx.helper.make_graph(\n [else_const_node],\n 'else_body',\n [],\n [else_out]\n )\n\n if_node = onnx.helper.make_node(\n 'If',\n inputs=['cond'],\n outputs=['res'],\n then_branch=then_body,\n else_branch=else_body\n )\n\n cond = np.array(1).astype(bool)\n res = x if cond else y\n expect(if_node, inputs=[cond], outputs=[res], name='test_if',\n opset_imports=[onnx.helper.make_opsetid(\"\", 11)])\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 483, "n_words": 106, "vocab_size": 71, "complexity": 2, "nloc": 40, "token_counts": 287, "n_ast_nodes": 443, "n_identifiers": 35, "random_cut": "def export_if() -> None:\n # Given a bool scalar input cond.\n # return constant tensor x if cond is True, otherwise return constant tensor y.\n\n then_out = onnx.helper.make_tensor_value_info('then_out', onnx.TensorProto.FLOAT, [5])\n else_out = onnx.helper.make_tensor_value_info('else_out', onnx.TensorProto.FLOAT, [5])\n\n x = np.array([1, 2, 3, 4, 5]).astype(np.float32)\n y = np.array([5, 4, 3, 2, 1]).astyp" }, { "id": 43987, "commit_id": "2b4bf7fe67fc656ceb7bdaad36453b7a5b83ef04", "repo": "airflow", "path": "tests/api/common/test_mark_tasks.py", "file_name": "test_mark_tasks.py", "fun_name": "test_set_running_dag_run_to_success", "commit_message": "Use `DagRun.run_id` instead of `execution_date` when updating state of TIs(UI & REST API) 
(#18724)\n\nWe can now use run_id as well as execution_date to update states\r\nof task instances\r\n\r\nCo-authored-by: Tzu-ping Chung \r\nCo-authored-by: Ash Berlin-Taylor ", "code": "def test_set_running_dag_run_to_success(self):\n date = self.execution_dates[0]\n dr = self._create_test_dag_run(State.RUNNING, date)\n middle_time = timezone.utcnow()\n self._set_default_task_instance_states(dr)\n\n altered = set_dag_run_state_to_success(dag=self.dag1, run_id=dr.run_id, commit=True)\n\n # All except the SUCCESS task should be altered.\n expected = self._get_num_tasks_with_starting_state(State.SUCCESS, inclusion=False)\n assert len(altered) == expected\n self._verify_dag_run_state(self.dag1, date, State.SUCCESS)\n self._verify_task_instance_states(self.dag1, date, State.SUCCESS)\n self._verify_dag_run_dates(self.dag1, date, State.SUCCESS, middle_time)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 121, "n_words": 45, "vocab_size": 37, "complexity": 1, "nloc": 11, "token_counts": 123, "n_ast_nodes": 185, "n_identifiers": 26, "random_cut": "def test_set_running_dag_run_to_success(self):\n date = self.execution_dates[0]\n " }, { "id": 176777, "commit_id": "5c0b11afb4c0882a070d522ef3fa41482ba935d3", "repo": "networkx", "path": "networkx/algorithms/smallworld.py", "file_name": "smallworld.py", "fun_name": "lattice_reference", "commit_message": "Use isort with pre-commit to enforce import guidelines (#5659)\n\n* Add isort to pre-commit\r\n\r\n* Run isort on all python files (except __init__.py ones)", "code": "def lattice_reference(G, niter=5, D=None, connectivity=True, seed=None):\n \n import numpy as np\n\n from networkx.utils import cumulative_distribution, discrete_sequence\n\n local_conn = nx.connectivity.local_edge_connectivity\n\n if len(G) < 4:\n raise nx.NetworkXError(\"Graph has less than four nodes.\")\n # Instead of choosing uniformly at random from a generated edge list,\n # this algorithm chooses nonuniformly from the set of nodes with\n # probability weighted by degree.\n G = G.copy()\n keys, degrees = zip(*G.degree()) # keys, degree\n cdf = cumulative_distribution(degrees) # cdf of degree\n\n nnodes = len(G)\n nedges = nx.number_of_edges(G)\n if D is None:\n D = np.zeros((nnodes, nnodes))\n un = np.arange(1, nnodes)\n um = np.arange(nnodes - 1, 0, -1)\n u = np.append((0,), np.where(un < um, un, um))\n\n for v in range(int(np.ceil(nnodes / 2))):\n D[nnodes - v - 1, :] = np.append(u[v + 1 :], u[: v + 1])\n D[v, :] = D[nnodes - v - 1, :][::-1]\n\n niter = niter * nedges\n # maximal number of rewiring attempts per 'niter'\n max_attempts = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2))\n\n for _ in range(niter):\n n = 0\n while n < max_attempts:\n # pick two random edges without creating edge list\n # choose source node indices from discrete distribution\n (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed)\n if ai == ci:\n continue # same source, skip\n a = keys[ai] # convert index to label\n c = keys[ci]\n # choose target uniformly from neighbors\n b = seed.choice(list(G.neighbors(a)))\n d = seed.choice(list(G.neighbors(c)))\n bi = keys.index(b)\n di = keys.index(d)\n\n if b in [a, c, d] or d in [a, b, c]:\n continue # all vertices should be different\n\n # don't create parallel edges\n if (d not in G[a]) and (b not in G[c]):\n if D[ai, bi] + D[ci, di] >= D[ai, ci] + D[bi, di]:\n # only swap if we get closer to the diagonal\n G.add_edge(a, d)\n G.add_edge(c, b)\n 
G.remove_edge(a, b)\n G.remove_edge(c, d)\n\n # Check if the graph is still connected\n if connectivity and local_conn(G, a, b) == 0:\n # Not connected, revert the swap\n G.remove_edge(a, d)\n G.remove_edge(c, b)\n G.add_edge(a, b)\n G.add_edge(c, d)\n else:\n break\n n += 1\n\n return G\n\n\n@py_random_state(3)\n@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@py_random_state(3)\n@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 976, "n_words": 339, "vocab_size": 216, "complexity": 14, "nloc": 50, "token_counts": 516, "n_ast_nodes": 829, "n_identifiers": 57, "random_cut": "def lattice_reference(G, niter=5, D=None, connectivity=True, seed=None):\n \n import numpy as np\n\n from networkx.utils import cumulative_distribution, discrete_sequence\n\n local_conn = nx.connectivity.local_edge_connectivity\n\n if len(G) < 4:\n raise nx.NetworkXError(\"Graph has less than four nodes.\")\n # Instead of choosing uniformly at random from a generated edge list,\n # this algorithm chooses nonuniformly from the set of nodes with\n # probability weighted by degree.\n G = G.copy()\n keys, degrees = zip(*G.degree()) # keys, degree\n cdf = cumulative_distribution(degrees) # cdf of degree\n\n nnodes = len(G)\n nedges = nx.number_of_edges(G)\n if D is None:\n D = np.zeros((nnodes, nnodes))\n un = np.arange(1, nnodes)\n um = np.arange(nnodes - 1, 0, -1)\n u = np.append((0,), np.where(un < um, un, um))\n\n for v in range(int(np.ceil(nnodes / 2))):\n D[nnodes - v - 1, :] = np.append(u[v + 1 :], u[: v + 1])\n D[v, :] = D[nnodes - v - 1, :][::-1]\n\n niter = niter * nedges\n # maximal number of rewiring attempts per 'niter'\n max_attempts = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2))\n\n for _ in range(niter):\n n = 0\n while n < max_attempts:\n # pick two random edges without creating edge list\n # choose source node indices from discrete distribution\n (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed)\n if ai == ci:\n continue # same source, skip\n a = keys[ai] # convert index to label\n c = keys[ci]\n # choose target uniformly from neighbors\n b = seed.choice(list(G.neighbors(a)))\n d = seed.choice(list(G.neighbors(c)))\n bi = keys.index(b)\n di = keys.index(d)\n\n if b in [a, c, d] or d in [a, b, c]:\n continue # all vertices should be different\n\n # don't create parallel edges\n if (d not in G[a]) and (b not in G[c]):\n if D[ai, bi] + D[ci, di] >= D[ai, ci] + D[bi, di]:\n # only swap if we get closer to the diagonal\n " }, { "id": 36213, "commit_id": "204c54d411c2b4c7f31405203533a51632f46ab1", "repo": "transformers", "path": "tests/t5/test_modeling_tf_t5.py", "file_name": "test_modeling_tf_t5.py", "fun_name": "test_beam_search_generate", "commit_message": "TF: add beam search tests (#16202)", "code": "def test_beam_search_generate(self):\n model = TFT5ForConditionalGeneration.from_pretrained(\"t5-small\")\n tokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n\n sentences = [\"I really love my\", \"Translate English to German: the transformers are truly amazing\"]\n input_ids = tokenizer(sentences, return_tensors=\"tf\", padding=True).input_ids\n\n generation_kwargs = {\n \"bad_words_ids\": [tokenizer(\"my\").input_ids, tokenizer(\"ein schöner\").input_ids],\n \"no_repeat_ngram_size\": 3,\n \"do_sample\": False,\n \"repetition_penalty\": 2.2,\n \"num_beams\": 4,\n }\n\n output_ids = 
model.generate(input_ids, **generation_kwargs)\n\n output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)\n\n expected_output_string = [\"Ich liebe es so sehr!\", \"die Transformatoren sind wirklich erstaunlich\"]\n self.assertListEqual(expected_output_string, output_strings)\n\n\n@require_tf\n@require_sentencepiece\n@require_tokenizers", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@require_tf\n@require_sentencepiece\n@require_tokenizers", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 190, "n_words": 69, "vocab_size": 62, "complexity": 1, "nloc": 16, "token_counts": 122, "n_ast_nodes": 216, "n_identifiers": 22, "random_cut": "def test_beam_search_generate(self):\n model = TFT5ForConditionalGeneration.from_pretrained(\"t5-small\"" }, { "id": 57678, "commit_id": "a0b82ae203029e65ba4dad2a93e545960eaca6ab", "repo": "prefect", "path": "tests/cli/test_profile.py", "file_name": "test_profile.py", "fun_name": "authorized_cloud", "commit_message": "Refactor tests for clarity", "code": "def authorized_cloud(self):\n # attempts to reach the Cloud 2 workspaces endpoint implies a good connection\n # to Prefect Cloud as opposed to a hosted Prefect Orion instance\n with respx.mock:\n authorized = respx.get(\n \"https://mock-cloud.prefect.io/api/me/workspaces\",\n ).mock(return_value=Response(200, json={}))\n\n yield authorized\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 105, "n_words": 37, "vocab_size": 30, "complexity": 1, "nloc": 6, "token_counts": 36, "n_ast_nodes": 64, "n_identifiers": 9, "random_cut": "def authorized_cloud(self):\n # attempts to reach the Cloud 2 workspaces endpoint implies a good connection\n # to Prefect Cloud as opposed to a hosted Prefect Orion instance\n with respx.mock:\n authorized = respx.get(\n \"https://mock-cloud.prefect.io/api/me/workspaces\",\n ).mock(return_value=Response(200, json={}))\n\n yield authorized\n" }, { "id": 257130, "commit_id": "4eec2dc45ee60e8b8780aa4f956aea8ad3624da3", "repo": "haystack", "path": "test/test_pipeline_yaml.py", "file_name": "test_pipeline_yaml.py", "fun_name": "test_load_yaml_missing_version", "commit_message": "Change YAML version exception into a warning (#2385)\n\n* Change exception into warning, add strict_version param, and remove compatibility between schemas\r\n\r\n* Simplify update_json_schema\r\n\r\n* Rename unstable into master\r\n\r\n* Prevent validate_config from changing the config to validate\r\n\r\n* Fix version validation and add tests\r\n\r\n* Rename master into ignore\r\n\r\n* Complete parameter rename\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def test_load_yaml_missing_version(tmp_path):\n with open(tmp_path / \"tmp_config.yml\", \"w\") as tmp_file:\n tmp_file.write(\n \n )\n with pytest.raises(PipelineConfigError, match=\"Validation failed\") as e:\n Pipeline.load_from_yaml(path=tmp_path / \"tmp_config.yml\")\n assert \"version\" in str(e)\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 69, "n_words": 24, "vocab_size": 21, "complexity": 1, "nloc": 18, "token_counts": 54, "n_ast_nodes": 103, "n_identifiers": 14, "random_cut": "def test_load_yaml_missing_version(tmp_path):\n with open(tmp_path / \"tmp_config.yml\", \"w\") as tmp_file:\n tmp_file.write(\n \n )\n with 
pytest.raises(PipelineConfigError, match=\"Validation failed\") as e:\n Pipeline.load_from_yaml(path=tmp_path / \"tmp_config.yml\")\n assert \"version\" in str(e)\n\n" }, { "id": 200587, "commit_id": "3e01222efcf2cf445f441eddc71e1c8194cee216", "repo": "sympy", "path": "sympy/tensor/tensor.py", "file_name": "tensor.py", "fun_name": "_dedupe_indices", "commit_message": "TensMul._dedupe_indices: rename variable", "code": "def _dedupe_indices(new, exclude):\n \n exclude = set(exclude)\n dums_new = set(get_dummy_indices(new))\n\n conflicts = dums_new.intersection(exclude)\n if len(conflicts) == 0:\n return None\n\n \n exclude.update(dums_new)\n exclude_for_gen = [(i, None) for i in exclude]\n gen = _IndexStructure._get_generator_for_dummy_indices(exclude_for_gen)\n repl = {}\n for d in conflicts:\n if -d in repl.keys():\n continue\n newname = gen(d.tensor_index_type)\n new_d = d.func(newname, *d.args[1:])\n repl[d] = new_d\n repl[-d] = -new_d\n\n if len(repl) == 0:\n return None\n\n new_renamed = new._replace_indices(repl)\n return new_renamed\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 257, "n_words": 66, "vocab_size": 44, "complexity": 6, "nloc": 26, "token_counts": 148, "n_ast_nodes": 240, "n_identifiers": 25, "random_cut": "def _dedupe_indices(new, exclude):\n \n exclude = set(exclude)\n dums_new = set(get_dummy_indices(new))\n\n conflicts = dums_new.intersection(exclude)\n if len(confl" }, { "id": 56900, "commit_id": "d97eb751d3d526bae64b9d9580c75ebc0623121f", "repo": "prefect", "path": "tests/cli/test_deployment_preview.py", "file_name": "test_deployment_preview.py", "fun_name": "test_preview_works_for_unnamed_deployments", "commit_message": "Fix path for deployments test files", "code": "def test_preview_works_for_unnamed_deployments(deployments_path):\n \n result = invoke_and_assert(\n [\n \"deployment\",\n \"preview\",\n str(deployments_path / \"single_unnamed_deployment.py\"),\n ],\n expected_output_contains=\"kind: Job\",\n )\n assert \"Preview for \" in result.stdout\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 89, "n_words": 23, "vocab_size": 23, "complexity": 1, "nloc": 10, "token_counts": 35, "n_ast_nodes": 64, "n_identifiers": 7, "random_cut": "def test_preview_works_for_unnamed_deployments(deployments_path):\n \n result = invoke_and_assert(\n [\n \"deployment\",\n \"preview\",\n str(deployments_path / \"single_unnamed_deployment.py\"),\n ],\n expected_" }, { "id": 221122, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bdb.py", "file_name": "bdb.py", "fun_name": "clear_bpbynumber", "commit_message": "add python 3.10.4 for windows", "code": "def clear_bpbynumber(self, arg):\n \n try:\n bp = self.get_bpbynumber(arg)\n except ValueError as err:\n return str(err)\n bp.deleteMe()\n self._prune_breaks(bp.file, bp.line)\n return None\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 82, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 8, "token_counts": 47, "n_ast_nodes": 79, "n_identifiers": 12, "random_cut": "def clear_bpbynumber(self, arg):\n \n try:\n bp = self.get_b" }, { "id": 290579, "commit_id": "a848dc11556624f8ebf2a09aff7192b84ab4f66e", "repo": "core", "path": "tests/components/picnic/test_services.py", "file_name": 
"test_services.py", "fun_name": "picnic_api_client", "commit_message": "Add service for adding products to a Picnic order (#67877)\n\n* Add Picnic services for searching products and adding products to the cart\r\n\r\n* Improve the Picnic services implementation and add unit tests\r\n\r\n* Fix pre-commit check issues\r\n\r\n* Fix comments and example product name\r\n\r\n* Remove search service, update add_product service schema\r\n\r\n* Fix pylint suggestion\r\n\r\n* Add more tests and removed unused code\r\n\r\n* Remove code needed for the removed service, clean tests from obvious comments and add type hints\r\n\r\n* Remove unused import\r\n\r\n* Remove unnecessary comments and simplify getting the config entry id\r\n\r\nCo-authored-by: Allen Porter \r\n\r\n* Don't use hass.data in tests, make device id mandatory for service\r\n\r\n* Rewrite all service tests so using lru.cache is not needed\r\n\r\n* Add test for uncovered line in _product_search()\r\n\r\n* Require a config entry id as service parameter instead of device id\r\n\r\n* Use explicit check in get_api_client() and raise HomeAssistantError\r\n\r\n* Fix HomeAssistantError import, fix services tests\r\n\r\n* Change HomeAssistantError to ValueError when config entry is not found\r\n\r\nCo-authored-by: Allen Porter ", "code": "def picnic_api_client():\n \n with patch(\n \"homeassistant.components.picnic.create_picnic_client\"\n ) as create_picnic_client_mock:\n picnic_client_mock = create_picnic_api_client(UNIQUE_ID)\n create_picnic_client_mock.return_value = picnic_client_mock\n\n yield picnic_client_mock\n\n\n@pytest.fixture", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 53, "n_words": 17, "vocab_size": 14, "complexity": 1, "nloc": 7, "token_counts": 26, "n_ast_nodes": 59, "n_identifiers": 9, "random_cut": "def picnic_api_client():\n \n with patch(\n \"homeassistant.components.picnic.create_picnic_client\"\n ) as create_picnic_client_mock:\n picnic_client_mock = create_picnic_api_client(UNIQUE_ID)\n " }, { "id": 184537, "commit_id": "c891f6b70a0e885d2afe9a02bebb40e4af2864a6", "repo": "textual", "path": "docs/examples/introduction/stopwatch.py", "file_name": "stopwatch.py", "fun_name": "action_remove_stopwatch", "commit_message": "fix for call_later and scroll_to_widget", "code": "def action_remove_stopwatch(self) -> None:\n \n timers = self.query(\"#timers Stopwatch\")\n if timers:\n timers.last().remove()\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 43, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 28, "n_ast_nodes": 52, "n_identifiers": 6, "random_cut": "def action_remove_stopwatch(self) -> None:\n \n timers = self.query(\"#timers Stopwatch\")\n" }, { "id": 285232, "commit_id": "9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/common/behavioural_analysis/test_sentimentinvestor_view.py", "file_name": "test_sentimentinvestor_view.py", "fun_name": "test_display_trending_empty_df", "commit_message": "Here we merge all API Refactor related branches (#2236)\n\n* Update api.py\r\n\r\n* Updated forex menu\r\n\r\n* refactor ycrv command\r\n\r\n* refactor ycrv command black\r\n\r\n* refactor ecocal command\r\n\r\n* Minh changes\r\n\r\n* Adding space to test pushing\r\n\r\n* title fix ecocal df\r\n\r\n* get economic calendar 
annotation\r\n\r\n* fix investingcom tests\r\n\r\n* refactor index command\r\n\r\n* refactor overview command\r\n\r\n* give defaults to wsj view function args\r\n\r\n* rename date args investincom\r\n\r\n* refacto bigmac command\r\n\r\n* fix ecocal typo\r\n\r\n* refactor rtps command\r\n\r\n* alphavantage gdp\r\n\r\n* alphavantage gdp per capita\r\n\r\n* alphavantage cpi\r\n\r\n* alphavantage tyld\r\n\r\n* alphavantage inf\r\n\r\n* refactor macro command\r\n\r\n* refactor macro command w helpers\r\n\r\n* refactor treasury command\r\n\r\n* fix macro on terminal\r\n\r\n* treasury labels\r\n\r\n* refactor maturities\r\n\r\n* update treasury maturities doc strings\r\n\r\n* refactor get economic calendar finhub\r\n\r\n* refactor map command api\r\n\r\n* display map filter choices\r\n\r\n* route economy api to performance map\r\n\r\n* route economy api to performance map\r\n\r\n* display group choices on valuation command\r\n\r\n* refactor performance and valuation commands\r\n\r\n* refactor spectrum model and view\r\n\r\n* add choices to spectrum controller\r\n\r\n* delete image after view\r\n\r\n* fix model tests finviz\r\n\r\n* fix finciz view tests\r\n\r\n* refactor futures\r\n\r\n* fix some tests\r\n\r\n* fix more tests\r\n\r\n* fix controller test\r\n\r\n* refactor fred series notes\r\n\r\n* update fred notes docstring\r\n\r\n* refacto fred series ids\r\n\r\n* fix pred and qa when empty datasets\r\n\r\n* refactor fred\r\n\r\n* uncomment stuff\r\n\r\n* refacto get series data\r\n\r\n* fix some tests\r\n\r\n* set defaults on args\r\n\r\n* refactor fred yield curve\r\n\r\n* black\r\n\r\n* fix spell and remove ecocal names\r\n\r\n* fix linting\r\n\r\n* linting\r\n\r\n* pylint fix\r\n\r\n* change dangerous defaults\r\n\r\n* Working through crypto fixes (#2256)\r\n\r\n* Working through crypto fixes\r\n\r\n* Continued adding crypto stuff\r\n\r\n* Added crypto overview\r\n\r\n* Added test fixes\r\n\r\n* Added fixtures\r\n\r\n* Fixed tests\r\n\r\n* Fixed charting issue\r\n\r\n* Removed broken APIs\r\n\r\n* Final adjustments\r\n\r\n* Added test fixes\r\n\r\n* map get groups and get ycrv countries into old api\r\n\r\n* exposed econdb helper funcs\r\n\r\n* remove helpers\r\n\r\n* refactor search indices\r\n\r\n* linting\r\n\r\n* refactor arg currency\r\n\r\n* pylint from currency\r\n\r\n* Started switching crpyto ascending to ascend\r\n\r\n* Merging\r\n\r\n* Portfolio model arguements, params, and docstring\r\n\r\n* Refactored for etf commands (#2292)\r\n\r\n* Refactored for etf commands\r\n\r\n* Fixed tests\r\n\r\n* Added load command\r\n\r\n* Fixed menu\r\n\r\n* Portfolio logic fixes\r\n\r\n* Added econometrics (#2260)\r\n\r\n* Added econometrics\r\n\r\n* Fixed tests\r\n\r\n* Simplified API\r\n\r\n* Added test fixes\r\n\r\n* Added test csv\r\n\r\n* Allowed examples to be loaded\r\n\r\n* Fund refactor (#2291)\r\n\r\n* Fund refactor\r\n\r\n* Changed fund_name and fund to name\r\n\r\n* Changed ascending to ascend\r\n\r\n* Stock menu refactoring for easier API usage (#2194)\r\n\r\n* Stocks refactoring for easier API usage\r\n\r\n* Linting\r\n\r\n* Refactor newly added features\r\n\r\n* Linting\r\n\r\n* Fixing tests\r\n\r\n* Refactor common files used by stocks menu\r\n\r\n* Fixing flake8\r\n\r\n* Fix linting and tests\r\n\r\n* Linting\r\n\r\n* Fix flake8\r\n\r\n* refactor insider_data\r\n\r\n* refactor mentions\r\n\r\n* refactor watchlist\r\n\r\n* refactor sentiment\r\n\r\n* refactor sentiment\r\n\r\n* fix yahoofinance tests\r\n\r\n* refactor load and candle\r\n\r\n* refactor get_news and 
display_news\r\n\r\n* refactor stocks.ins.act\r\n\r\n* candle default matplotlib\r\n\r\n* fix yahoofinance_view tests\r\n\r\n* fix ark model tests\r\n\r\n* fix ark view tests\r\n\r\n* fix business insider model\r\n\r\n* fix business insider view\r\n\r\n* refactor csimarket model\r\n\r\n* fix tests csi market model\r\n\r\n* update dd controller\r\n\r\n* fix get suppliers tests\r\n\r\n* fix dd controller tests\r\n\r\n* fix finhub tests\r\n\r\n* fix finviz tests\r\n\r\n* fix fmp tests\r\n\r\n* fix marketwatch tests\r\n\r\n* corrected argument keywords in test_bt_model\r\n\r\n* corrected argument keywords in test_bt_view\r\n\r\n* refactor fa controller\r\n\r\n* refactor marketwatch view\r\n\r\n* refactor gov controller\r\n\r\n* fix tests fa av\r\n\r\n* fix tests elect\r\n\r\n* fix dcf tests\r\n\r\n* fix polygon tests\r\n\r\n* fix fmp tests\r\n\r\n* fix quiverquant tests\r\n\r\n* fix yahoofinance fa tests\r\n\r\n* fix more fa tests\r\n\r\n* fix insider tests\r\n\r\n* fix more tests\r\n\r\n* fix more tests\r\n\r\n* fix options tests\r\n\r\n* fix stock gov tests\r\n\r\n* fix tests test_ba_controller\r\n\r\n* fix tests for test_finviz_compare_model.py\r\n\r\n* fixed 2 tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fix final tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* Fix tests\r\n\r\n* black\r\n\r\n* forgot to black tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* flakefix\r\n\r\n* Tests + code : Stocks / Discovery\r\n\r\n* fix tests\r\n\r\n* added recorder\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* black\r\n\r\n* black\r\n\r\n* remove unused imports\r\n\r\n* refactor display raw\r\n\r\n* sia dicts fix\r\n\r\n* pylint\r\n\r\n* linting\r\n\r\n* remove dangerous default\r\n\r\n* fix tests\r\n\r\n* fix beta model test\r\n\r\n* black\r\n\r\n* skip screener qa test\r\n\r\n* change sector path to sectors\r\n\r\n* update tests readme\r\n\r\n* fix metric defaults\r\n\r\n* black\r\n\r\n* substitute lost ticker\r\n\r\n* defaults cpic\r\n\r\n* another round on sia\r\n\r\n* refactor cramer\r\n\r\n* reduce default tweets on sentiment\r\n\r\n* refactor yf hist, corr, volume\r\n\r\n* arkorders default\r\n\r\n* refactor income, balance, cashflow\r\n\r\n* refacto scorr, screener, getfinnhub\r\n\r\n* refactor stockgrid\r\n\r\n* ibkr refactor\r\n\r\n* another round on stockgrid\r\n\r\n* add dividens end point\r\n\r\n* refactor discovery endpoints\r\n\r\n* update docstrings with similar input\r\n\r\n* refactor messages\r\n\r\n* refactor ba\r\n\r\n* refactor regioons\r\n\r\n* refactor twitter sentiment\r\n\r\n* refactor hist\r\n\r\n* refactor regions\r\n\r\n* give default to timeframe\r\n\r\n* refactor bunch of defaults and arg names\r\n\r\n* remove leftover imports\r\n\r\n* refactor vwap\r\n\r\n* let tests run\r\n\r\n* fix tests\r\n\r\n* fix stock tests\r\n\r\n* fix stockanalysis tests\r\n\r\n* flake\r\n\r\n* MYPY\r\n\r\n* Made important changes\r\n\r\n* added fixes\r\n\r\n* Fixed big issue\r\n\r\n* Added fixes to tests\r\n\r\n* fix qa tests\r\n\r\n* fix tests\r\n\r\n* fix 1 more test\r\n\r\n* last stocks failing\r\n\r\n* fix crypto test\r\n\r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: montezdesousa \r\nCo-authored-by: hjoaquim \r\nCo-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com>\r\nCo-authored-by: colin99d \r\n\r\n* fix portfolio 
tests\r\n\r\n* change period to window\r\n\r\n* update ca docstrings\r\n\r\n* refactor get_similar_companies func\r\n\r\n* Fixed\r\n\r\n* Update CI\r\n\r\n* Update CI 2\r\n\r\n* Update CI 3\r\n\r\n* Update dependencies\r\n\r\nCo-authored-by: colin99d \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: montezdesousa \r\nCo-authored-by: James Simmons \r\nCo-authored-by: Theodore Aptekarev \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com>\r\nCo-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com>\r\nCo-authored-by: northern-64bit <75195383+northern-64bit@users.noreply.github.com>\r\nCo-authored-by: hjoaquim ", "code": "def test_display_trending_empty_df(mocker):\n view = \"openbb_terminal.common.behavioural_analysis.sentimentinvestor_view\"\n\n # MOCK GET_HISTORICAL\n mocker.patch(\n target=f\"{view}.sentimentinvestor_model.get_trending\",\n return_value=pd.DataFrame(),\n )\n\n sentimentinvestor_view.display_trending(\n start_date=datetime(2021, 12, 21),\n hour=9,\n number=10,\n limit=10,\n export=\"\",\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 87, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 13, "token_counts": 58, "n_ast_nodes": 94, "n_identifiers": 16, "random_cut": "def test_display_trending_empty_df(mocker):\n view = \"o" }, { "id": 59966, "commit_id": "045492f4d2205a0029514f5f00ec7560c06059a8", "repo": "prefect", "path": "src/prefect/agent.py", "file_name": "agent.py", "fun_name": "start", "commit_message": "Agent: Add limit to control number of concurrent flow runs (#7361)\n\nCo-authored-by: Thomas Pedersen \r\nCo-authored-by: Michael Adkins ", "code": "async def start(self):\n self.started = True\n self.task_group = anyio.create_task_group()\n self.limiter = (\n anyio.CapacityLimiter(self.limit) if self.limit is not None else None\n )\n self.client = get_client()\n await self.client.__aenter__()\n await self.task_group.__aenter__()\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 87, "n_words": 28, "vocab_size": 23, "complexity": 2, "nloc": 9, "token_counts": 65, "n_ast_nodes": 107, "n_identifiers": 12, "random_cut": "async def start(self):\n self.started = True\n self.task_group = anyio.create_task_group()\n self.limiter = (\n anyio.CapacityLimiter(self" }, { "id": 107773, "commit_id": "2357c92d87d96d519c8470776e76180e71663d0b", "repo": "matplotlib", "path": "lib/matplotlib/axis.py", "file_name": "axis.py", "fun_name": "_reset_major_tick_kw", "commit_message": "Refactor handling of tick and ticklabel visiblity in Axis.clear()\n\nThis is a follow-up to #20826, which makes the exceptions from clearing\nmore explicit.", "code": "def _reset_major_tick_kw(self, keep_tick_and_label_visibility=False):\n \n backup = {name: value for name, value in self._major_tick_kw.items()\n if name in ['tick1On', 'tick2On', 'label1On', 'label2On']}\n self._major_tick_kw.clear()\n if keep_tick_and_label_visibility:\n self._major_tick_kw.update(backup)\n self._major_tick_kw['gridOn'] = (\n mpl.rcParams['axes.grid'] and\n mpl.rcParams['axes.grid.which'] in ('both', 'major'))\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, 
"ast_levels": 11, "n_whitespaces": 125, "n_words": 32, "vocab_size": 27, "complexity": 5, "nloc": 9, "token_counts": 87, "n_ast_nodes": 150, "n_identifiers": 12, "random_cut": "def _reset_major_tick_kw(self, keep_tick_and_label_visibility=False):\n \n backup = {name: value for name, value in self._major_tick_kw.items()\n if name in ['tick1On', 'tick2On', 'label1On', 'label2On']}\n self._major_tick_kw.clear()\n if keep_tick_and_label_visibility:\n self._major_tick_kw.update(backup)\n self._major_tick_kw['gridOn'] = (\n mpl.rcParams[" }, { "id": 15728, "commit_id": "4e4e4e5d50f9a10f38d2aac5ea07696b84b365c4", "repo": "ccxt", "path": "python/ccxt/async_support/bitbns.py", "file_name": "bitbns.py", "fun_name": "describe", "commit_message": "1.67.89\n\n[ci skip]", "code": "def describe(self):\n return self.deep_extend(super(bitbns, self).describe(), {\n 'id': 'bitbns',\n 'name': 'Bitbns',\n 'countries': ['IN'], # India\n 'rateLimit': 1000,\n 'certified': False,\n 'pro': False,\n 'version': 'v2',\n # new metainfo interface\n 'has': {\n 'spot': True,\n 'margin': None,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'cancelOrder': True,\n 'createOrder': True,\n 'fetchBalance': True,\n 'fetchDepositAddress': True,\n 'fetchDeposits': True,\n 'fetchFundingHistory': False,\n 'fetchFundingRate': False,\n 'fetchFundingRateHistory': False,\n 'fetchFundingRates': False,\n 'fetchIndexOHLCV': False,\n 'fetchIsolatedPositions': False,\n 'fetchLeverage': False,\n 'fetchMarkets': True,\n 'fetchMarkOHLCV': False,\n 'fetchMyTrades': True,\n 'fetchOHLCV': None,\n 'fetchOpenOrders': True,\n 'fetchOrder': True,\n 'fetchOrderBook': True,\n 'fetchPositions': False,\n 'fetchPositionsRisk': False,\n 'fetchPremiumIndexOHLCV': False,\n 'fetchStatus': True,\n 'fetchTicker': 'emulated',\n 'fetchTickers': True,\n 'fetchTrades': True,\n 'fetchWithdrawals': True,\n 'reduceMargin': False,\n 'setLeverage': False,\n 'setPositionMode': False,\n },\n 'timeframes': {\n },\n 'urls': {\n 'logo': 'https://user-images.githubusercontent.com/1294454/117201933-e7a6e780-adf5-11eb-9d80-98fc2a21c3d6.jpg',\n 'api': {\n 'www': 'https://bitbns.com',\n 'v1': 'https://api.bitbns.com/api/trade/v1',\n 'v2': 'https://api.bitbns.com/api/trade/v2',\n },\n 'www': 'https://bitbns.com',\n 'referral': 'https://ref.bitbns.com/1090961',\n 'doc': [\n 'https://bitbns.com/trade/#/api-trading/',\n ],\n 'fees': 'https://bitbns.com/fees',\n },\n 'api': {\n 'www': {\n 'get': [\n 'order/fetchMarkets',\n 'order/fetchTickers',\n 'order/fetchOrderbook',\n 'order/getTickerWithVolume',\n 'exchangeData/ohlc', # ?coin=${coin_name}&page=${page}\n 'exchangeData/orderBook',\n 'exchangeData/tradedetails',\n ],\n },\n 'v1': {\n 'get': [\n 'platform/status',\n 'tickers',\n 'orderbook/sell/{symbol}',\n 'orderbook/buy/{symbol}',\n ],\n 'post': [\n 'currentCoinBalance/EVERYTHING',\n 'getApiUsageStatus/USAGE',\n 'getOrderSocketToken/USAGE',\n 'currentCoinBalance/{symbol}',\n 'orderStatus/{symbol}',\n 'depositHistory/{symbol}',\n 'withdrawHistory/{symbol}',\n 'withdrawHistoryAll/{symbol}',\n 'depositHistoryAll/{symbol}',\n 'listOpenOrders/{symbol}',\n 'listOpenStopOrders/{symbol}',\n 'getCoinAddress/{symbol}',\n 'placeSellOrder/{symbol}',\n 'placeBuyOrder/{symbol}',\n 'buyStopLoss/{symbol}',\n 'sellStopLoss/{symbol}',\n 'placeSellOrder/{symbol}',\n 'cancelOrder/{symbol}',\n 'cancelStopLossOrder/{symbol}',\n 'listExecutedOrders/{symbol}',\n 'placeMarketOrder/{symbol}',\n 'placeMarketOrderQnty/{symbol}',\n ],\n },\n 'v2': {\n 'post': [\n 'orders',\n 'cancel',\n 
'getordersnew',\n 'marginOrders',\n ],\n },\n },\n 'fees': {\n 'trading': {\n 'feeSide': 'quote',\n 'tierBased': False,\n 'percentage': True,\n 'taker': self.parse_number('0.0025'),\n 'maker': self.parse_number('0.0025'),\n },\n },\n 'exceptions': {\n 'exact': {\n '400': BadRequest, # {\"msg\":\"Invalid Request\",\"status\":-1,\"code\":400}\n '409': BadSymbol, # {\"data\":\"\",\"status\":0,\"error\":\"coin name not supplied or not yet supported\",\"code\":409}\n '416': InsufficientFunds, # {\"data\":\"Oops ! Not sufficient currency to sell\",\"status\":0,\"error\":null,\"code\":416}\n '417': OrderNotFound, # {\"data\":[],\"status\":0,\"error\":\"Nothing to show\",\"code\":417}\n },\n 'broad': {},\n },\n })\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 2545, "n_words": 246, "vocab_size": 161, "complexity": 1, "nloc": 134, "token_counts": 434, "n_ast_nodes": 814, "n_identifiers": 10, "random_cut": "def describe(self):\n return self.deep_extend(super(bitbns, self).describe(), {\n 'id': 'bitbns',\n 'name': 'Bitbns',\n 'countries': ['IN'], # India\n 'rateLimit': 1000,\n 'certified': False,\n 'pro': False,\n 'version': 'v2',\n # new metainfo interface\n 'has': {\n 'spot': True,\n 'margin': None,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'cancelOrder': True,\n 'createOrder': True,\n 'fetchBalance': True,\n 'fetchDepositAddress': True,\n 'fetchDeposits': True,\n 'fetchFundingHistory': False,\n 'fetchFundingRate': False,\n 'fetchFundingRateHistory': False,\n 'fetchFundingRates': False,\n 'fetchIndexOHLCV': False,\n 'fetchIsolatedPositions': False,\n 'fetchLeverage': False,\n 'fetchMarkets': True,\n 'fetchMarkOHLCV': False,\n 'fetchMyTrades': True,\n 'fetchOHLCV': None,\n 'fetchOpenOrders': True,\n 'fetchOrder': True,\n 'fetchOrderBook': True,\n 'fetchPositions': False,\n 'fetchPositionsRisk': False,\n 'fetchPremiumIndexOHLCV': False,\n 'fetchStatus': True,\n 'fetchTicker': 'emulated',\n 'fetchTickers': True,\n 'fetchTrades': True,\n 'fetchWithdrawals': True,\n 'reduceMargin': False,\n 'setLeverage': False,\n 'setPositionMode': False,\n },\n 'timeframes': {\n },\n 'urls': {\n 'logo': 'https://user-images.githubusercontent.com/1294454/117201933-e7a6e780-adf5-11eb-9d80-98fc2a21c3d6.jpg',\n 'api': {\n 'www': 'https://bitbns.com',\n 'v1': 'https://api.bitbns.com/api/trade/v1',\n 'v2': 'https://api.bitbns.com/api/trade/v2',\n },\n 'www': 'https://bitbns.com',\n 'referral': 'https://ref.bitbns.com/1090961',\n 'doc': [\n 'https://bitbns.com/trade/#/api-trading/',\n ],\n 'fees': 'https://bitbns.com/fees',\n },\n 'api': {\n 'www': {\n 'get': [\n 'order/fetchMarkets',\n 'order/fetchTickers',\n 'order/fetchOrderbook',\n 'order/getTickerWit" }, { "id": 222071, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ctypes/test/test_memfunctions.py", "file_name": "test_memfunctions.py", "fun_name": "test_overflow", "commit_message": "add python 3.10.4 for windows", "code": "def test_overflow(self):\n # string_at and wstring_at must use the Python calling\n # convention (which acquires the GIL and checks the Python\n # error flag). 
Provoke an error and catch it; see also issue\n # #3554: \n self.assertRaises((OverflowError, MemoryError, SystemError),\n lambda: wstring_at(u\"foo\", sys.maxint - 1))\n self.assertRaises((OverflowError, MemoryError, SystemError),\n lambda: string_at(\"foo\", sys.maxint - 1))\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 144, "n_words": 52, "vocab_size": 36, "complexity": 1, "nloc": 5, "token_counts": 56, "n_ast_nodes": 87, "n_identifiers": 10, "random_cut": "def test_overflow(self):\n # string_at and wstring_at must use the Python calling\n # convention (which acquires the GIL and checks the Python\n # error flag). Provoke an error and catch it; see also issue\n # #3554: dict[str, dict[str, Any]]:\n \n\n devices = []\n try:\n for dev in await self.client.async_get_devices():\n devices.append(dev)\n except (pysensibo.SensiboError) as error:\n raise UpdateFailed from error\n\n device_data: dict[str, dict[str, Any]] = {}\n for dev in devices:\n unique_id = dev[\"id\"]\n name = dev[\"room\"][\"name\"]\n temperature = dev[\"measurements\"].get(\"temperature\", 0.0)\n humidity = dev[\"measurements\"].get(\"humidity\", 0)\n ac_states = dev[\"acState\"]\n target_temperature = ac_states.get(\"targetTemperature\")\n hvac_mode = ac_states.get(\"mode\")\n running = ac_states.get(\"on\")\n fan_mode = ac_states.get(\"fanLevel\")\n swing_mode = ac_states.get(\"swing\")\n available = dev[\"connectionStatus\"].get(\"isAlive\", True)\n capabilities = dev[\"remoteCapabilities\"]\n hvac_modes = list(capabilities[\"modes\"])\n if hvac_modes:\n hvac_modes.append(\"off\")\n current_capabilities = capabilities[\"modes\"][ac_states.get(\"mode\")]\n fan_modes = current_capabilities.get(\"fanLevels\")\n swing_modes = current_capabilities.get(\"swing\")\n temperature_unit_key = dev.get(\"temperatureUnit\") or ac_states.get(\n \"temperatureUnit\"\n )\n temperatures_list = (\n current_capabilities[\"temperatures\"]\n .get(temperature_unit_key, {})\n .get(\"values\", [0, 1])\n )\n if temperatures_list:\n temperature_step = temperatures_list[1] - temperatures_list[0]\n features = list(ac_states)\n state = hvac_mode if hvac_mode else \"off\"\n\n fw_ver = dev[\"firmwareVersion\"]\n fw_type = dev[\"firmwareType\"]\n model = dev[\"productModel\"]\n\n calibration_temp = dev[\"sensorsCalibration\"].get(\"temperature\", 0.0)\n calibration_hum = dev[\"sensorsCalibration\"].get(\"humidity\", 0.0)\n\n device_data[unique_id] = {\n \"id\": unique_id,\n \"name\": name,\n \"ac_states\": ac_states,\n \"temp\": temperature,\n \"humidity\": humidity,\n \"target_temp\": target_temperature,\n \"hvac_mode\": hvac_mode,\n \"on\": running,\n \"fan_mode\": fan_mode,\n \"swing_mode\": swing_mode,\n \"available\": available,\n \"hvac_modes\": hvac_modes,\n \"fan_modes\": fan_modes,\n \"swing_modes\": swing_modes,\n \"temp_unit\": temperature_unit_key,\n \"temp_list\": temperatures_list,\n \"temp_step\": temperature_step,\n \"features\": features,\n \"state\": state,\n \"fw_ver\": fw_ver,\n \"fw_type\": fw_type,\n \"model\": model,\n \"calibration_temp\": calibration_temp,\n \"calibration_hum\": calibration_hum,\n }\n return device_data\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 1071, "n_words": 194, "vocab_size": 153, "complexity": 8, "nloc": 72, "token_counts": 454, "n_ast_nodes": 775, "n_identifiers": 43, "random_cut": "async def 
_async_update_data(self) -> dict[str, dict[str, Any]]:\n \n\n devices = []\n try:\n for dev in await self.client.async_get_devices():\n devices.append(dev)\n except (pysensibo.SensiboError) as error:\n raise UpdateFailed from error\n\n device_data: dict[str, dict[str, Any]] = {}\n for dev in devices:\n unique_id = dev[\"id\"]\n name = dev[\"room\"][\"name\"]\n temperature = dev[\"measurements\"].get(\"temperature\", 0.0)\n " }, { "id": 177694, "commit_id": "03bd7e0238b7c21d6276e0b927a1722ed7c0aedc", "repo": "label-studio", "path": "label_studio/labels_manager/api.py", "file_name": "api.py", "fun_name": "post", "commit_message": "feat: DEV-1926: Add labels api (#2128)\n\n* feat: DEV-1926: Add labels api\r\n\r\n* Update DM to master branch\r\n\r\nCo-authored-by: hlomzik ", "code": "def post(self, request):\n serializer = LabelBulkUpdateSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n project = serializer.validated_data['project']\n if project is not None:\n self.check_object_permissions(self.request, project)\n\n updated_count = bulk_update_label(\n old_label=serializer.validated_data['old_label'],\n new_label=serializer.validated_data['new_label'],\n organization=self.request.user.active_organization,\n project=project,\n )\n return Response({'annotations_updated': updated_count})\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 131, "n_words": 28, "vocab_size": 25, "complexity": 2, "nloc": 13, "token_counts": 95, "n_ast_nodes": 150, "n_identifiers": 19, "random_cut": "def post(self, request):\n serializer = LabelBulkUpdateSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n project = serializer.validated_data['project']\n if project is not " }, { "id": 47006, "commit_id": "6933022e94acf139b2dea9a589bb8b25c62a5d20", "repo": "airflow", "path": "tests/providers/google/cloud/hooks/test_dataplex.py", "file_name": "test_dataplex.py", "fun_name": "test_list_tasks", "commit_message": "Fix new MyPy errors in main (#22884)\n\nThose MyPe errors are side effect of some new dependencies.", "code": "def test_list_tasks(self, mock_client):\n self.hook.list_tasks(project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID)\n\n parent = f'projects/{PROJECT_ID}/locations/{REGION}/lakes/{LAKE_ID}'\n mock_client.return_value.list_tasks.assert_called_once_with(\n request=dict(\n parent=parent,\n page_size=None,\n page_token=None,\n filter=None,\n order_by=None,\n ),\n retry=DEFAULT,\n timeout=None,\n metadata=(),\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 178, "n_words": 21, "vocab_size": 21, "complexity": 1, "nloc": 15, "token_counts": 77, "n_ast_nodes": 122, "n_identifiers": 24, "random_cut": "def test_list_tasks(self, mock_client):\n self.hook.list_tasks(project_id=PROJECT_ID, region=REGION, lake_id=LAKE_ID)\n\n " }, { "id": 214576, "commit_id": "6ed3648502ddc7d44e8b6b3f9f8e6adcb15cf134", "repo": "flair", "path": "tests/test_models.py", "file_name": "test_models.py", "fun_name": "test_sequence_tagger_transformer_finetune", "commit_message": "make `add_unk` optional and don't use it for ner", "code": "def test_sequence_tagger_transformer_finetune(results_base_path, tasks_base_path):\n flair.set_seed(123)\n\n # load dataset\n corpus: Corpus = ColumnCorpus(\n data_folder=tasks_base_path / \"trivial\" / \"trivial_bioes\",\n column_format={0: \"text\", 1: 
\"ner\"},\n )\n tag_dictionary = corpus.make_label_dictionary(\"ner\", add_unk=False)\n\n # tagger without CRF\n tagger: SequenceTagger = SequenceTagger(\n hidden_size=64,\n embeddings=TransformerWordEmbeddings(\"distilbert-base-uncased\", fine_tune=True),\n tag_dictionary=tag_dictionary,\n tag_type=\"ner\",\n use_crf=False,\n use_rnn=False,\n reproject_embeddings=False,\n )\n\n # train\n trainer = ModelTrainer(tagger, corpus)\n trainer.fine_tune(\n results_base_path,\n mini_batch_size=2,\n max_epochs=10,\n shuffle=True,\n learning_rate=0.5e-4,\n )\n\n loaded_model: SequenceTagger = SequenceTagger.load(results_base_path / \"final-model.pt\")\n\n sentence = Sentence(\"this is New York\")\n sentence_empty = Sentence(\" \")\n\n loaded_model.predict(sentence)\n loaded_model.predict([sentence, sentence_empty])\n loaded_model.predict([sentence_empty])\n\n # check if loaded model can predict\n entities = [label.data_point.text for label in sentence.get_labels(\"ner\")]\n assert \"New York\" in entities\n\n # check if loaded model successfully fit the training data\n result: Result = loaded_model.evaluate(corpus.test, gold_label_type=\"ner\")\n assert result.classification_report[\"micro avg\"][\"f1-score\"] == 1.0\n\n del loaded_model\n\n\n@pytest.mark.integration", "url": "https://github.com/flairNLP/flair.git", "language": "Python", "ast_errors": "@pytest.mark.integration", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 294, "n_words": 117, "vocab_size": 93, "complexity": 2, "nloc": 35, "token_counts": 231, "n_ast_nodes": 377, "n_identifiers": 49, "random_cut": "def test_sequence_tagger_transformer_finetune(results_base_path, tasks_base_path):\n flair.set_seed(123)\n\n # load dataset\n corpus: Corpus = ColumnCorpus(\n data_folder=tasks_base_path / \"trivial\" / \"trivial_bioes\",\n column_format={0: \"text\", 1: \"ner\"},\n )\n tag_dictionary = corpus.make_label_dictionary(\"ner\", add_unk=False)\n\n # tagger without CR" }, { "id": 201885, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/basic/tests.py", "file_name": "tests.py", "fun_name": "test_objects_attribute_is_only_available_on_the_class_itself", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_objects_attribute_is_only_available_on_the_class_itself(self):\n with self.assertRaisesMessage(\n AttributeError, \"Manager isn't accessible via Article instances\"\n ):\n getattr(\n Article(),\n \"objects\",\n )\n self.assertFalse(hasattr(Article(), \"objects\"))\n self.assertTrue(hasattr(Article, \"objects\"))\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 110, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 10, "token_counts": 48, "n_ast_nodes": 86, "n_identifiers": 9, "random_cut": "def test_objects_attribute_is_only_available_on_the_class_itself(self):\n with self.assertRaisesMessage(\n AttributeError, \"Manager isn't accessible via Article instances\"\n ):\n getattr" }, { "id": 314845, "commit_id": "b7b8feda0ffb7487954545c96c50e7f64e2195bc", "repo": "core", "path": "tests/components/lcn/test_cover.py", "file_name": "test_cover.py", "fun_name": "test_setup_lcn_cover", "commit_message": "Add tests for LCN sensor and binary_sensor platforms (#67263)", "code": "async def test_setup_lcn_cover(hass, entry, lcn_connection):\n \n for entity_id in (\n COVER_OUTPUTS,\n COVER_RELAYS,\n ):\n state = hass.states.get(entity_id)\n assert state is not None\n assert 
state.state == STATE_OPEN\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 68, "n_words": 24, "vocab_size": 22, "complexity": 2, "nloc": 8, "token_counts": 41, "n_ast_nodes": 63, "n_identifiers": 11, "random_cut": "async def test_setup_lcn_cover(hass, entry, lcn_connection):\n \n for entity_id in (\n COVER_OUTPUTS,\n COVER_RELAYS,\n ):\n state = hass.states.get(entity_id)\n assert state is not None\n assert state.stat" }, { "id": 86766, "commit_id": "e9ce61066783c3601acd75fa74a9f4af6bd696c1", "repo": "sentry", "path": "src/sentry/integrations/jira/client.py", "file_name": "client.py", "fun_name": "get_project_key_for_id", "commit_message": "ref: type sentry/utils/assets.py and sentry/utils/http.py (#39624)", "code": "def get_project_key_for_id(self, project_id) -> str:\n if not project_id:\n return \"\"\n projects = self.get_projects_list()\n for project in projects:\n if project[\"id\"] == project_id:\n return project[\"key\"]\n return \"\"\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 89, "n_words": 25, "vocab_size": 20, "complexity": 4, "nloc": 8, "token_counts": 42, "n_ast_nodes": 73, "n_identifiers": 7, "random_cut": "def get_project_key_for_id(self, project_id) -> str:\n if not project_id:\n return \"\"\n projects = self.get_projects_list()\n for project in projects:\n if proj" }, { "id": 72791, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/api/v2/tests/test_pages.py", "file_name": "test_pages.py", "fun_name": "test_remove_id_field", "commit_message": "Reformat with black", "code": "def test_remove_id_field(self):\n response = self.get_response(fields=\"-id\")\n content = json.loads(response.content.decode(\"UTF-8\"))\n\n for page in content[\"items\"]:\n self.assertEqual(set(page.keys()), {\"meta\", \"title\"})\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 46, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 5, "token_counts": 57, "n_ast_nodes": 99, "n_identifiers": 13, "random_cut": "def test_remove_id_field(self):\n response = self.get_response(fields=\"-id\")\n content = json.loads(response.content.decode(\"UTF-8\"))\n\n for " }, { "id": 52018, "commit_id": "57d977303b4f6002eb8cc40ccb774146921c984a", "repo": "PaddleHub", "path": "modules/image/Image_editing/super_resolution/swinir_m_real_sr_x2/test.py", "file_name": "test.py", "fun_name": "tearDownClass", "commit_message": "Add swinir_m_real_sr_x2 Module (#2074)\n\n* add swinir_m_real_sr_x2\r\n\r\n* update README\r\n\r\n* fix typo\r\n\r\n* fix typo", "code": "def tearDownClass(cls) -> None:\n shutil.rmtree('tests')\n shutil.rmtree('swinir_m_real_sr_x2_output')\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 19, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 36, "n_identifiers": 4, "random_cut": "def tearDownClass(cls) -> None:\n shutil.r" }, { "id": 167627, "commit_id": "1ac13910aabaabeec0f00319d14d31a08e294475", "repo": "pandas", "path": "pandas/tests/reshape/concat/test_index.py", "file_name": "test_index.py", "fun_name": "test_concat_index_keep_dtype", "commit_message": "BUG: concat losing columns dtypes for join=outer 
(#47586)", "code": "def test_concat_index_keep_dtype(self, dtype):\n # GH#47329\n df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype=dtype))\n df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype=dtype))\n result = concat([df1, df2], ignore_index=True, join=\"outer\", sort=True)\n expected = DataFrame(\n [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype=dtype)\n )\n tm.assert_frame_equal(result, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 103, "n_words": 44, "vocab_size": 32, "complexity": 1, "nloc": 8, "token_counts": 138, "n_ast_nodes": 190, "n_identifiers": 18, "random_cut": "def test_concat_index_keep_dtype(self, dtype):\n # GH#47329\n df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype=dtype))\n df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype=dtype))\n result = concat([df1, df2]" }, { "id": 12750, "commit_id": "6ba1d165a2aad8e863006be69c813b5cac3d8a21", "repo": "jina", "path": "jina/serve/networking.py", "file_name": "networking.py", "fun_name": "_get_next_connection", "commit_message": "feat: do not await gather endpoints, simply schedule task (#5015)", "code": "async def _get_next_connection(self, num_retries=3):\n \n try:\n connection = None\n for i in range(len(self._connections)):\n internal_rr_counter = (self._rr_counter + i) % len(self._connections)\n connection = self._connections[internal_rr_counter]\n # connection is None if it is currently being reset. In that case, try different connection\n if connection is not None:\n break\n all_connections_unavailable = connection is None and num_retries <= 0\n if all_connections_unavailable:\n if num_retries <= 0:\n raise EstablishGrpcConnectionError(\n f'Error while resetting connections {self._connections}. Connections cannot be used.'\n )\n elif connection is None:\n # give control back to async event loop so connection resetting can be completed; then retry\n self._logger.debug(\n f' No valid connection found, give chance for potential resetting of connection'\n )\n try:\n await asyncio.wait_for(\n self._destroyed_event.wait(),\n timeout=GRACE_PERIOD_DESTROY_CONNECTION,\n )\n finally:\n return await self._get_next_connection(num_retries=num_retries - 1)\n except IndexError:\n # This can happen as a race condition while _removing_ connections\n self._rr_counter = 0\n connection = self._connections[self._rr_counter]\n self._rr_counter = (self._rr_counter + 1) % len(self._connections)\n return connection\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 620, "n_words": 145, "vocab_size": 94, "complexity": 9, "nloc": 30, "token_counts": 168, "n_ast_nodes": 280, "n_identifiers": 21, "random_cut": "async def _get_next_connection(self, num_retries=3):\n \n try:\n connection = None\n for i in range(len(self._connections)):\n internal_rr_counter = (self._rr_counter + i) % len(self._connections)\n connection = self._connections[internal_rr_counter]\n # connection is None if it is currently being reset. 
In that case, try different connec" }, { "id": 280997, "commit_id": "8f8147c3af76f03223943fe630a94dfb326b13c7", "repo": "OpenBBTerminal", "path": "tests/gamestonk_terminal/stocks/research/test_res_controller.py", "file_name": "test_res_controller.py", "fun_name": "test_print_help", "commit_message": "Tests : Stocks > Research + Screener (#1131)\n\n* Updating tests : stocks/research\r\n\r\n* Updating tests : stocks/screener\r\n\r\n* Updating tests : stocks/screener", "code": "def test_print_help():\n\n controller = res_controller.ResearchController(\n ticker=\"MOCK_TICKER\",\n start=datetime.strptime(\"2021-12-01\", \"%Y-%m-%d\"),\n interval=\"MOCK_INTERVAL\",\n queue=None,\n )\n controller.print_help()\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametrize(\n \"an_input, expected_queue\",\n [\n (\"\", []),\n (\"/help\", [\"quit\", \"quit\", \"help\"]),\n (\"help/help\", [\"help\"]),\n (\"q\", [\"quit\"]),\n (\"h\", []),\n (\n \"r\",\n [\n \"quit\",\n \"quit\",\n \"reset\",\n \"stocks\",\n \"load MOCK_TICKER\",\n \"res\",\n ],\n ),\n ],\n)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametrize(\n \"an_input, expected_queue\",\n [\n (\"\", []),\n (\"/help\", [\"quit\", \"quit\", \"help\"]),\n (\"help/help\", [\"help\"]),\n (\"q\", [\"quit\"]),\n (\"h\", []),\n (\n \"r\",\n [\n \"quit\",\n \"quit\",\n \"reset\",\n \"stocks\",\n \"load MOCK_TICKER\",\n \"res\",\n ],\n ),\n ],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 257, "n_words": 43, "vocab_size": 37, "complexity": 1, "nloc": 8, "token_counts": 39, "n_ast_nodes": 216, "n_identifiers": 16, "random_cut": "def test_print_help():\n\n controller = res_controller.ResearchController(\n ticker=\"MOCK_TICKER\",\n start=datetim" }, { "id": 224966, "commit_id": "a56ac6e0513bdea6860ed1fdc3debc10410638cd", "repo": "mkdocs", "path": "docs/img/plugin-events.py", "file_name": "plugin-events.py", "fun_name": "event", "commit_message": "Add plugin events that persist across builds in `mkdocs serve`\n\n\"One-time events\" `on_startup(command)`, `on_shutdown`.\n\nTheir presence also shows that a plugin *wants* to persist across builds. 
Otherwise they will be re-created, to not change any existing behavior.", "code": "def event(g, name, parameters):\n with cluster(\n g, f\"cluster_{name}\", href=f\"#{name}\", bgcolor=\"#ffff3388\", pencolor=\"#00000088\"\n ) as c:\n label = \"|\".join(f\"<{p}>{p}\" for p in parameters.split())\n node(c, name, shape=\"record\" if parameters else \"point\", label=label, fillcolor=\"#ffffff55\")\n\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 56, "n_words": 30, "vocab_size": 29, "complexity": 3, "nloc": 6, "token_counts": 72, "n_ast_nodes": 134, "n_identifiers": 16, "random_cut": "def event(g, name, parameters):\n with cluster(\n g, f\"cluster_{name}\", href=f\"#{" }, { "id": 24211, "commit_id": "8d46a1fbbe33d37fc858c53afd0e9fcd9cc185fa", "repo": "PaddleOCR", "path": "tools/infer_vqa_token_ser_re.py", "file_name": "infer_vqa_token_ser_re.py", "fun_name": "make_input", "commit_message": "add dygraph2static support of layoutlm series SER model", "code": "def make_input(ser_inputs, ser_results):\n entities_labels = {'HEADER': 0, 'QUESTION': 1, 'ANSWER': 2}\n\n entities = ser_inputs[8][0]\n ser_results = ser_results[0]\n assert len(entities) == len(ser_results)\n\n # entities\n start = []\n end = []\n label = []\n entity_idx_dict = {}\n for i, (res, entity) in enumerate(zip(ser_results, entities)):\n if res['pred'] == 'O':\n continue\n entity_idx_dict[len(start)] = i\n start.append(entity['start'])\n end.append(entity['end'])\n label.append(entities_labels[res['pred']])\n entities = dict(start=start, end=end, label=label)\n\n # relations\n head = []\n tail = []\n for i in range(len(entities[\"label\"])):\n for j in range(len(entities[\"label\"])):\n if entities[\"label\"][i] == 1 and entities[\"label\"][j] == 2:\n head.append(i)\n tail.append(j)\n\n relations = dict(head=head, tail=tail)\n\n batch_size = ser_inputs[0].shape[0]\n entities_batch = []\n relations_batch = []\n entity_idx_dict_batch = []\n for b in range(batch_size):\n entities_batch.append(entities)\n relations_batch.append(relations)\n entity_idx_dict_batch.append(entity_idx_dict)\n\n ser_inputs[8] = entities_batch\n ser_inputs.append(relations_batch)\n # remove ocr_info segment_offset_id and label in ser input\n ser_inputs.pop(7)\n ser_inputs.pop(6)\n ser_inputs.pop(5)\n return ser_inputs, entity_idx_dict_batch\n\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 324, "n_words": 126, "vocab_size": 81, "complexity": 8, "nloc": 39, "token_counts": 310, "n_ast_nodes": 507, "n_identifiers": 29, "random_cut": "def make_input(ser_inputs, ser_results):\n entities_labels = {'HEADER': 0, 'QUESTION': 1, 'ANSWER': 2}\n\n entities = ser_inputs[8][0]\n ser_results = ser_results[0]\n assert len(entities) == len(ser_results)\n\n # entities\n start = []\n end = []\n label = []\n entity_idx_dict = {}\n for i, (res, entity) in enumerate(zip(ser_results, entities)):\n if res['pred'] == 'O':\n continue\n entity_idx_dict[len(start)] = i\n " }, { "id": 301684, "commit_id": "45e4dd379b54847174b1f69ca138ba5fe73d24f9", "repo": "core", "path": "homeassistant/components/hunterdouglas_powerview/cover.py", "file_name": "cover.py", "fun_name": "open_position", "commit_message": "Add support for topdown shades to hunterdouglas_powerview (#62788)\n\nCo-authored-by: J. 
Nick Koston ", "code": "def open_position(self) -> PowerviewShadeMove:\n \n return PowerviewShadeMove(self._shade.open_position, {})\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 4, "random_cut": "def open_position(self) -> PowerviewShadeMove:\n \n return PowerviewShadeMove(self._shade.open_position, {})\n" }, { "id": 278417, "commit_id": "80ee2fa4e1db2dda14370110830db82be3eb97b7", "repo": "keras", "path": "keras/utils/composite_tensor_support_test.py", "file_name": "composite_tensor_support_test.py", "fun_name": "test_sparse_tensors", "commit_message": "resolve line-too-long in utils", "code": "def test_sparse_tensors(self, use_dict, use_dataset, action):\n data = [\n (\n tf.SparseTensor(\n [[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3]\n ),\n np.array([[[1, -1, -1]], [[2, 3, -1]]]),\n ),\n (\n tf.SparseTensor(\n [[0, 0, 0], [1, 0, 0], [1, 0, 1], [2, 0, 1]],\n [5, 6, 7, 8],\n [3, 1, 4],\n ),\n np.array(\n [[[5, -1, -1, -1]], [[6, 7, -1, -1]], [[-1, 8, -1, -1]]]\n ),\n ),\n ]\n # Prepare the model to test.\n input_name = get_input_name(use_dict)\n model_input = input_layer.Input(\n shape=(1, None), sparse=True, name=input_name, dtype=tf.int32\n )\n layers = [ToDense(default_value=-1)]\n model = get_model_from_layers_with_input(\n layers, model_input=model_input\n )\n model.compile(\n optimizer=\"sgd\",\n loss=\"mse\",\n metrics=[\"accuracy\"],\n **get_test_mode_kwargs()\n )\n kwargs = get_kwargs(use_dataset, action)\n\n # Prepare the input data\n for data_element in data:\n input_data, expected_output = prepare_inputs(\n data_element, use_dict, use_dataset, action, input_name\n )\n # Perform the action.\n if action == \"predict\":\n result = model.predict(input_data, **kwargs)\n self.assertAllEqual(expected_output, result)\n if action == \"evaluate\":\n result = model.evaluate(input_data, expected_output, **kwargs)\n self.assertAllEqual(1.0, result[-1])\n if action == \"fit\":\n # TODO(momernick): What's the best way of validating that fit\n # happened?\n _ = model.fit(\n input_data, expected_output, shuffle=False, **kwargs\n )\n\n\n@test_combinations.run_with_all_model_types\n@test_combinations.run_all_keras_modes", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@test_combinations.run_with_all_model_types\n@test_combinations.run_all_keras_modes", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 803, "n_words": 178, "vocab_size": 112, "complexity": 5, "nloc": 48, "token_counts": 392, "n_ast_nodes": 564, "n_identifiers": 46, "random_cut": "def test_sparse_tensors(self, use_dict, use_dataset, action):\n data = [\n (\n tf.SparseTensor(\n [[0, 0, 0], [1, 0, 0], [1, 0, 1]], [1, 2, 3], [2, 1, 3]\n ),\n np.array([[[1, -1, -1]], [[2, 3, -1]]]),\n ),\n (\n tf.SparseTensor(\n [[0, 0, 0], [1, 0, 0], [1, 0, 1], [2, 0, 1]],\n [5, 6, 7, 8],\n [3, 1, 4],\n ),\n np.array(\n [[[5, -1, -1, -1]], [[6, 7, -1, -1]], [[-1, 8, -1, -" }, { "id": 194313, "commit_id": "a8007dcdfb5159a711fa343d2ac4bb7df826975f", "repo": "vision", "path": "test/prototype_transforms_kernel_infos.py", "file_name": "prototype_transforms_kernel_infos.py", "fun_name": "sample_inputs_adjust_hue_image_tensor", "commit_message": "rename features._Feature to datapoints._Datapoint (#7002)\n\n* rename features._Feature to datapoints.Datapoint\r\n\r\n* _Datapoint to 
Datapoint\r\n\r\n* move is_simple_tensor to transforms.utils\r\n\r\n* fix CI\r\n\r\n* move Datapoint out of public namespace", "code": "def sample_inputs_adjust_hue_image_tensor():\n for image_loader in make_image_loaders(\n sizes=[\"random\"], color_spaces=(datapoints.ColorSpace.GRAY, datapoints.ColorSpace.RGB)\n ):\n yield ArgsKwargs(image_loader, hue_factor=_ADJUST_HUE_FACTORS[0])\n\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 32, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 44, "n_ast_nodes": 68, "n_identifiers": 12, "random_cut": "def sample_inputs_adjust_hue_image_tensor():\n for image_loader in make_image_loaders(\n sizes=[\"random\"], color_spaces=(datapoints.ColorSpace.GRAY, datapoints.ColorSp" }, { "id": 249383, "commit_id": "ba8938b090c7e1908cfa4feac75f08f3bc1183e8", "repo": "synapse", "path": "scripts-dev/check_pydantic_models.py", "file_name": "check_pydantic_models.py", "fun_name": "test_annotation_without_strict_raises", "commit_message": "Reject non-strict types in Pydantic models (#13502)", "code": "def test_annotation_without_strict_raises(self) -> None:\n with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):\n run_test_snippet(\n \n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 53, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 8, "token_counts": 23, "n_ast_nodes": 43, "n_identifiers": 6, "random_cut": "def test_annotation_without_strict_raises(self) -> None:\n with monkeypatch_pydantic(), self.assertRaises(ModelCheckerException):\n run_test_snippet(\n " }, { "id": 163050, "commit_id": "d70b95bc0e17d18bbefee8ac8a07e4fa5f33513c", "repo": "pandas", "path": "pandas/tests/series/indexing/test_setitem.py", "file_name": "test_setitem.py", "fun_name": "test_37477", "commit_message": "TST: tests for setitem-like casting issues (#45154)", "code": "def test_37477():\n # fixed by GH#45121\n orig = DataFrame({\"A\": [1, 2, 3], \"B\": [3, 4, 5]})\n expected = DataFrame({\"A\": [1, 2, 3], \"B\": [3, 1.2, 5]})\n\n df = orig.copy()\n df.at[1, \"B\"] = 1.2\n tm.assert_frame_equal(df, expected)\n\n df = orig.copy()\n df.loc[1, \"B\"] = 1.2\n tm.assert_frame_equal(df, expected)\n\n df = orig.copy()\n df.iat[1, 1] = 1.2\n tm.assert_frame_equal(df, expected)\n\n df = orig.copy()\n df.iloc[1, 1] = 1.2\n tm.assert_frame_equal(df, expected)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 106, "n_words": 62, "vocab_size": 29, "complexity": 1, "nloc": 15, "token_counts": 166, "n_ast_nodes": 243, "n_identifiers": 12, "random_cut": "def test_37477():\n # fixed by GH#45121\n orig = DataFrame({\"A\": [1, 2, 3], \"B\": [3, 4, 5]})\n expe" }, { "id": 250034, "commit_id": "acea4d7a2ff61b5beda420b54a8451088060a8cd", "repo": "synapse", "path": "tests/util/test_treecache.py", "file_name": "test_treecache.py", "fun_name": "test_pop_twolevel", "commit_message": "Add missing types to tests.util. 
(#14597)\n\nRemoves files under tests.util from the ignored by list, then\r\nfully types all tests/util/*.py files.", "code": "def test_pop_twolevel(self) -> None:\n cache = TreeCache()\n cache[(\"a\", \"a\")] = \"AA\"\n cache[(\"a\", \"b\")] = \"AB\"\n cache[(\"b\", \"a\")] = \"BA\"\n self.assertEqual(cache.pop((\"a\", \"a\")), \"AA\")\n self.assertEqual(cache.get((\"a\", \"a\")), None)\n self.assertEqual(cache.get((\"a\", \"b\")), \"AB\")\n self.assertEqual(cache.pop((\"b\", \"a\")), \"BA\")\n self.assertEqual(cache.pop((\"b\", \"a\")), None)\n self.assertEqual(len(cache), 1)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 105, "n_words": 36, "vocab_size": 25, "complexity": 1, "nloc": 11, "token_counts": 138, "n_ast_nodes": 249, "n_identifiers": 8, "random_cut": "def test_pop_twolevel(self) -> None:\n cache = TreeCache()\n cache[(\"a\", \"a\")] = \"AA\"\n cache[(\"a\", \"b\")] = \"AB\"\n cache[(\"b\", \"a\")] = \"BA\"\n self.assertEqual(cache.pop((\"a\", \"a\")), \"AA\")\n self.assertEqual(cache.get((\"a\", \"a\")), None)\n " }, { "id": 248552, "commit_id": "2959184a42398277ff916206235b844a8f7be5d7", "repo": "synapse", "path": "tests/test_event_auth.py", "file_name": "test_event_auth.py", "fun_name": "test_random_users_cannot_send_state_before_first_pl", "commit_message": "EventAuthTestCase: build events for the right room version\n\nIn practice, when we run the auth rules, all of the events have the right room\nversion. Let's stop building Room V1 events for these tests and use the right\nversion.", "code": "def test_random_users_cannot_send_state_before_first_pl(self):\n \n creator = \"@creator:example.com\"\n joiner = \"@joiner:example.com\"\n auth_events = [\n _create_event(RoomVersions.V1, creator),\n _join_event(RoomVersions.V1, creator),\n _join_event(RoomVersions.V1, joiner),\n ]\n\n # creator should be able to send state\n event_auth.check_auth_rules_for_event(\n RoomVersions.V1,\n _random_state_event(RoomVersions.V1, creator),\n auth_events,\n )\n\n # joiner should not be able to send state\n self.assertRaises(\n AuthError,\n event_auth.check_auth_rules_for_event,\n RoomVersions.V1,\n _random_state_event(RoomVersions.V1, joiner),\n auth_events,\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 247, "n_words": 49, "vocab_size": 30, "complexity": 1, "nloc": 20, "token_counts": 89, "n_ast_nodes": 135, "n_identifiers": 14, "random_cut": "def test_random_users_cannot_send_state_before_first_pl(self):\n \n creator = \"@creator:example.com\"\n joiner = \"@joiner:example.com\"\n auth_events = [\n _create_event(RoomVersions.V1, creator),\n _join_event(RoomVersions.V1, creator),\n " }, { "id": 209880, "commit_id": "799f272bc04c361841d01e9c0087950e0eb86610", "repo": "scapy", "path": "scapy/contrib/automotive/scanner/enumerator.py", "file_name": "enumerator.py", "fun_name": "_get_retry_iterator", "commit_message": "Improve reduce function for Automotive Scanner Enumerators (#3740)", "code": "def _get_retry_iterator(self, state):\n # type: (EcuState) -> Iterable[Packet]\n retry_entry = self._retry_pkt[state]\n if isinstance(retry_entry, Packet):\n log_automotive.debug(\"Provide retry packet\")\n return [retry_entry]\n else:\n log_automotive.debug(\"Provide retry iterator\")\n # assume self.retry_pkt is a generator or list\n return retry_entry\n", "url": 
"https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 115, "n_words": 33, "vocab_size": 28, "complexity": 2, "nloc": 8, "token_counts": 43, "n_ast_nodes": 74, "n_identifiers": 9, "random_cut": "def _get_retry_iterator(self, state):\n # type: (EcuState) -> Iterable[Packet]\n retry_entry = self._retry_pkt[state]\n if isinstance(retry_entry, Packet):\n log_automotive.debug(\"Provide retry packet\")\n return [retry_entry]\n else:\n log_automotive.debug(\"Provide retry iterator\")\n # assume self.retry_pkt is a gene" }, { "id": 35175, "commit_id": "e314c19a3ff52b39f33453ab6c7f7b3c6c12413e", "repo": "transformers", "path": "src/transformers/trainer.py", "file_name": "trainer.py", "fun_name": "_load_rng_state", "commit_message": "fix bug for the log of RNG states are not properly loaded exception. (#15638)\n\nCo-authored-by: muz ", "code": "def _load_rng_state(self, checkpoint):\n # Load RNG states from `checkpoint`\n if checkpoint is None:\n return\n\n local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank\n if local_rank != -1:\n rng_file = os.path.join(checkpoint, f\"rng_state_{local_rank}.pth\")\n if not os.path.isfile(os.path.join(checkpoint, rng_file)):\n logger.info(\n f\"Didn't find an RNG file for process {local_rank}, if you are resuming a training that \"\n \"wasn't launched in a distributed fashion, reproducibility is not guaranteed.\"\n )\n return\n else:\n rng_file = os.path.join(checkpoint, \"rng_state.pth\")\n if not os.path.isfile(rng_file):\n logger.info(\n \"Didn't find an RNG file, if you are resuming a training that was launched in a distributed \"\n \"fashion, reproducibility is not guaranteed.\"\n )\n return\n\n checkpoint_rng_state = torch.load(rng_file)\n random.setstate(checkpoint_rng_state[\"python\"])\n np.random.set_state(checkpoint_rng_state[\"numpy\"])\n torch.random.set_rng_state(checkpoint_rng_state[\"cpu\"])\n if torch.cuda.is_available():\n if self.args.local_rank != -1:\n torch.cuda.random.set_rng_state(checkpoint_rng_state[\"cuda\"])\n else:\n try:\n torch.cuda.random.set_rng_state_all(checkpoint_rng_state[\"cuda\"])\n except Exception as e:\n logger.info(\n f\"Didn't manage to set back the RNG states of the GPU because of the following error:\\n {e}\"\n \"\\nThis won't yield the same results as if the training had not been interrupted.\"\n )\n if is_torch_tpu_available():\n xm.set_rng_state(checkpoint_rng_state[\"xla\"])\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 630, "n_words": 152, "vocab_size": 92, "complexity": 10, "nloc": 37, "token_counts": 226, "n_ast_nodes": 402, "n_identifiers": 28, "random_cut": "def _load_rng_state(self, checkpoint):\n # Load RNG states from `checkpoint`\n if checkpoint is None:\n return\n\n local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank\n if local_rank != -1:\n rng_file = os.path.join(checkpoint, f\"rng_state_{local_rank}.pth\")\n if not os.path.isfile(os.path.join(checkpoint, rng_file)):\n logger.info(\n f\"Didn't find an RNG file for process {local_rank}, if you are resuming a training that \"\n \"wasn't launched in a distributed fashion, reproducibility is not guaranteed.\"\n )\n return\n else:\n rng_file = os.path.join(checkpoint, \"rng_state.pth\")\n if not os.path.isfile(rng_file):\n logger.info(\n \"Didn't find an RNG file, if you are resuming a training that 
was launched in a distributed \"\n \"fashion, reproducibility is not guaranteed.\"\n )\n return\n\n checkpoint_rng_state = torch.load(rng_file)\n random.setstate(checkpoint_rng_state[\"python\"])\n np.random.set_state(checkpoint_rng_state[\"numpy\"])\n torch.random.set_rng_state(checkpoint_rng_state[\"cpu\"])\n if torch.cuda.is_available():\n if self.args.local_rank != -1:\n torch.cuda.random.set_rng_state(checkpoint_rng_state[\"cuda\"])\n else:\n " }, { "id": 75022, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/__init__.py", "file_name": "__init__.py", "fun_name": "get_image_model", "commit_message": "Reformat with black", "code": "def get_image_model():\n \n from django.apps import apps\n\n model_string = get_image_model_string()\n try:\n return apps.get_model(model_string, require_ready=False)\n except ValueError:\n raise ImproperlyConfigured(\n \"WAGTAILIMAGES_IMAGE_MODEL must be of the form 'app_label.model_name'\"\n )\n except LookupError:\n raise ImproperlyConfigured(\n \"WAGTAILIMAGES_IMAGE_MODEL refers to model '%s' that has not been installed\"\n % model_string\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 128, "n_words": 42, "vocab_size": 36, "complexity": 3, "nloc": 14, "token_counts": 47, "n_ast_nodes": 83, "n_identifiers": 10, "random_cut": "def get_image_model():\n \n from django.apps import apps\n\n model_string = get_image_model_string()\n try:\n return apps.get_model(model_string, require_ready=False)\n except ValueError:\n raise ImproperlyConfigured(\n \"WAGTAILIMAGES_IMAGE_MODEL must be of the form 'app_label.model_name'\"\n )\n except LookupError:\n raise I" }, { "id": 266768, "commit_id": "a06fa496d3f837cca3c437ab6e9858525633d147", "repo": "ansible", "path": "test/lib/ansible_test/_internal/config.py", "file_name": "config.py", "fun_name": "only_targets", "commit_message": "ansible-test - Code cleanup and refactoring. (#77169)\n\n* Remove unnecessary PyCharm ignores.\r\n* Ignore intentional undefined attribute usage.\r\n* Add missing type hints. 
Fix existing type hints.\r\n* Fix docstrings and comments.\r\n* Use function to register completion handler.\r\n* Pass strings to display functions.\r\n* Fix CompositeAction handling of dest argument.\r\n* Use consistent types in expressions/assignments.\r\n* Use custom function to keep linters happy.\r\n* Add missing raise for custom exception.\r\n* Clean up key/value type handling in cloud plugins.\r\n* Use dataclass instead of dict for results.\r\n* Add custom type_guard function to check lists.\r\n* Ignore return type that can't be checked (yet).\r\n* Avoid changing types on local variables.", "code": "def only_targets(self, target_type): # type: (t.Type[THostConfig]) -> t.List[THostConfig]\n \n if not self.targets:\n raise Exception('There must be one or more targets.')\n\n assert type_guard(self.targets, target_type)\n\n return t.cast(t.List[THostConfig], self.targets)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 65, "n_words": 25, "vocab_size": 25, "complexity": 2, "nloc": 5, "token_counts": 44, "n_ast_nodes": 72, "n_identifiers": 10, "random_cut": "def only_targets(self, target_type): # type: (t.Type[THostConfig]) -> t.List[THostConfig]\n \n if not self.targets:\n raise Exception('There must be" }, { "id": 193916, "commit_id": "4d4711d970f5cbd0a9e1adb465dca2703c8efbfd", "repo": "vision", "path": "test/prototype_transforms_kernel_infos.py", "file_name": "prototype_transforms_kernel_infos.py", "fun_name": "sample_inputs_rotate_bounding_box", "commit_message": "[prototype] Switch to `spatial_size` (#6736)\n\n* Change `image_size` to `spatial_size`\r\n\r\n* Fix linter\r\n\r\n* Fixing more tests.\r\n\r\n* Adding get_num_channels_video and get_spatial_size_* kernels for video, masks and bboxes.\r\n\r\n* Refactor get_spatial_size\r\n\r\n* Reduce the usage of `query_chw` where possible\r\n\r\n* Rename `query_chw` to `query_spatial_size`\r\n\r\n* Adding `get_num_frames` dispatcher and kernel.\r\n\r\n* Adding jit-scriptability tests", "code": "def sample_inputs_rotate_bounding_box():\n for bounding_box_loader in make_bounding_box_loaders():\n yield ArgsKwargs(\n bounding_box_loader,\n format=bounding_box_loader.format,\n spatial_size=bounding_box_loader.spatial_size,\n angle=_ROTATE_ANGLES[0],\n )\n\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 73, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 8, "token_counts": 36, "n_ast_nodes": 54, "n_identifiers": 8, "random_cut": "def sample_inputs_rotate_bounding_box():\n for bounding_box_loader in make_bounding_box_loaders():\n yield ArgsKwargs(\n bounding_box_loader,\n " }, { "id": 179364, "commit_id": "cc0cff893f9d7d472788adc2510c123967b384fe", "repo": "gradio", "path": "test/test_outputs.py", "file_name": "test_outputs.py", "fun_name": "test_as_component", "commit_message": "Format The Codebase\n- black formatting\n- isort formatting", "code": "def test_as_component(self):\n y = \"happy\"\n label_output = gr.outputs.Label()\n label = label_output.postprocess(y)\n self.assertDictEqual(label, {\"label\": \"happy\"})\n self.assertEqual(label_output.deserialize(y), y)\n self.assertEqual(label_output.deserialize(label), y)\n with tempfile.TemporaryDirectory() as tmpdir:\n to_save = label_output.save_flagged(tmpdir, \"label_output\", label, None)\n self.assertEqual(to_save, y)\n y = {3: 0.7, 1: 0.2, 0: 0.1}\n label_output = gr.outputs.Label()\n 
label = label_output.postprocess(y)\n self.assertDictEqual(\n label,\n {\n \"label\": 3,\n \"confidences\": [\n {\"label\": 3, \"confidence\": 0.7},\n {\"label\": 1, \"confidence\": 0.2},\n {\"label\": 0, \"confidence\": 0.1},\n ],\n },\n )\n label_output = gr.outputs.Label(num_top_classes=2)\n label = label_output.postprocess(y)\n self.assertDictEqual(\n label,\n {\n \"label\": 3,\n \"confidences\": [\n {\"label\": 3, \"confidence\": 0.7},\n {\"label\": 1, \"confidence\": 0.2},\n ],\n },\n )\n with self.assertRaises(ValueError):\n label_output.postprocess([1, 2, 3])\n\n with tempfile.TemporaryDirectory() as tmpdir:\n to_save = label_output.save_flagged(tmpdir, \"label_output\", label, None)\n self.assertEqual(to_save, '{\"3\": 0.7, \"1\": 0.2}')\n self.assertEqual(\n label_output.restore_flagged(tmpdir, to_save, None),\n {\n \"label\": \"3\",\n \"confidences\": [\n {\"label\": \"3\", \"confidence\": 0.7},\n {\"label\": \"1\", \"confidence\": 0.2},\n ],\n },\n )\n with self.assertRaises(ValueError):\n label_output = gr.outputs.Label(type=\"unknown\")\n label_output.deserialize([1, 2, 3])\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 772, "n_words": 138, "vocab_size": 62, "complexity": 1, "nloc": 54, "token_counts": 385, "n_ast_nodes": 606, "n_identifiers": 22, "random_cut": "def test_as_component(self):\n y = \"happy\"\n label_output = gr.outputs.Label()\n label = label_output.postprocess(y)\n self.assertDictEqual(label, {\"label\": \"happy\"})\n self.assertEqual(label_output.deserialize(y), y)\n" }, { "id": 58319, "commit_id": "2649fa325433aa219d6569ed77ef018f79480479", "repo": "prefect", "path": "tests/orion/api/test_work_queues.py", "file_name": "test_work_queues.py", "fun_name": "scheduled_flow_runs", "commit_message": "Add work queue backend", "code": "async def scheduled_flow_runs(self, session, deployment, work_queue, work_queue_2):\n for i in range(3):\n for wq in [work_queue, work_queue_2]:\n await models.flow_runs.create_flow_run(\n session=session,\n flow_run=schemas.core.FlowRun(\n flow_id=deployment.flow_id,\n deployment_id=deployment.id,\n work_queue_name=wq.name,\n state=schemas.states.State(\n type=\"SCHEDULED\",\n timestamp=pendulum.now(\"UTC\").add(minutes=i),\n state_details=dict(\n scheduled_time=pendulum.now(\"UTC\").add(minutes=i)\n ),\n ),\n ),\n )\n await session.commit()\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 27, "n_whitespaces": 399, "n_words": 34, "vocab_size": 29, "complexity": 3, "nloc": 19, "token_counts": 127, "n_ast_nodes": 193, "n_identifiers": 34, "random_cut": "async def scheduled_flow_runs(self, session, deployment, work_queue, work_queue_2):\n for i in range(3):\n for wq in [work_queue, work_queue_2]:\n await models.flow_runs.create_flow_run(\n session=session,\n flow_run=schemas.core.FlowRun(\n flow_id=deployment.flow_id,\n deployment_id=deployment.id,\n work_queue_name=wq.name,\n state=schemas.states.State(\n type=\"SCHEDULED\",\n timestamp=pendulum.now(\"UTC\").add(minutes=i),\n state_details=dict(\n scheduled_time=pendulum.now(\"UTC\").add(minutes=i)\n ),\n ),\n ),\n " }, { "id": 164112, "commit_id": "f46df091df3afea25a273f491d1f6b2c7d20b32c", "repo": "pandas", "path": "pandas/tests/io/xml/test_to_xml.py", "file_name": "test_to_xml.py", "fun_name": "test_attrs_cols_prefix", "commit_message": "TST: Remove unused fixtures (#45692)\n\n* TST: Remove unused 
fixtures\r\n\r\n* Undo a removed fixture\r\n\r\n* Add back other fixtures\r\n\r\n* Undo a file\r\n\r\n* Try undoing this?\r\n\r\n* Revert \"Try undoing this?\"\r\n\r\nThis reverts commit 0e56cb04f5e8cb1f7b2ac4c5e6191485bb2fe1ab.", "code": "def test_attrs_cols_prefix(parser):\n expected = \n\n output = geom_df.to_xml(\n attr_cols=[\"index\", \"shape\", \"degrees\", \"sides\"],\n namespaces={\"doc\": \"http://example.xom\"},\n prefix=\"doc\",\n parser=parser,\n )\n output = equalize_decl(output)\n\n assert output == expected\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 66, "n_words": 23, "vocab_size": 18, "complexity": 1, "nloc": 19, "token_counts": 53, "n_ast_nodes": 97, "n_identifiers": 10, "random_cut": "def test_attrs_cols_prefix(parser):\n expected = \n\n output = geom_df.to_xml(\n attr_cols=[\"index\", \"shape\", \"degrees\", \"sides\"],\n namespaces={\"doc\": \"http://example.xom\"},\n prefix=\"doc\",\n parser=p" }, { "id": 263295, "commit_id": "64ccb7aea824fbec57f7ed1bbe483ec486183c13", "repo": "pyinstaller", "path": "bootloader/waflib/Tools/c_preproc.py", "file_name": "c_preproc.py", "fun_name": "filter_comments", "commit_message": "Bootloader: Building: Unpack waf's lib archive.\n\nDoing so makes it easier to modify. This is a temporary measure until the next\nwaf version is released (although I'm tempted to keep it since it's much more\nIDE completion friendly).", "code": "def filter_comments(self, node):\n code = node.read()\n if use_trigraphs:\n for (a, b) in trig_def:\n code = code.split(a).join(b)\n code = re_nl.sub('', code)\n code = re_cpp.sub(repl, code)\n return re_lines.findall(code)\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 86, "n_words": 26, "vocab_size": 19, "complexity": 3, "nloc": 8, "token_counts": 66, "n_ast_nodes": 104, "n_identifiers": 17, "random_cut": "def filter_comments(self, node):\n c" }, { "id": 8454, "commit_id": "4d2d81f9fdefc52eea6a9bf0826a6f2ffc8d681b", "repo": "ludwig", "path": "tests/integration_tests/test_mlflow.py", "file_name": "test_mlflow.py", "fun_name": "test_export_mlflow_local", "commit_message": "Config Object (#2426)\n\n* Fixed loss instances across features\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed binary OneOfImplementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Flake 8\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix custom loss components\r\n\r\n* Fix gbm category\r\n\r\n* Remove config object code, out of scope\r\n\r\n* Fixed more tests\r\n\r\n* Fixed incorrect text preproc default, added clip to category feature level\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixes additional tests\r\n\r\n* Cache jsonschema validator to reduce memory pressure\r\n\r\n* Fix imports\r\n\r\n* Skip neuropod test\r\n\r\n* Added upgrade audio to default preproc back compat and cleaned up\r\n\r\n* Small nits\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Change backfill constant for audio\r\n\r\n* Add docstring to compute feature 
hash\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Unused import\r\n\r\n* Another backfill constant change\r\n\r\n* Unused import\r\n\r\n* remove default population functions\r\n\r\n* Added config object test\r\n\r\n* rewired build_inputs\r\n\r\n* rewired combiner in ecd, added logic to config object\r\n\r\n* Refactored ecd.py\r\n\r\n* Fixing up merge_with_defaults, need metadata changes in master\r\n\r\n* Refactored defaults section and mega upgraded config obj\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed some formatting\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed feature col, proc col, and render config from defaults.py\r\n\r\n* Fix duplicate import\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added config initializer to merge defaults flow\r\n\r\n* Refactored update_config_with_metadata\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added dict conversion method to config object and refactored merge config function in config_utils\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Refactored until preproc entrypoint\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed update_config_with_metadata\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Removed load config base feature method - no longer necessary\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Formatting\r\n\r\n* Fixed input size assignment\r\n\r\n* Temp fix\r\n\r\n* Fixed pretrained encoder path referencing temp until preproc refactor\r\n\r\n* Solved the WORST BUG EVER\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Switch reduce_input to None for sequence tagger\r\n\r\n* Fixed another one\r\n\r\n* Fixed typo\r\n\r\n* Various test fixes\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Flake 8\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed excess defaults params issue\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Minor fixes\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed some defaults tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed more tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed more tests\r\n\r\n* Formatting\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* More test fixes\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see 
https://pre-commit.ci\r\n\r\n* Fixed defaults tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix more tests\r\n\r\n* Flake 8\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix more tests\r\n\r\n* Fixed more tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed more tests\r\n\r\n* Fixed more tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fixing ghost tests attempt\r\n\r\n* Deep copy to smash the ghost failures\r\n\r\n* Copied top level modules now too\r\n\r\n* Started fixing hyperopt\r\n\r\n* Fixed Hyperopt Issues\r\n\r\n* Flake 8\r\n\r\n* Remove commented out code\r\n\r\n* Address Piero feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Flake 8\r\n\r\n* Removed merge with defaults\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed various issues with preprocessing and splitting positioning\r\n\r\n* Fixed hyperopt issues\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Refactored api pipeline to use all config obj references\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed more tests\r\n\r\n* Flake 8\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix more tests\r\n\r\n* Fixed auto tune learning rate and batch size\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed sequence feature tests\r\n\r\n* Fixed image feature test\r\n\r\n* Fixed last test\r\n\r\n* flake 8\r\n\r\n* Marshmallowify Config object, remove manual to dict method, add Factory method constructors\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Validate config within config object\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* All Travis feedback addressed\r\n\r\n* Using all new constructors now\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* removed from class attributes\r\n\r\n* Added deep copies back and piped repr inheritance\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Format\r\n\r\n* Small error fix, moved back compat into Config Object\r\n\r\n* Flake8\r\n\r\n* Docstring for hyperopt defaults method\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Address Joppe feedback\r\n\r\n* Revert \"Address Joppe feedback\"\r\n\r\nThis reverts commit 42f1665ef917d062a010550bb960594c355285ff.\r\n\r\n* Fix tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Flake8\r\n\r\n* fix test\r\n\r\n* Small improvement\r\n\r\n* Changed repr for input features, added feature 
enabling/disabling\r\n\r\n* Added feature enabling/disabling, and better reprs for SDK dev\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Flake 8\r\n\r\n* Added rich to requirements.txt\r\n\r\n* Add some more CO tests and comment more on CO code\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix explain issue\r\n\r\n* Julian feedback\r\n\r\n* Added TODOs for future refactor PRs\r\n\r\n* Fix explain test failure, test shared state improvement and bug fix, remove unncessary code from convert_submodules\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* implement Daniel's feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix residual errors\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Error fix\r\n\r\n* Using mixins now so no loose attributes on defaults, fixed height width schema restrictions\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Removed unnecessary filtering from defaults schema logic\r\n\r\n* Piero's simplification and cleanup\r\n\r\n* Flake 8\r\n\r\n* Fix test and update docstrings from Pieros change\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Address most of Justin's feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix tests and more feedback implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Address feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Renamed files to correspond to ModelConfig class name\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Missing constant import\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed incorrect merge conflict resolution\r\n\r\n* Flake8\r\n\r\n* Fix remaining tests (except old models training from trainer type removal)\r\n\r\n* Fixed old models not validating trainer type\r\n\r\n* Add output_feature=False to test_hyperopt_ray.py\r\n\r\n* Implement Kabir's feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Travis Addair \r\nCo-authored-by: w4nderlust ", "code": "def test_export_mlflow_local(tmpdir):\n epochs = 2\n batch_size = 8\n num_examples = 32\n\n input_features = [sequence_feature(reduce_output=\"sum\")]\n output_features = [category_feature(vocab_size=2, reduce_input=\"sum\", output_feature=True)]\n\n config = {\n \"input_features\": input_features,\n \"output_features\": output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n TRAINER: {\"epochs\": epochs, \"batch_size\": batch_size},\n }\n\n data_csv = generate_data(\n input_features, output_features, 
os.path.join(tmpdir, \"train.csv\"), num_examples=num_examples\n )\n\n exp_name = \"mlflow_test\"\n output_dir = os.path.join(tmpdir, \"output\")\n model = LudwigModel(config, backend=FakeRemoteBackend())\n _, _, output_directory = model.train(training_set=data_csv, experiment_name=exp_name, output_directory=output_dir)\n\n model_path = os.path.join(output_directory, \"model\")\n output_path = os.path.join(tmpdir, \"data/results/mlflow\")\n export_mlflow(model_path, output_path)\n assert set(os.listdir(output_path)) == {\"MLmodel\", \"model\", \"conda.yaml\"}\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 165, "n_words": 80, "vocab_size": 63, "complexity": 1, "nloc": 23, "token_counts": 198, "n_ast_nodes": 327, "n_identifiers": 36, "random_cut": "def test_export_mlflow_local(tmpdir):\n epochs = 2\n batch_size = 8\n num_examples = 32\n\n input_features = [sequence_feature(reduce_output=\"sum\")]\n output_features = [category_feature(vocab_size=2, reduce_input=\"sum\", output_feature=True)]\n\n config = {\n \"input_features\": input_features,\n \"output_features\": output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n TRAINER: {\"epochs\": epochs, \"batch_size\": batch_size},\n }\n\n data_csv = generate_data(\n " }, { "id": 48674, "commit_id": "c10f2266222c434485889b08cc1463acdb8fa169", "repo": "django-rest-framework", "path": "rest_framework/utils/encoders.py", "file_name": "encoders.py", "fun_name": "default", "commit_message": "Refactor: Replace try/except with contextlib.suppress() (#8676)", "code": "def default(self, obj):\n # For Date Time string spec, see ECMA 262\n # https://ecma-international.org/ecma-262/5.1/#sec-15.9.1.15\n if isinstance(obj, Promise):\n return force_str(obj)\n elif isinstance(obj, datetime.datetime):\n representation = obj.isoformat()\n if representation.endswith('+00:00'):\n representation = representation[:-6] + 'Z'\n return representation\n elif isinstance(obj, datetime.date):\n return obj.isoformat()\n elif isinstance(obj, datetime.time):\n if timezone and timezone.is_aware(obj):\n raise ValueError(\"JSON can't represent timezone-aware times.\")\n representation = obj.isoformat()\n return representation\n elif isinstance(obj, datetime.timedelta):\n return str(obj.total_seconds())\n elif isinstance(obj, decimal.Decimal):\n # Serializers will coerce decimals to strings by default.\n return float(obj)\n elif isinstance(obj, uuid.UUID):\n return str(obj)\n elif isinstance(obj, QuerySet):\n return tuple(obj)\n elif isinstance(obj, bytes):\n # Best-effort for binary blobs. See #4187.\n return obj.decode()\n elif hasattr(obj, 'tolist'):\n # Numpy arrays and array scalars.\n return obj.tolist()\n elif (coreapi is not None) and isinstance(obj, (coreapi.Document, coreapi.Error)):\n raise RuntimeError(\n 'Cannot return a coreapi object from a JSON view. 
'\n 'You should be using a schema renderer instead for this view.'\n )\n elif hasattr(obj, '__getitem__'):\n cls = (list if isinstance(obj, (list, tuple)) else dict)\n with contextlib.suppress(Exception):\n return cls(obj)\n elif hasattr(obj, '__iter__'):\n return tuple(item for item in obj)\n return super().default(obj)\n", "url": "https://github.com/encode/django-rest-framework.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 597, "n_words": 169, "vocab_size": 110, "complexity": 20, "nloc": 39, "token_counts": 291, "n_ast_nodes": 473, "n_identifiers": 41, "random_cut": "def default(self, obj):\n # For Date Time string spec, see ECMA 262\n # https://ecma-international.org/ecma-262/5.1/#sec-15.9.1.15\n if isinstance(obj, Promise):\n return force_str(obj)\n elif isinstance(obj, datetime.datetime):\n representation = obj.isoformat()\n if representation.endswith('+00:00'):\n representation = representation[:-6] + 'Z'\n return representation\n elif isinstance(obj, datetime.date):\n return obj.isoformat()\n elif isinstance(obj, datetime.time):\n if timezone and timezone.is_aware(obj):\n raise ValueError(\"JSON can't represent timezone-aware times.\")\n representation = obj.isoformat()\n return represen" }, { "id": 144837, "commit_id": "610930ae6aeafb37be75851a8c1b9ff39d5f7d22", "repo": "ray", "path": "python/ray/serve/tests/test_healthcheck.py", "file_name": "test_healthcheck.py", "fun_name": "test_user_defined_method_fails", "commit_message": "[serve] Improve health check failure semantics (#22297)", "code": "def test_user_defined_method_fails(serve_instance):\n Patient.deploy()\n h = Patient.get_handle()\n actor = ray.get(h.remote())\n ray.get(h.set_should_fail.remote())\n\n wait_for_condition(check_new_actor_started, handle=h, original_actors=actor)\n ray.get([h.remote() for _ in range(100)])\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 35, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 7, "token_counts": 72, "n_ast_nodes": 117, "n_identifiers": 17, "random_cut": "def test_user_defined_method_fails(serve_instance):\n Patient.deploy()\n h = Patient.get_handle()\n actor = ray.get(h.remote())\n ray.get(h.set_should_fail.remote())\n\n wait_fo" }, { "id": 61436, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/wheel_builder.py", "file_name": "wheel_builder.py", "fun_name": "_clean_one_legacy", "commit_message": "upd; format", "code": "def _clean_one_legacy(req, global_options):\n # type: (InstallRequirement, List[str]) -> bool\n clean_args = make_setuptools_clean_args(\n req.setup_py_path,\n global_options=global_options,\n )\n\n logger.info('Running setup.py clean for %s', req.name)\n try:\n call_subprocess(clean_args, cwd=req.source_dir)\n return True\n except Exception:\n logger.error('Failed cleaning build dir for %s', req.name)\n return False\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 96, "n_words": 37, "vocab_size": 33, "complexity": 2, "nloc": 12, "token_counts": 59, "n_ast_nodes": 95, "n_identifiers": 14, "random_cut": "def _clean_one_legacy(req, global_options):\n # type: (InstallRequirement, List[str]) -> bool\n clean_args = make_setuptools_clean_args(\n req.setup_py_path,\n global_options=global_options,\n )\n\n 
lo" }, { "id": 137364, "commit_id": "81237e05838757dde196688a20631daad48010dd", "repo": "ray", "path": "python/ray/train/tests/test_huggingface_gpu.py", "file_name": "test_huggingface_gpu.py", "fun_name": "create_checkpoint", "commit_message": "[Train] `HuggingFacePredictor` & docs improvements (#30945)\n\nThis PR introduces 2 changes:\r\n\r\nRemoves a confusing suggestion in the docstring of HuggingFaceCheckpoint - checkpoints created using from_checkpoint will not work for prediction as intended.\r\nAdds use_gpu argument and logic to automatically use GPU if one is available to HuggingFacePredictor.\r\n\r\nSigned-off-by: Antoni Baum ", "code": "def create_checkpoint():\n with tempfile.TemporaryDirectory() as tmpdir:\n model_config = AutoConfig.from_pretrained(model_checkpoint)\n model = AutoModelForCausalLM.from_config(model_config)\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint)\n checkpoint = HuggingFaceCheckpoint.from_model(model, tokenizer, path=tmpdir)\n # Serialize to dict so we can remove the temporary directory\n return HuggingFaceCheckpoint.from_dict(checkpoint.to_dict())\n\n\n# TODO(ml-team): Add np.ndarray to batch_type\n@pytest.mark.parametrize(\"batch_type\", [pd.DataFrame])\n@pytest.mark.parametrize(\"device\", [None, 0])", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"batch_type\", [pd.DataFrame])\n@pytest.mark.parametrize(\"device\", [None, 0])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 85, "n_words": 44, "vocab_size": 39, "complexity": 1, "nloc": 7, "token_counts": 62, "n_ast_nodes": 153, "n_identifiers": 25, "random_cut": "def create_checkpoint():\n with tempfile.TemporaryDirectory() as tmpdir:\n model_config = AutoConfig.from_pretrained(model_checkpoint)\n model = AutoModelForCausalLM.from_c" }, { "id": 91455, "commit_id": "284e980df0018f8baee659999268bdd4c7d08255", "repo": "sentry", "path": "tests/sentry/search/events/test_builder.py", "file_name": "test_builder.py", "fun_name": "test_limit_validation", "commit_message": "ref: replace self.assertRaises with pytest.raises (#35685)\n\n* add flake8 plugin to detect assertRaises\r\n\r\n* ref: replace self.assertRaises with pytest.raises\r\n\r\n* non-sed fixes", "code": "def test_limit_validation(self):\n # 51 is ok\n MetricsQueryBuilder(self.params, limit=51)\n # None is ok, defaults to 50\n query = MetricsQueryBuilder(self.params)\n assert query.limit.limit == 50\n # anything higher should throw an error\n with pytest.raises(IncompatibleMetricsQuery):\n MetricsQueryBuilder(self.params, limit=10_000)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 92, "n_words": 33, "vocab_size": 28, "complexity": 1, "nloc": 6, "token_counts": 49, "n_ast_nodes": 82, "n_identifiers": 9, "random_cut": "def test_limit_validation(self):\n # 51 is ok\n MetricsQueryBuilder(self.params, limit=51)\n # None is ok, defaults to 50\n query = MetricsQueryBuilder(self.params)\n assert query.limit.limit == 50\n # anything higher should throw an error\n with pytest.raises(IncompatibleMe" }, { "id": 58882, "commit_id": "c02383e4a879c95586cfbc19787904da2d4be22b", "repo": "prefect", "path": "tests/infrastructure/test_docker_container.py", "file_name": "test_docker_container.py", "fun_name": "test_adds_docker_host_gateway_on_linux", "commit_message": "Update default infrastructure command to be set at runtime\n\nAdd commands to Docker container tests with no 
command", "code": "def test_adds_docker_host_gateway_on_linux(mock_docker_client, monkeypatch):\n monkeypatch.setattr(\"sys.platform\", \"linux\")\n\n DockerContainer(\n command=[\"echo\", \"hello\"],\n ).run()\n\n mock_docker_client.containers.create.assert_called_once()\n call_extra_hosts = mock_docker_client.containers.create.call_args[1].get(\n \"extra_hosts\"\n )\n assert call_extra_hosts == {\"host.docker.internal\": \"host-gateway\"}\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 54, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 10, "token_counts": 64, "n_ast_nodes": 113, "n_identifiers": 13, "random_cut": "def test_adds_docker_host_gateway_on_linux(mock_docker_client, monkeypatch):\n monkeypatch.setattr(\"sys.platform\", \"linux\")\n\n DockerContainer(\n command=[\"echo\", \"hello\"],\n ).run()\n\n mock_docker_client.containers.create.assert_called_once()\n call_extra_hosts = mock_docker_client.containers.create.call_args[1].get(\n \"extra_host" }, { "id": 155050, "commit_id": "11ba4811e6db11740e11fd33d3cdfba8ce5bec54", "repo": "modin", "path": "modin/pandas/indexing.py", "file_name": "indexing.py", "fun_name": "_set_item_existing_loc", "commit_message": "FIX-#3764: Ensure df.loc with a scalar out of bounds appends to df (#3765)\n\nCo-authored-by: Devin Petersohn \r\nCo-authored-by: Bill Wang \r\nCo-authored-by: Vasily Litvinov ", "code": "def _set_item_existing_loc(self, row_loc, col_loc, item):\n \n row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)\n self._setitem_positional(\n row_lookup,\n col_lookup,\n item,\n axis=self._determine_setitem_axis(\n row_lookup, col_lookup, is_scalar(row_loc), is_scalar(col_loc)\n ),\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 119, "n_words": 21, "vocab_size": 18, "complexity": 1, "nloc": 10, "token_counts": 56, "n_ast_nodes": 81, "n_identifiers": 12, "random_cut": "def _set_item_existing_loc(self, row_loc, col_loc, item):\n \n row_lookup, col_lookup = self._compute_lookup(row_loc, col_loc)\n self._setitem_positional(\n row_lookup,\n " }, { "id": 114412, "commit_id": "76a30708e24bca37169df44d8b31573c7b5beb43", "repo": "mindsdb", "path": "mindsdb/integrations/mysql_handler/mysql_handler/mysql_handler.py", "file_name": "mysql_handler.py", "fun_name": "check_status", "commit_message": "test: move testing logic into unittest modules; CI still pending", "code": "def check_status(self):\n try:\n return self.connection.is_connected()\n except Exception:\n return False\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 9, "vocab_size": 8, "complexity": 2, "nloc": 5, "token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 5, "random_cut": "def check_status(self):\n try:\n return self.connection.is_connected()\n except Exception:\n return Fals" }, { "id": 72515, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/views/pages/moderation.py", "file_name": "moderation.py", "fun_name": "reject_moderation", "commit_message": "Reformat with black", "code": "def reject_moderation(request, revision_id):\n revision = get_object_or_404(PageRevision, id=revision_id)\n if not revision.page.permissions_for_user(request.user).can_publish():\n raise PermissionDenied\n\n if not 
revision.submitted_for_moderation:\n messages.error(\n request,\n _(\"The page '{0}' is not currently awaiting moderation.\").format(\n revision.page.specific_deferred.get_admin_display_title()\n ),\n )\n return redirect(\"wagtailadmin_home\")\n\n if request.method == \"POST\":\n revision.reject_moderation(user=request.user)\n\n messages.success(\n request,\n _(\"Page '{0}' rejected for publication.\").format(\n revision.page.specific_deferred.get_admin_display_title()\n ),\n buttons=[\n messages.button(\n reverse(\"wagtailadmin_pages:edit\", args=(revision.page.id,)),\n _(\"Edit\"),\n )\n ],\n )\n\n if not send_moderation_notification(revision, \"rejected\", request.user):\n messages.error(request, _(\"Failed to send rejection notifications\"))\n\n return redirect(\"wagtailadmin_home\")\n\n\n@require_GET", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "@require_GET", "n_ast_errors": 1, "ast_levels": 19, "n_whitespaces": 332, "n_words": 66, "vocab_size": 52, "complexity": 5, "nloc": 29, "token_counts": 174, "n_ast_nodes": 290, "n_identifiers": 28, "random_cut": "def reject_moderation(request, revision_id):\n revision = get_object_or_404(PageRevision, id=revision_id)\n if not revision.page.permissions_for_user(request.user).can_publish():\n raise PermissionDenied\n\n if not revision.submitted_for_moderation:\n messages.error(\n request,\n _(\"The page '{0}' is not currently awaiting moderation.\").format(\n revision.page.specific_deferred.get_admin_display_title()\n ),\n )\n return redirect(\"wagtailadmin_home\")\n\n if request.method == \"POST\":\n revision.reject_moderation(user=request.user)\n\n messages.success(\n request,\n _(\"Page '{0}' rejected for publication.\").format(\n revision.page.specific_deferred.get_admin_display_title()\n ),\n buttons=[\n messages.button(\n reverse(\"wagtailadmin_pages:edit\", args=(revision.page.id,)),\n _(\"Edit\"),\n )\n ],\n )\n\n if not send_moderation_notification(revision, \"rejected\", request.user):\n messages.error(request, _(\"Failed to send rejection notifications\"))\n\n return redirect(\"wagtailadmin_home\")" }, { "id": 296840, "commit_id": "a2c74b978664b627bafc4a43b26aa2be7b15b229", "repo": "core", "path": "tests/components/calendar/test_trigger.py", "file_name": "test_trigger.py", "fun_name": "test_event_payload", "commit_message": "Add initial implementation of a calendar trigger (#68674)\n\n* Add initial implementation of calendar trigger\r\n\r\nThis is an initial implementation of a calendar trigger, that supports\r\ntriggering on calendar start time.\r\n\r\nSee architecture proposal in:\r\nhttps://github.com/home-assistant/architecture/discussions/700\r\n\r\n* Address reviewer feedback\r\n\r\n* Use f-strings for all tests\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Remove logging f-strings, and move to main code\r\n\r\n* Remove mypy ignore\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Update calendar triggers to use new calendar data model\r\n\r\n* Update tests/components/calendar/test_trigger.py\r\n\r\nCo-authored-by: Franck Nijhof \r\n\r\n* Rewrite tests using freezegun\r\n\r\nRewrite tests using freezegun and improve edge case handling, and use utc consistently for all alarms.\r\n\r\n* Update homeassistant/components/calendar/trigger.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Update homeassistant/components/calendar/trigger.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Increase test 
coverage based on pr feedback\r\n\r\nCo-authored-by: Martin Hjelmare \r\nCo-authored-by: Franck Nijhof ", "code": "async def test_event_payload(hass, calls, fake_schedule):\n \n event_data = fake_schedule.create_event(\n start=datetime.datetime.fromisoformat(\"2022-04-19 11:00:00+00:00\"),\n end=datetime.datetime.fromisoformat(\"2022-04-19 11:30:00+00:00\"),\n description=\"Description\",\n location=\"Location\",\n )\n await create_automation(hass, EVENT_START)\n assert len(calls()) == 0\n\n await fake_schedule.fire_until(\n datetime.datetime.fromisoformat(\"2022-04-19 11:15:00+00:00\")\n )\n assert calls() == [\n {\n \"platform\": \"calendar\",\n \"event\": EVENT_START,\n \"calendar_event\": event_data,\n }\n ]\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 149, "n_words": 40, "vocab_size": 36, "complexity": 1, "nloc": 19, "token_counts": 98, "n_ast_nodes": 169, "n_identifiers": 16, "random_cut": "async def test_event_payload(hass, calls, fake_schedule):\n \n event_data = fake_schedule.create_event(\n start=datetime.datetime.fromisoformat(\"2022-04-19 11:00:00+00:00\"),\n end=datetime.datetime.fromisoformat(\"2022-04-19 11:30:00+00:00\"),\n description=\"Description\",\n location=\"Location\",\n )\n await create_automation(hass, EVENT_START)\n assert len(calls()) == 0\n\n await fake_schedule.fire_until(\n datetime.datetime.fromisoformat(\"2022-04-19 11:15:00+00:00\")\n )\n assert calls() == [\n {\n \"platform\": " }, { "id": 1861, "commit_id": "b480217f5bc07d97a691bfed74eb7489667788dd", "repo": "PySyft", "path": "packages/syft/src/syft/core/tensor/nn/model.py", "file_name": "model.py", "fun_name": "publish", "commit_message": "update domain update script to add branch name during hagrid launch\nadd loss to parameter list in model publish\nprint loss during model training", "code": "def publish(self, deduct_epsilon_for_user, get_budget_for_user, ledger, sigma):\n print(\"Publish Model Weights\")\n # relative\n from ..autodp.gamma_tensor import GammaTensor\n\n parameters = {}\n for i, layer in enumerate(self.layers):\n print(\"Layer\", str(layer))\n\n print(\"Before Publish\")\n for param in layer.params:\n print(param.shape, end=\" \")\n print()\n if hasattr(layer, \"params\"):\n parameters[str(layer) + str(i)] = [\n param.publish(\n deduct_epsilon_for_user=deduct_epsilon_for_user,\n get_budget_for_user=get_budget_for_user,\n ledger=ledger,\n sigma=sigma,\n )\n if isinstance(param, (GammaTensor))\n else param\n for param in layer.params\n ]\n print(\"After Publish\")\n for param in parameters[str(layer) + str(i)]:\n print(param.shape, end=\" \")\n print()\n\n parameters[\"loss\"] = self.aggregated_loss\n\n return parameters\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 477, "n_words": 76, "vocab_size": 56, "complexity": 7, "nloc": 28, "token_counts": 178, "n_ast_nodes": 285, "n_identifiers": 23, "random_cut": "def publish(self, deduct_epsilon_for_user, get_budget_for_user, ledger, sigma):\n print(\"Publish Model Weights\")\n # relative\n from ..autodp.gamma_tensor import GammaTensor\n\n parameters = {}\n for i, layer in enumerate(self.layers):\n print(\"Layer\", str(layer))\n\n print(\"Before Publish\")\n for param in layer.params:\n print(param.shape, end=\" \")\n print()\n if hasattr(layer, \"params\"):\n parameters[str(layer" }, { "id": 47749, "commit_id": 
"c3d883a971a8e4e65ccc774891928daaaa0f4442", "repo": "airflow", "path": "kubernetes_tests/test_kubernetes_pod_operator_backcompat.py", "file_name": "test_kubernetes_pod_operator_backcompat.py", "fun_name": "test_volume_mount", "commit_message": "KubernetesPodOperator should patch \"already checked\" always (#22734)\n\nWhen not configured to delete pods, at end of task execution the current behavior is to patch the pod as \"already checked\", but only if pod not successful. We should also patch when successful so it isn't \"reattached\" to after a task clear.", "code": "def test_volume_mount(self):\n with patch.object(PodManager, 'log') as mock_logger:\n volume_mount = VolumeMount(\n 'test-volume', mount_path='/tmp/test_volume', sub_path=None, read_only=False\n )\n\n volume_config = {'persistentVolumeClaim': {'claimName': 'test-volume'}}\n volume = Volume(name='test-volume', configs=volume_config)\n args = [\n \"echo \\\"retrieved from mount\\\" > /tmp/test_volume/test.txt \"\n \"&& cat /tmp/test_volume/test.txt\"\n ]\n k = KubernetesPodOperator(\n namespace='default',\n image=\"ubuntu:16.04\",\n cmds=[\"bash\", \"-cx\"],\n arguments=args,\n labels={\"foo\": \"bar\"},\n volume_mounts=[volume_mount],\n volumes=[volume],\n is_delete_operator_pod=False,\n name=\"test\",\n task_id=\"task\",\n in_cluster=False,\n do_xcom_push=False,\n )\n context = create_context(k)\n k.execute(context=context)\n mock_logger.info.assert_any_call('retrieved from mount')\n actual_pod = self.api_client.sanitize_for_serialization(k.pod)\n expected_pod = copy(self.expected_pod)\n expected_pod['spec']['containers'][0]['args'] = args\n expected_pod['spec']['containers'][0]['volumeMounts'] = [\n {'name': 'test-volume', 'mountPath': '/tmp/test_volume', 'readOnly': False}\n ]\n expected_pod['spec']['volumes'] = [\n {'name': 'test-volume', 'persistentVolumeClaim': {'claimName': 'test-volume'}}\n ]\n expected_pod['metadata']['labels']['already_checked'] = 'True'\n assert expected_pod == actual_pod\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 579, "n_words": 98, "vocab_size": 73, "complexity": 1, "nloc": 39, "token_counts": 254, "n_ast_nodes": 453, "n_identifiers": 41, "random_cut": "def test_volume_mount(self):\n with patch.object(PodManager, 'log') as mock_logger:\n volume_mount = VolumeMount(\n 'test-volume', mount_path='/tmp/test_volume', sub_path=None, read_only=False\n )\n\n volume_config = {'persistentVolumeClaim': {'claimName': 'test-volume'}}\n volume = Volume(name='test-volume', configs=volume_config)\n args = [\n \"echo \\\"retrieved from mount\\\" > /tmp/test_volume/test.txt \"\n \"&& cat /tmp/test_volume/test.txt\"\n ]\n k = KubernetesPodOperator(\n namespace='default',\n image=\"ubuntu:16.04\",\n cmds=[\"bash\", \"-cx\"],\n arguments=args,\n labels={\"foo\": \"bar\"},\n volume_mounts=[volume_mount],\n volumes=[volume],\n is_delete_operator_pod=False,\n name=\"test\",\n task_id=\"task\",\n in_cluster=False,\n do_xcom_push=False,\n )\n context = create_context(k)\n k.execute(context=context)\n mock_logger.info.assert_any_call('retrieved from mount')\n actual_pod = self.api_client.sanitize_for_serialization(k.pod)\n expected_pod = copy(self.expected_pod)\n expected_pod['spec']['containers'][0]['args'] = args\n " }, { "id": 253762, "commit_id": "f0be7e672bc0a7c77005d5c79452d796cfe1a06b", "repo": "d2l-en", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "forward", "commit_message": "Refactor Multihead 
Attn, Self Attn, and Transformer (#2096)\n\n* multihead attn\r\n\r\n* self attn and pos encoding\r\n\r\n* simplify\r\n\r\n* before EncoderBlock\r\n\r\n* before tmencoder\r\n\r\n* before decoder block\r\n\r\n* before training\r\n\r\n* transformer code\r\n\r\n* rm seq2seq encoder old\r\n\r\n* fix bahdanau attn map\r\n\r\n* transformer done, perf tuned\r\n\r\n* clean super", "code": "def forward(self, X, valid_lens):\n # Since positional encoding values are between -1 and 1, the embedding\n # values are multiplied by the square root of the embedding dimension\n # to rescale before they are summed up\n X = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens))\n self.attention_weights = [None] * len(self.blks)\n for i, blk in enumerate(self.blks):\n X = blk(X, valid_lens)\n self.attention_weights[\n i] = blk.attention.attention.attention_weights\n return X\n", "url": "https://github.com/d2l-ai/d2l-en.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 146, "n_words": 61, "vocab_size": 47, "complexity": 2, "nloc": 8, "token_counts": 81, "n_ast_nodes": 127, "n_identifiers": 16, "random_cut": "def forward(self, X, valid_lens):\n # Since positional encoding values are between -1 and 1, the embedding\n # values are multiplied by the square root of the embedding dimension\n # to rescale before they are summed up\n X = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens))\n self.attention_weights = [None] * len(self.blks)\n for i, blk in enumerate(self.blks):\n X = blk(X, valid_lens)\n self.attention_weights[\n i] = blk.attention.attention.attention_wei" }, { "id": 116152, "commit_id": "d304fa61c43e5248c0cb111d5553db653be92cff", "repo": "mindsdb", "path": "tests/unit/executor_test_base.py", "file_name": "executor_test_base.py", "fun_name": "clear_db", "commit_message": "executor base test", "code": "def clear_db(db):\n # drop\n db.Base.metadata.drop_all(db.engine)\n\n # create\n db.Base.metadata.create_all(db.engine)\n\n # fill with data\n r = db.Integration(name='files', data={}, engine='files')\n db.session.add(r)\n r = db.Integration(name='views', data={}, engine='views')\n db.session.add(r)\n db.session.commit()\n return db\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 103, "n_words": 27, "vocab_size": 21, "complexity": 1, "nloc": 9, "token_counts": 92, "n_ast_nodes": 155, "n_identifiers": 14, "random_cut": "def clear_db(db):\n # drop\n db.Base.metadata.drop_all(db.engine)\n\n # create\n db.Base.metadata.create_all(db.engine)\n\n # fill with data\n" }, { "id": 250972, "commit_id": "ef3f9e492e8f1d197ddab24bf5f80a76d2fe566d", "repo": "mitmproxy", "path": "mitmproxy/net/udp.py", "file_name": "udp.py", "fun_name": "resume_writing", "commit_message": "[dns] rewrite of udp, merge dnsserver>proxyserver", "code": "def resume_writing(self) -> None:\n assert self._paused > 0\n self._paused = self._paused - 1\n if self._paused == 0:\n self._can_write.set()\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 49, "n_words": 18, "vocab_size": 15, "complexity": 2, "nloc": 5, "token_counts": 36, "n_ast_nodes": 58, "n_identifiers": 5, "random_cut": "def resume_writing(self) -> None:\n assert self._paused > 0\n self._paused = self._paused - 1\n if self._pa" }, { "id": 211333, "commit_id": "e55e41945d42db787a0f7c557d53d06a6b24536b", "repo": 
"PaddleDetection", "path": "configs/rotate/tools/slicebase.py", "file_name": "slicebase.py", "fun_name": "get_poly4_from_poly5", "commit_message": "Refactor rbox (#6704)\n\n* refactor rbox\r\n\r\n* modify the code of save results\r\n\r\n* fix some problem\r\n\r\n* add .gitignore in dataset/dota\r\n\r\n* fix test anno path", "code": "def get_poly4_from_poly5(self, poly):\n distances = [\n cal_line_length((poly[i * 2], poly[i * 2 + 1]),\n (poly[(i + 1) * 2], poly[(i + 1) * 2 + 1]))\n for i in range(int(len(poly) / 2 - 1))\n ]\n distances.append(\n cal_line_length((poly[0], poly[1]), (poly[8], poly[9])))\n pos = np.array(distances).argsort()[0]\n count = 0\n out_poly = []\n while count < 5:\n if (count == pos):\n out_poly.append(\n (poly[count * 2] + poly[(count * 2 + 2) % 10]) / 2)\n out_poly.append(\n (poly[(count * 2 + 1) % 10] + poly[(count * 2 + 3) % 10]) /\n 2)\n count = count + 1\n elif (count == (pos + 1) % 5):\n count = count + 1\n continue\n\n else:\n out_poly.append(poly[count * 2])\n out_poly.append(poly[count * 2 + 1])\n count = count + 1\n return out_poly\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 449, "n_words": 124, "vocab_size": 63, "complexity": 5, "nloc": 27, "token_counts": 258, "n_ast_nodes": 389, "n_identifiers": 16, "random_cut": "def get_poly4_from_poly5(self, poly):\n distances = [\n cal_line_length((poly[i * 2], poly[i * 2 + 1]),\n (poly[(i + 1) * 2], poly[(i + 1) * 2 + 1]))\n for i in range(int(len(poly) / 2 - 1))\n ]\n distances.append(\n cal_line_length((poly[0], poly[1]), (poly[8], poly[9])))\n pos = np.array(distances).argsort()[0]\n count = 0\n out_poly = []\n while count < 5:\n if (count == pos):\n out_poly.append(\n (poly[count * 2] + poly[(count * 2 + 2) % 10]) / 2)\n out_poly.append(\n (poly[(count * 2 + 1) % 10] + poly[(count * 2 + 3) % 10]) /\n 2)\n count = count + 1\n elif (count == (pos + 1) % 5):\n count = count + 1\n continue\n\n else" }, { "id": 260606, "commit_id": "84c6421a9067de7d1b54b7a6d8e21ce38e1f0eca", "repo": "scikit-learn", "path": "sklearn/utils/tests/test_estimator_html_repr.py", "file_name": "test_estimator_html_repr.py", "fun_name": "test_invalid_parameters_in_stacking", "commit_message": "FIX Show a HTML repr for meta-estimatosr with invalid parameters (#24015)\n\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def test_invalid_parameters_in_stacking():\n \n stacker = StackingClassifier(estimators=[])\n\n html_output = estimator_html_repr(stacker)\n assert html.escape(str(stacker)) in html_output\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 24, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 32, "n_ast_nodes": 56, "n_identifiers": 9, "random_cut": "def test_invalid_parameters_in_stacking():\n \n stacker = StackingClassifier(estimators=[])\n\n html_output = estimator_html_rep" }, { "id": 17144, "commit_id": "c9b141d8b46d6bc771d9305e403440654bbe03b2", "repo": "ccxt", "path": "python/ccxt/coinex.py", "file_name": "coinex.py", "fun_name": "fetch_markets", "commit_message": "1.71.68\n\n[ci skip]", "code": "def fetch_markets(self, params={}):\n response = self.publicGetMarketInfo(params)\n #\n # {\n # \"code\": 0,\n # \"data\": {\n # \"WAVESBTC\": {\n # \"name\": \"WAVESBTC\",\n # \"min_amount\": \"1\",\n # \"maker_fee_rate\": \"0.001\",\n 
# \"taker_fee_rate\": \"0.001\",\n # \"pricing_name\": \"BTC\",\n # \"pricing_decimal\": 8,\n # \"trading_name\": \"WAVES\",\n # \"trading_decimal\": 8\n # }\n # }\n # }\n #\n markets = self.safe_value(response, 'data', {})\n result = []\n keys = list(markets.keys())\n for i in range(0, len(keys)):\n key = keys[i]\n market = markets[key]\n id = self.safe_string(market, 'name')\n tradingName = self.safe_string(market, 'trading_name')\n baseId = tradingName\n quoteId = self.safe_string(market, 'pricing_name')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n symbol = base + '/' + quote\n if tradingName == id:\n symbol = id\n result.append({\n 'id': id,\n 'symbol': symbol,\n 'base': base,\n 'quote': quote,\n 'settle': None,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': None,\n 'type': 'spot',\n 'spot': True,\n 'margin': None,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'active': None,\n 'contract': False,\n 'linear': None,\n 'inverse': None,\n 'taker': self.safe_number(market, 'taker_fee_rate'),\n 'maker': self.safe_number(market, 'maker_fee_rate'),\n 'contractSize': None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'price': self.safe_integer(market, 'pricing_decimal'),\n 'amount': self.safe_integer(market, 'trading_decimal'),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': self.safe_number(market, 'min_amount'),\n 'max': None,\n },\n 'price': {\n 'min': None,\n 'max': None,\n },\n 'cost': {\n 'min': None,\n 'max': None,\n },\n },\n 'info': market,\n })\n return result\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1520, "n_words": 205, "vocab_size": 119, "complexity": 3, "nloc": 68, "token_counts": 352, "n_ast_nodes": 619, "n_identifiers": 27, "random_cut": "def fetch_markets(self, params={}):\n response = self.publicGetMarketInfo(params)\n #\n # {\n # \"code\": 0,\n # \"data\": {\n # \"WAVESBTC\": {\n # \"name\": \"WAVESBTC\",\n # \"min_amount\": \"1\",\n # \"maker_fee_rate\": \"0.001\",\n # \"taker_fee_rate\": \"0.001\",\n # \"pricing_name\": \"BTC\",\n # \"pricing_decimal\": 8,\n # \"trading_name\": \"WAVES\",\n # \"trading_decimal\": 8\n # }\n # }\n # }\n #\n markets = self.safe_value(response, 'data', {})\n result = []\n keys = list(markets.keys())\n for i in range(0, len(keys)):\n key = keys[i]\n market = markets[key]\n id = self.safe_string(market, 'name')\n tradingName = self.safe_string(market, 'trading_name')\n baseId = tradingName\n quoteId = self.safe_string(market, 'pricing_name')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n symbol = base + '/' + quote\n if tradingName == id:\n symbol = id\n result.append({\n 'id': id,\n 'symbol': symbol,\n " }, { "id": 33184, "commit_id": "de8548ebf3242305d0f9792dacb6f86b196a3a33", "repo": "transformers", "path": "tests/models/layoutlmv3/test_modeling_tf_layoutlmv3.py", "file_name": "test_modeling_tf_layoutlmv3.py", "fun_name": "_prepare_for_class", "commit_message": "[LayoutLMv3] Add TensorFlow implementation (#18678)\n\nCo-authored-by: Esben Toke Christensen \r\nCo-authored-by: Lasse Reedtz \r\nCo-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>\r\nCo-authored-by: Joao Gante ", "code": "def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:\n inputs_dict = copy.deepcopy(inputs_dict)\n\n if 
model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):\n inputs_dict = {\n k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))\n if isinstance(v, tf.Tensor) and v.ndim > 0\n else v\n for k, v in inputs_dict.items()\n }\n\n if return_labels:\n if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):\n inputs_dict[\"labels\"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)\n elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):\n inputs_dict[\"start_positions\"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)\n inputs_dict[\"end_positions\"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)\n elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):\n inputs_dict[\"labels\"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)\n elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):\n inputs_dict[\"labels\"] = tf.zeros(\n (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32\n )\n\n return inputs_dict\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 348, "n_words": 86, "vocab_size": 55, "complexity": 10, "nloc": 22, "token_counts": 250, "n_ast_nodes": 383, "n_identifiers": 30, "random_cut": "def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:\n inputs_dict = copy.deepcopy(inputs_dict)\n\n if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):\n inputs_dict = {\n k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))\n if isinstance(v, tf.Tensor) and v.ndim > 0\n else v\n for k, v in inputs_dict.items()\n }\n\n if return_labels:\n if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):\n inputs_dict[\"labels\"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)\n elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):\n inputs_dict[\"start_positions\"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)\n inputs_dict[\"end_positions\"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)\n elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):\n inputs_dict[\"labels\"] = tf.zeros(self" }, { "id": 336014, "commit_id": "ba3c9a9a3a9cf76e4ff8292b66d7cc3206732627", "repo": "diffusers", "path": "tests/test_modeling_utils.py", "file_name": "test_modeling_utils.py", "fun_name": "test_score_sde_ve_pipeline", "commit_message": "[SDE] Merge to unconditional model (#89)\n\n* up\r\n\r\n* more\r\n\r\n* uP\r\n\r\n* make dummy test pass\r\n\r\n* save intermediate\r\n\r\n* p\r\n\r\n* p\r\n\r\n* finish\r\n\r\n* finish\r\n\r\n* finish", "code": "def test_score_sde_ve_pipeline(self):\n model = UNetUnconditionalModel.from_pretrained(\"fusing/ffhq_ncsnpp\", sde=True)\n\n torch.manual_seed(0)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(0)\n\n scheduler = ScoreSdeVeScheduler.from_config(\"fusing/ffhq_ncsnpp\")\n\n sde_ve = ScoreSdeVePipeline(model=model, scheduler=scheduler)\n\n torch.manual_seed(0)\n image = sde_ve(num_inference_steps=2)\n\n if model.device.type == \"cpu\":\n expected_image_sum = 3384805632.0\n expected_image_mean = 1076.000732421875\n else:\n expected_image_sum = 3382849024.0\n expected_image_mean = 1075.3787841796875\n\n assert (image.abs().sum() - expected_image_sum).abs().cpu().item() < 1e-2\n assert (image.abs().mean() - 
expected_image_mean).abs().cpu().item() < 1e-4\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 181, "n_words": 50, "vocab_size": 36, "complexity": 3, "nloc": 17, "token_counts": 165, "n_ast_nodes": 257, "n_identifiers": 27, "random_cut": "def test_score_sde_ve_pipeline(self):\n model = UNetUncondi" }, { "id": 268481, "commit_id": "76b746655a36807fa9198064ca9fe7c6cc00083a", "repo": "ansible", "path": "test/lib/ansible_test/_internal/host_profiles.py", "file_name": "host_profiles.py", "fun_name": "get_inventory_variables", "commit_message": "Add `use_rsa_sha2_algorithms` option for paramiko (#78789)\n\nFixes #76737\r\nFixes #77673\r\n\r\nCo-authored-by: Matt Clay ", "code": "def get_inventory_variables(self) -> dict[str, t.Optional[t.Union[str, int]]]:\n \n core_ci = self.wait_for_instance()\n connection = core_ci.connection\n\n variables: dict[str, t.Optional[t.Union[str, int]]] = dict(\n ansible_connection=self.config.connection,\n ansible_pipelining='yes',\n ansible_host=connection.hostname,\n ansible_port=connection.port,\n ansible_user=connection.username,\n ansible_ssh_private_key_file=core_ci.ssh_key.key,\n ansible_paramiko_use_rsa_sha2_algorithms='no',\n ansible_network_os=f'{self.config.collection}.{self.config.platform}' if self.config.collection else self.config.platform,\n )\n\n return variables\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 163, "n_words": 33, "vocab_size": 29, "complexity": 2, "nloc": 15, "token_counts": 122, "n_ast_nodes": 199, "n_identifiers": 28, "random_cut": "def get_inventory_variables(self) -> dict[str, t.Optional[t.Union[str, int]]]:\n \n core_ci = self.wait_for_instance()\n connection = core_ci.connection\n\n variables: dict[str, t.Optional[t.Union[str, int]]] = dict(\n ansible_connection=self.config.connection,\n ansible_pipelining='yes',\n ansible_host=connection.hostname,\n ansible_port=connection.port,\n ansible_user=connection.username,\n ansible_ssh_private_key_file=core_ci.ssh_key.key,\n ansible_paramiko_use_rsa_sha2_algorithms='no',\n ansible_network_os" }, { "id": 257559, "commit_id": "82df677ebf853340d331ff0868304cc958307ee0", "repo": "haystack", "path": "rest_api/test/test_rest_api.py", "file_name": "test_rest_api.py", "fun_name": "test_file_upload_with_wrong_meta", "commit_message": "API tests (#2738)\n\n* clean up tests and run earlier\r\n\r\n* use change detection\r\n\r\n* better naming, skip ES\r\n\r\n* more cleanup\r\n\r\n* fix job name\r\n\r\n* dummy commit to trigger the CI\r\n\r\n* mock away the PDF converter\r\n\r\n* make the test compatible with 3.7\r\n\r\n* removed leftover\r\n\r\n* always run the api tests, use a matrix for the OS\r\n\r\n* refactor all the tests\r\n\r\n* remove outdated dependency\r\n\r\n* pylint\r\n\r\n* new abstract method\r\n\r\n* adjust for older python versions\r\n\r\n* rename pipeline file\r\n\r\n* address PR comments", "code": "def test_file_upload_with_wrong_meta(client):\n file_to_upload = {\"files\": (Path(__file__).parent / \"samples\" / \"pdf\" / \"sample_pdf_1.pdf\").open(\"rb\")}\n response = client.post(url=\"/file-upload\", files=file_to_upload, data={\"meta\": \"1\"})\n assert 500 == response.status_code\n # Ensure the `convert` method was never called\n MockPDFToTextConverter.mocker.convert.assert_not_called()\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", 
"ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 45, "n_words": 31, "vocab_size": 28, "complexity": 1, "nloc": 5, "token_counts": 67, "n_ast_nodes": 121, "n_identifiers": 17, "random_cut": "def test_file_upload_with_wrong_meta(client):\n file_to_upload = {\"files\": (Path(__file__).parent / \"samples\" / \"pdf\" / \"sample_pdf_1.pdf\").open(\"rb\")}\n response = client.post(url=\"/file-upload\", files=file_to_upload, data={\"meta\": \"1\"})\n assert 500 == response.status_code\n # E" }, { "id": 288657, "commit_id": "9850709b37fdfa704ac3db4c45a2660880a7ca65", "repo": "core", "path": "homeassistant/components/skybell/sensor.py", "file_name": "sensor.py", "fun_name": "native_value", "commit_message": "Add strict typing to Skybell (#79800)", "code": "def native_value(self) -> StateType | datetime:\n \n return self.entity_description.value_fn(self._device)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 21, "n_ast_nodes": 36, "n_identifiers": 7, "random_cut": "def native_value(self) -> StateType | datetime:\n \n " }, { "id": 178057, "commit_id": "5a0415ea99e3ef95bdbb2d6b62577c0c868b9540", "repo": "label-studio", "path": "label_studio/projects/models.py", "file_name": "models.py", "fun_name": "_get_annotation_key", "commit_message": "fix: DEV-3164: Remove potential data exposure from logs (#2828)\n\n* Remove potential data exposure from logs\r\n\r\n* Bump converter & tools pip versions\r\n\r\nCo-authored-by: nik ", "code": "def _get_annotation_key(self, result):\n result_type = result.get('type', None)\n if result_type in ('relation', 'pairwise', None):\n return None\n if 'from_name' not in result or 'to_name' not in result:\n logger.error(\n 'Unexpected annotation.result format: \"from_name\" or \"to_name\" not found',\n extra={'sentry_skip': True},\n )\n return None\n result_from_name = result['from_name']\n key = get_annotation_tuple(result_from_name, result['to_name'], result_type or '')\n return key\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 166, "n_words": 51, "vocab_size": 36, "complexity": 5, "nloc": 13, "token_counts": 81, "n_ast_nodes": 138, "n_identifiers": 11, "random_cut": "def _get_annotation_key(self, result):\n result_type = result.get('type', None)\n if result_type in ('relation', 'pairwise', None):\n return None\n if 'from_name' not in result or 'to_name' not in result:\n logger.error(\n 'Unexpected annotation.result format: \"from_name\" or \"to_name\" not found',\n extra={'sentry_skip': True},\n )\n return None\n result_from_name = result['from_name']\n key = get_annotation_" }, { "id": 94829, "commit_id": "ab993b32614bb83d17d10e1041817e43dd6f5980", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events.py", "file_name": "test_organization_events.py", "fun_name": "test_stack_wildcard_condition", "commit_message": "fix(tests): Fix dnd backend test flakes (#37916)\n\nThis PR fixes 3 major flakes:\r\n\r\nFixes SENTRY-TESTS-3J5: Just sort the project id order\r\n\r\nFixes SENTRY-TESTS-3HQ: Flakes because we calculate the retention\r\nin the test once and the value returned in the response is calculated\r\na little while after. 
We don't need to test for seconds granularity\r\nso replacing seconds to 0.\r\n\r\nFixes SENTRY-TESTS-3J0: Successively calling before_now results in some flakes\r\nparticularly in tests that are calculating aggregates\r\non transaction.duration. Introduced a load_data method\r\nthat takes a datetime object timestamp and a timedelta duration\r\ncalculates the offset based on timestamp to get start_timestamp.", "code": "def test_stack_wildcard_condition(self):\n data = self.load_data(platform=\"javascript\")\n data[\"timestamp\"] = self.ten_mins_ago\n self.store_event(data=data, project_id=self.project.id)\n\n query = {\"field\": [\"stack.filename\", \"message\"], \"query\": \"stack.filename:*.js\"}\n response = self.do_request(query)\n assert response.status_code == 200, response.content\n assert len(response.data[\"data\"]) == 1\n assert response.data[\"meta\"][\"fields\"][\"message\"] == \"string\"\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 88, "n_words": 33, "vocab_size": 26, "complexity": 1, "nloc": 9, "token_counts": 99, "n_ast_nodes": 172, "n_identifiers": 16, "random_cut": "def test_stack_wildcard_condition(self):\n data = self.load_data(platform=\"javascript\")\n data[\"timestamp\"] = self.ten_mins_ago\n self.store_event(data=data, project_id=self.project.id)\n\n query = {\"field\": [\"stack.filename\", \"message\"], \"query\": \"stack.filename:*.js\"}\n response = self.do_request(query)\n assert response.status_code == 200, response.content\n assert len(response.data[\"data\"]) == 1\n assert respons" }, { "id": 6877, "commit_id": "b59ce782e675d1c4511fad9f13b12fc3f2f02e90", "repo": "ludwig", "path": "tests/integration_tests/test_hyperopt_ray_horovod.py", "file_name": "test_hyperopt_ray_horovod.py", "fun_name": "test_hyperopt_run_hyperopt", "commit_message": "Fix ray hyperopt (#1999)\n\n* WIP fix ray hyperopt\r\n\r\n* Fixed kwargs\r\n\r\n* Updated the nones\r\n\r\n* Placement groups\r\n\r\n* Updated test cpus\r\n\r\n* Test with dynamic resource allocation\r\n\r\n* Using 0 CPUs for evaluation and using dask annotate\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Updates to ray backend and hyperopt execution\r\n\r\n* Added dask global config\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Uncommented tests\r\n\r\n* Disabled async hyperband tests\r\n\r\n* Responded to comments\r\n\r\n* Fixed all hyperopt horovod tests to use 10 CPUs\r\n\r\n* Moved dask config setting to ray backend\r\n\r\n* Calculate stats for distributed datasets (#2016)\r\n\r\n* Fixed tests, responded to comments\r\n\r\n* Responded to comments\r\n\r\n* Updated horovod hyperopt tests to be consistent with the hyperopt refactor, added a df_engine attribute to RayPredictor\r\n\r\n* Added parentheses on pandas\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: Travis Addair \r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def test_hyperopt_run_hyperopt(csv_filename, ray_mock_dir):\n input_features = [number_feature(), number_feature()]\n output_features = [binary_feature()]\n\n csv_filename = os.path.join(ray_mock_dir, \"dataset.csv\")\n dataset_csv = generate_data(input_features, output_features, csv_filename, 
num_examples=100)\n dataset_parquet = create_data_set_to_use(\"parquet\", dataset_csv)\n\n config = {\n \"input_features\": input_features,\n \"output_features\": output_features,\n \"combiner\": {\"type\": \"concat\", \"num_fc_layers\": 2},\n TRAINER: {\"epochs\": 4, \"learning_rate\": 0.001},\n \"backend\": {\"type\": \"ray\", **RAY_BACKEND_KWARGS},\n }\n\n output_feature_name = output_features[0][\"name\"]\n\n hyperopt_configs = {\n \"parameters\": {\n \"trainer.learning_rate\": {\n \"space\": \"loguniform\",\n \"lower\": 0.001,\n \"upper\": 0.1,\n },\n output_feature_name + \".output_size\": {\"space\": \"randint\", \"lower\": 2, \"upper\": 32},\n output_feature_name + \".num_fc_layers\": {\"space\": \"randint\", \"lower\": 2, \"upper\": 6},\n },\n \"goal\": \"minimize\",\n \"output_feature\": output_feature_name,\n \"validation_metrics\": \"loss\",\n \"executor\": {\"type\": \"ray\", \"num_samples\": 2},\n \"search_alg\": {\"type\": \"variant_generator\"},\n }\n\n # add hyperopt parameter space to the config\n config[\"hyperopt\"] = hyperopt_configs\n run_hyperopt(config, dataset_parquet, ray_mock_dir)\n\n\n@spawn", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "@spawn", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 322, "n_words": 112, "vocab_size": 81, "complexity": 1, "nloc": 32, "token_counts": 229, "n_ast_nodes": 404, "n_identifiers": 22, "random_cut": "def test_hyperopt_run_hyperopt(csv_filename, ray_mock_dir):\n input_features = [number_feature(), number_feature()]\n output_features = [binary_feature()]\n\n csv_filename = os.path.join(ray_mock_dir, \"dataset.csv\")\n dataset_csv = generate_data(input_features, output_features, csv_filename, num_examples=100)\n dataset_parquet = create_data_set_to_use(\"parquet\", dataset_csv)\n\n config = {\n \"input_features\": input_features,\n \"output_features\": output_features,\n \"combiner\": {\"type\": \"concat\", \"num_fc_layers\": 2},\n TRAINER: {\"epochs\": 4, \"learning_rate\": 0.001},\n \"backend\": {\"type\": \"ray\", **RAY_BACKEND_KWARGS},\n }\n\n output_feature_name = output_features[0][\"name\"]\n\n hyperopt_config" }, { "id": 218969, "commit_id": "0820c040ec2815f40bd0e469e27c2bf4d2cc33bc", "repo": "XX-Net", "path": "code/default/gae_proxy/local/web_control.py", "file_name": "web_control.py", "fun_name": "req_importip_handler", "commit_message": "v4.6.0 compactiable with python 2.7.", "code": "def req_importip_handler(self):\n req = urlparse(self.path).query\n reqs = parse_qs(req, keep_blank_values=True)\n data = ''\n\n if reqs['cmd'] == ['importip']:\n count = 0\n ip_list = self.postvars['ipList'][0]\n lines = ip_list.split(\"\\n\")\n for line in lines:\n addresses = line.split('|')\n for ip in addresses:\n ip = ip.strip()\n if not utils.check_ip_valid(ip):\n continue\n if front.ip_manager.add_ip(ip, 100, \"google.com\", \"gws\"):\n count += 1\n data = '{\"res\":\"%s\"}' % count\n front.ip_manager.save(force=True)\n\n elif reqs['cmd'] == ['exportip']:\n data = '{\"res\":\"'\n for ip in front.ip_manager.ip_list:\n if front.ip_manager.ip_dict[ip]['fail_times'] > 0:\n continue\n data += \"%s|\" % ip\n data = data[0: len(data) - 1]\n data += '\"}'\n\n self.send_response_nc('text/html', data)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 422, "n_words": 89, "vocab_size": 56, "complexity": 9, "nloc": 27, "token_counts": 196, "n_ast_nodes": 336, "n_identifiers": 29, "random_cut": "def 
req_importip_handler(self):\n req = urlparse(self.path).query\n reqs = parse_qs(req, keep_blank_values=True)\n data = ''\n\n if reqs['cmd'] == ['importip']:\n count = 0\n ip_list = self.postvars['ipList'][0]\n lines = ip_list.split(\"\\n\")\n for line in lines:\n addresses = line.split('|')\n for ip in addresses:\n ip = ip.strip()\n if not utils.check_ip_valid(ip):\n continue\n if front.ip_manager.add_ip(ip, 100, \"google.com\", \"gws\"):\n count += 1\n data = '{\"res\":\"%s\"}' % c" }, { "id": 55912, "commit_id": "168483e9cf038a3629f880f838b5aa9291a48411", "repo": "prefect", "path": "tests/blocks/test_core.py", "file_name": "test_core.py", "fun_name": "test_block_load", "commit_message": "Block capabilities (PrefectHQ/orion#1898)\n\n* Add capabilities to BlockSchemas\r\n\r\n* Remove type field from BlockSchemas\r\n\r\n* Create postgres migration, bump API version", "code": "async def test_block_load(self, test_block, block_document):\n my_block = await test_block.load(block_document.name)\n\n assert my_block._block_document_name == block_document.name\n assert my_block._block_document_id == block_document.id\n assert my_block._block_type_id == block_document.block_type_id\n assert my_block._block_schema_id == block_document.block_schema_id\n assert my_block.foo == \"bar\"\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 70, "n_words": 29, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 58, "n_ast_nodes": 91, "n_identifiers": 15, "random_cut": "async def test_block_load(self, test_block, block_document):\n my_block = await test_block.load(block_document.name)\n\n assert my_block._block_document_name == block_document.name\n assert my_block._block_document_id == block_document.id\n a" }, { "id": 297156, "commit_id": "923fa473e171fcdf396556ea200612e378f9b0a5", "repo": "core", "path": "tests/components/blebox/test_climate.py", "file_name": "test_climate.py", "fun_name": "test_reding_hvac_actions", "commit_message": "Blebox add thermoBox to climate (#81090)\n\nCo-authored-by: Martin Hjelmare ", "code": "async def test_reding_hvac_actions(saunabox, hass, caplog):\n \n\n caplog.set_level(logging.ERROR)\n\n feature_mock, entity_id = saunabox\n await async_setup_entity(hass, entity_id)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 25, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 15, "token_counts": 108, "n_ast_nodes": 50, "n_identifiers": 10, "random_cut": "async def test_reding_hvac_actions(saunabox, hass, caplog):\n \n\n caplog.set_level(logging.ERROR)\n\n feature_mock, entity_id = saunabox\n await async_setup_entity(hass, entity_id)\n" }, { "id": 27323, "commit_id": "ab7e4e203fd23a5fec1d27d0774905c52c509dc3", "repo": "saleor", "path": "saleor/plugins/tests/test_manager.py", "file_name": "test_manager.py", "fun_name": "test_manager_calculates_order_line_total", "commit_message": "Revert \"Add fix for multiplied prices on Avatax side (#9699)\" (#9750)\n\nThis reverts commit 5dc3a30ef3bb8dfce67ede276fa465e2c420d003.", "code": "def test_manager_calculates_order_line_total(order_line, plugins):\n currency = order_line.order.currency\n expected_total = (\n TaxedMoney(Money(\"1.0\", currency), Money(\"1.0\", currency))\n if plugins\n else quantize_price(order_line.unit_price * order_line.quantity, currency)\n )\n taxed_total = (\n PluginsManager(plugins=plugins)\n 
.calculate_order_line_total(\n order_line.order, order_line, order_line.variant, order_line.variant.product\n )\n .price_with_discounts\n )\n assert expected_total == taxed_total\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 114, "n_words": 37, "vocab_size": 30, "complexity": 2, "nloc": 15, "token_counts": 84, "n_ast_nodes": 127, "n_identifiers": 17, "random_cut": "def test_manager_calculates_order_line_total(order_line, plugins):\n currency = order_line.order.currency\n expected_total = (\n TaxedMoney(Money(\"1.0\", currency), Money(\"1.0\", currency))\n if plugins\n else quantize_price(order_line.unit_price * order_line.quantity, currency)\n )\n taxed_total = (\n PluginsManager(plugins=plugins)\n .cal" }, { "id": 191063, "commit_id": "301124c5b377fa56b940d298900dbc5816dbc24e", "repo": "thumbor", "path": "thumbor/filters/blur.py", "file_name": "blur.py", "fun_name": "apply_blur", "commit_message": "Reformat to 80 chars and mypy.ini", "code": "def apply_blur(mode, data, size, radius, sigma=0):\n if sigma == 0:\n sigma = radius\n radius = min(radius, MAX_RADIUS)\n matrix, matrix_size = generate_1d_matrix(sigma, radius)\n data = _convolution.apply(\n mode, data, size[0], size[1], matrix, matrix_size, True\n )\n return _convolution.apply(mode, data, size[0], size[1], matrix, 1, True)\n\n", "url": "https://github.com/thumbor/thumbor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 72, "n_words": 41, "vocab_size": 30, "complexity": 2, "nloc": 9, "token_counts": 92, "n_ast_nodes": 124, "n_identifiers": 13, "random_cut": "def apply_blur(mode, data, size, radius, sigma=0):\n if sigma " }, { "id": 74876, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/documents/tests/test_views.py", "file_name": "test_views.py", "fun_name": "test_content", "commit_message": "Reformat with black", "code": "def test_content(self):\n self.assertEqual(\n b\"\".join(self.get().streaming_content), b\"A boring example document\"\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 33, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 26, "n_ast_nodes": 43, "n_identifiers": 6, "random_cut": "def test_content(self):\n self.assertEqual(\n " }, { "id": 179939, "commit_id": "070b8a96b5b8448e306bd40f2b12d44b759afd48", "repo": "gradio", "path": "test/test_components.py", "file_name": "test_components.py", "fun_name": "test_tokenize", "commit_message": "blocks-components-tests\n- move gradio/test_data to test/test_data/media_data", "code": "def test_tokenize(self):\n x_wav = media_data.BASE64_AUDIO\n audio_input = gr.Audio()\n tokens, _, _ = audio_input.tokenize(x_wav)\n self.assertEquals(len(tokens), audio_input.interpretation_segments)\n x_new = audio_input.get_masked_inputs(tokens, [[1] * len(tokens)])[0]\n similarity = SequenceMatcher(a=x_wav[\"data\"], b=x_new).ratio()\n self.assertGreater(similarity, 0.9)\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 75, "n_words": 27, "vocab_size": 23, "complexity": 1, "nloc": 8, "token_counts": 93, "n_ast_nodes": 143, "n_identifiers": 22, "random_cut": "def test_tokenize(self):\n x_wav = media_data.BASE64_AUDIO\n audio_input = gr.Audio()\n tokens, _, _ = 
audio_input.tokenize(x_wav)\n self.assertEquals(len(tokens), audio_input.interpretation_segments)\n x_new = audio_input.get_masked_inputs(tokens, [[1" }, { "id": 204789, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/signing.py", "file_name": "signing.py", "fun_name": "signature", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def signature(self, value, key=None):\n key = key or self.key\n return base64_hmac(self.salt + \"signer\", value, key, algorithm=self.algorithm)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 29, "n_words": 16, "vocab_size": 14, "complexity": 2, "nloc": 3, "token_counts": 37, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def signature(self, value, key=None):\n key = key or self.key\n return b" }, { "id": 200564, "commit_id": "e31e048fe4834f7259193c5e13e7e7b0d5fcd230", "repo": "sympy", "path": "sympy/tensor/tests/test_tensor.py", "file_name": "test_tensor.py", "fun_name": "test_TensMul_subs", "commit_message": "Tests for subs and xreplace with dummy index conflicts\n\nsee https://github.com/sympy/sympy/issues/24337", "code": "def test_TensMul_subs():\n \n R3 = TensorIndexType('R3', dim=3)\n p, q, r = tensor_indices(\"p q r\", R3)\n K = TensorHead(\"K\", [R3])\n V = TensorHead(\"V\", [R3])\n C0 = TensorIndex(R3.dummy_name + \"_0\", R3, True)\n\n assert ( K(p)*V(r)*K(-p) ).subs({V(r): K(q)*K(-q)}) == K(p)*K(q)*K(-q)*K(-p)\n assert ( K(p)*V(r)*K(-p) ).xreplace({V(r): K(q)*K(-q)}) == K(p)*K(q)*K(-q)*K(-p)\n assert ( K(p)*V(r) ).xreplace({p: C0, V(r): K(q)*K(-q)}) == K(C0)*K(q)*K(-q)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 79, "n_words": 52, "vocab_size": 37, "complexity": 1, "nloc": 9, "token_counts": 236, "n_ast_nodes": 386, "n_identifiers": 16, "random_cut": "def test_TensMul_subs():\n \n R3 = TensorIndexType('R3', dim=3)\n p, q, r = tensor_indices(\"p q r\", R3)\n K = TensorHead(\"K\", [R3])\n V = TensorHead(\"V\", [R3])\n " }, { "id": 135330, "commit_id": "9c9977f814facdebc1828fa576531fc95f553172", "repo": "ray", "path": "python/ray/tests/kuberay/test_autoscaling_config.py", "file_name": "test_autoscaling_config.py", "fun_name": "test_cr_image_consistency", "commit_message": "[autoscaler][kuberay] Never request more than maxReplicas worker pods (#29770)\n\nPartially addresses ray-project/kuberay#560, in which it was observed that \"replicas\" was being set higher than \"maxReplicas\" in the KubeRay CR.\r\n\r\nApplies a surface-level fix by making sure that the autoscaler does not set replicas higher the maxReplicas when creating nodes.\r\n\r\nSigned-off-by: Dmitri Gekhtman ", "code": "def test_cr_image_consistency():\n \n cr = get_basic_ray_cr()\n\n group_specs = [cr[\"spec\"][\"headGroupSpec\"]] + cr[\"spec\"][\"workerGroupSpecs\"]\n # Head, CPU group, GPU group.\n assert len(group_specs) == 3\n\n ray_containers = [\n group_spec[\"template\"][\"spec\"][\"containers\"][0] for group_spec in group_specs\n ]\n\n # All Ray containers in the example config have \"ray-\" in their name.\n assert all(\"ray-\" in ray_container[\"name\"] for ray_container in ray_containers)\n\n # All Ray images are from the Ray repo.\n assert all(\n \"rayproject/ray\" in ray_container[\"image\"] for ray_container in ray_containers\n )\n\n # All Ray images are the same.\n assert len({ray_container[\"image\"] for 
ray_container in ray_containers}) == 1\n\n\n@pytest.mark.parametrize(\"exception\", [Exception, requests.HTTPError])\n@pytest.mark.parametrize(\"num_exceptions\", range(6))", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"exception\", [Exception, requests.HTTPError])\n@pytest.mark.parametrize(\"num_exceptions\", range(6))", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 143, "n_words": 89, "vocab_size": 57, "complexity": 5, "nloc": 12, "token_counts": 101, "n_ast_nodes": 229, "n_identifiers": 16, "random_cut": "def test_cr_image_consistency():\n \n cr = get_basic_ray_cr()\n\n group_specs = [cr[\"spec\"][\"headGroupSpec\"]] + cr[\"spec\"][\"workerGroupSpecs\"]\n # Head, CPU group, GPU group.\n assert len(group_specs) == 3\n\n ray_containers = [\n group_spec[\"template\"][\"spec\"][\"containers\"][0] for group_spec in group_specs\n ]\n\n # All Ray containers in the example config have \"ray-\" in their name.\n assert all(\"ray-\" in ray_container[\"name\"] for ray_container in ray_containers)\n\n # All Ray images are from the Ray repo.\n assert all(\n \"rayproject/" }, { "id": 151238, "commit_id": "eeebb78a5c772b0c3e569fd476587facb1f8a9dc", "repo": "freqtrade", "path": "tests/freqai/test_freqai_interface.py", "file_name": "test_freqai_interface.py", "fun_name": "is_mac", "commit_message": "skip darwin in RL tests, remove example scripts, improve doc", "code": "def is_mac() -> bool:\n machine = platform.system()\n return \"Darwin\" in machine\n\n\n@pytest.mark.parametrize('model', [\n 'LightGBMRegressor',\n 'XGBoostRegressor',\n 'CatboostRegressor',\n 'ReinforcementLearner',\n 'ReinforcementLearner_multiproc'\n ])", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize('model', [\n 'LightGBMRegressor',\n 'XGBoostRegressor',\n 'CatboostRegressor',\n 'ReinforcementLearner',\n 'ReinforcementLearner_multiproc'\n ])", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 41, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 71, "n_identifiers": 8, "random_cut": "def is_mac() -> bool:\n machine = platform.system()\n return \"Darwin\" in machine\n\n\n@pytest.mark.parametrize('model', [\n 'LightGBMReg" }, { "id": 85387, "commit_id": "6aaaf5089b2c39757883179df5a8512db3b0c716", "repo": "sentry", "path": "src/sentry/eventstore/models.py", "file_name": "models.py", "fun_name": "groups", "commit_message": "feat(perf_issues): Add `GroupEvent` and split some functionality in `Event` into a base class. (#38143)\n\nSince we can now have events with multiple groups, we can no longer rely on the `Event.group`\r\nproperty. This pr adds in a `GroupEvent` subclass that should be passed around wherever we expect an\r\nevent to have a single `Group` associated with it.\r\n\r\n`Event` has been split up into `BaseEvent` and `Event`. We will deprecate and remove uses of\r\n`group_id` and `group` in the `Event` class going forward. If we need an event with a `Group`, we\r\ncan use `build_group_events` to fetch all `GroupEvents` associated with the `Event`, or `for_group`\r\nif we just need a specific `Event`/`Group` pairing.\r\n\r\nGoing forward, the plan is to store all groups in the `groups` property. This means that error\r\nevents being sent via eventstream will have their group included in `groups` as well. We'll\r\nneed to update the errors processor in snuba to look there instead of `group_id`. 
This seems cleaner\r\nlong term, instead of having both `group_id` and `group_ids` passed through.\r\n\r\nTo figure out where we need to use `build_group_events` and `for_group` we can do a mix of searching\r\nthe codebase and commenting out the `group_id` and `group` properties and see how CI goes.", "code": "def groups(self) -> Sequence[Group]:\n from sentry.models import Group\n\n if getattr(self, \"_groups_cache\"):\n return self._groups_cache\n\n if self._group_ids is not None:\n group_ids = self._group_ids\n else:\n snuba_group_id = self.group_id\n # TODO: Replace `snuba_group_id` with this once we deprecate `group_id`.\n # snuba_group_id = self._snuba_data.get(self._get_column_name(Columns.GROUP_ID))\n snuba_group_ids = self._snuba_data.get(self._get_column_name(Columns.GROUP_IDS))\n group_ids = []\n if snuba_group_id:\n group_ids.append(snuba_group_id)\n if snuba_group_ids:\n group_ids.extend(snuba_group_ids)\n\n if group_ids:\n groups = list(Group.objects.filter(id__in=group_ids))\n else:\n groups = []\n\n self._groups_cache = groups\n return groups\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 271, "n_words": 65, "vocab_size": 43, "complexity": 6, "nloc": 20, "token_counts": 118, "n_ast_nodes": 194, "n_identifiers": 24, "random_cut": "def groups(self) -> Sequence[Group]:\n from sentry.models import Group\n\n if getattr(self, \"_groups_cache\"):\n return self._groups_cache\n\n if self._group_ids is not None:\n group_ids = self._group_ids\n else:\n snuba_group_id = self.group_id\n # TODO: Replace `snuba_group_id` with this once we deprecate `group_id`.\n # snuba_group_id = self._snuba_data.get(self._get_column_name(C" }, { "id": 105911, "commit_id": "2945690ea731f85a356220a71cdc630281c676f4", "repo": "datasets", "path": "src/datasets/iterable_dataset.py", "file_name": "iterable_dataset.py", "fun_name": "__iter__", "commit_message": "Multiprocessed dataset builder [WIP] (#5107)\n\n* multiprocessing-compatible naming scheme and refactor\r\n\r\n* multiprocessed shard writing for GeneratorBasedBuilder\r\n\r\n* multiprocessed shard writing for ArrowBasedBuilder\r\n\r\n* style\r\n\r\n* multiprocessed dataset loading\r\n\r\n* compatibility with non-sharded datasets\r\n\r\n* bugfix\r\n\r\n* bugfix\r\n\r\n* removed unused import\r\n\r\n* fixed bad ordering\r\n\r\n* less misleading tqdm\r\n\r\n* fix gen_kwargs distribution + read shards\r\n\r\n* minor\r\n\r\n* minor2\r\n\r\n* support beam datasets\r\n\r\n* docstrings + minor\r\n\r\n* add iflatmap_unordered for parallel write & progress updates\r\n\r\n* use 1 tqdm bar receiving updates from subprocesses\r\n\r\n* docs\r\n\r\n* add test_iflatmap_unordered\r\n\r\n* style\r\n\r\n* test arrow_reader.py\r\n\r\n* fix test_iflatmap_unordered\r\n\r\n* add Beam test_download_and_prepare_sharded\r\n\r\n* test gen_kwargs distribution\r\n\r\n* test download_and_prepare with num_proc\r\n\r\n* style\r\n\r\n* improve test\r\n\r\n* don't close the pool\r\n\r\n* fix multiprocessing on windows\r\n\r\n* keep multiprocessing disabled by default\r\n\r\n* again + docs\r\n\r\n* more docs\r\n\r\n* more docs\r\n\r\n* some var renaming\r\n\r\n* style\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Mario Šaško \r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Mario Šaško \r\n\r\n* added utils/sharding.py\r\n\r\n* style\r\n\r\n* style\r\n\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Quentin Lhoest 
<42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: Mario Šaško ", "code": "def __iter__(self):\n \n rng = deepcopy(self.generator)\n kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)\n yield from self.generate_examples_fn(**kwargs_with_shuffled_shards)\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 40, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 33, "n_ast_nodes": 56, "n_identifiers": 9, "random_cut": "def __iter__(self):\n \n rng = deepcopy(self.generator)\n kwargs_with_shuff" }, { "id": 187056, "commit_id": "6325c74e6869b45051ec111e4243d77cc536ba66", "repo": "streamlink", "path": "tests/utils/test_cache.py", "file_name": "test_cache.py", "fun_name": "test_lru_cache", "commit_message": "chore: remove unnecessary collection.OrderedDict\n\n- Replace collection.OrderedDict with builtins.dict where possible:\n Python 3.7+ ensures the correct order in builtins.dict objects and is\n no longer an implementation detail of cpython.\n- Fix OrderedDict type annotation in streamlink.utils.cache.LRUCache\n- Add unit test for streamlink.utils.cache.LRUCache", "code": "def test_lru_cache():\n cache = LRUCache(num=3)\n assert cache.get(\"foo\") is None, \"Getter returns None for unknown items\"\n\n cache.set(\"foo\", \"FOO\")\n assert list(cache.cache.items()) == [(\"foo\", \"FOO\")], \"Setter adds new items\"\n\n assert cache.get(\"foo\") == \"FOO\", \"Getter returns correct value of known items\"\n\n cache.set(\"bar\", \"BAR\")\n cache.set(\"baz\", \"BAZ\")\n cache.set(\"qux\", \"QUX\")\n assert list(cache.cache.items()) == [(\"bar\", \"BAR\"), (\"baz\", \"BAZ\"), (\"qux\", \"QUX\")], \"Setter respects max queue size\"\n\n cache.get(\"bar\")\n assert list(cache.cache.items()) == [(\"baz\", \"BAZ\"), (\"qux\", \"QUX\"), (\"bar\", \"BAR\")], \"Getter moves known items to the end\"\n\n cache.get(\"unknown\")\n assert list(cache.cache.items()) == [(\"baz\", \"BAZ\"), (\"qux\", \"QUX\"), (\"bar\", \"BAR\")], \"Getter keeps order on unknown items\"\n\n cache.set(\"foo\", \"FOO\")\n assert list(cache.cache.items()) == [(\"qux\", \"QUX\"), (\"bar\", \"BAR\"), (\"foo\", \"FOO\")], \"Setter moves new items to the end\"\n\n cache.set(\"qux\", \"QUUX\")\n assert list(cache.cache.items()) == [(\"bar\", \"BAR\"), (\"foo\", \"FOO\"), (\"qux\", \"QUUX\")], \"Setter moves known items to the end\"\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 176, "n_words": 126, "vocab_size": 64, "complexity": 1, "nloc": 18, "token_counts": 280, "n_ast_nodes": 515, "n_identifiers": 8, "random_cut": "def test_lru_cache():\n cache = LRUCache(num=3)\n assert cache.get(\"foo\") is None, \"Getter returns None for unknown items\"\n\n cache.set(\"foo\", \"FOO\")\n assert list(cache.cache.items()) == [(\"foo\", \"FOO\")" }, { "id": 190355, "commit_id": "4fc3616712edb19179b17dd270ad6cf63abf99c2", "repo": "DeOldify", "path": "fastai/gen_doc/nbtest.py", "file_name": "nbtest.py", "fun_name": "get_qualname", "commit_message": "Upgrading to support latest Pytorch version", "code": "def get_qualname(elt):\n return elt.__qualname__ if hasattr(elt, '__qualname__') else fn_name(elt)\n", "url": "https://github.com/jantic/DeOldify.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 11, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 
2, "token_counts": 21, "n_ast_nodes": 34, "n_identifiers": 5, "random_cut": "def get_qualname(elt):\n return elt.__qualname__" }, { "id": 176242, "commit_id": "7669e7f2f31485015f3ea7cdd535e086467fa433", "repo": "networkx", "path": "networkx/generators/small.py", "file_name": "small.py", "fun_name": "truncated_cube_graph", "commit_message": "Use from_dict_of_lists instead of make_small_graph in generators.small (#5267)\n\n* Add test for digraph creation behavior.\r\n\r\n* Use from_dict_of_lists instead of make_small_graph\r\n\r\n* Make sure generators don't support digraph.\r\n\r\n* Rm redundant create_using check.", "code": "def truncated_cube_graph(create_using=None):\n \n G = nx.from_dict_of_lists(\n {\n 0: [1, 2, 4],\n 1: [11, 14],\n 2: [3, 4],\n 3: [6, 8],\n 4: [5],\n 5: [16, 18],\n 6: [7, 8],\n 7: [10, 12],\n 8: [9],\n 9: [17, 20],\n 10: [11, 12],\n 11: [14],\n 12: [13],\n 13: [21, 22],\n 14: [15],\n 15: [19, 23],\n 16: [17, 18],\n 17: [20],\n 18: [19],\n 19: [23],\n 20: [21],\n 21: [22],\n 22: [23],\n },\n create_using=create_using,\n )\n G.name = \"Truncated Cube Graph\"\n return G\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 364, "n_words": 75, "vocab_size": 66, "complexity": 1, "nloc": 31, "token_counts": 193, "n_ast_nodes": 260, "n_identifiers": 6, "random_cut": "def truncated_cube_graph(create_using=None):\n \n G = nx.from_dict_of_lists(\n {\n 0: [1, 2, 4],\n 1: [11, 14],\n 2: [3, 4],\n 3: [6, 8],\n 4: [5],\n " }, { "id": 58520, "commit_id": "2f22824cd7af9bb89c103698c05036f2542caff1", "repo": "prefect", "path": "src/prefect/cli/profile.py", "file_name": "profile.py", "fun_name": "check_orion_connection", "commit_message": "Remove extra \"f\" (#6384)", "code": "async def check_orion_connection(profile_name):\n with use_profile(profile_name, include_current_context=False):\n httpx_settings = dict(timeout=3)\n try:\n # attempt to infer Cloud 2.0 API from the connection URL\n cloud_client = get_cloud_client(\n httpx_settings=httpx_settings, infer_cloud_url=True\n )\n res = await cloud_client.api_healthcheck()\n exit_method, msg = (\n exit_with_success,\n f\"Connected to Prefect Cloud using profile {profile_name!r}\",\n )\n except CloudUnauthorizedError:\n # if the Cloud 2.0 API exists and fails to authenticate, notify the user\n exit_method, msg = (\n exit_with_error,\n f\"Error authenticating with Prefect Cloud using profile {profile_name!r}\",\n )\n except httpx.HTTPStatusError as exc:\n if exc.response.status_code == status.HTTP_404_NOT_FOUND:\n # if the route does not exist, attmpt to connect as a hosted Orion instance\n try:\n # inform the user if Prefect Orion endpoints exist, but there are\n # connection issues\n client = get_client(httpx_settings=httpx_settings)\n connect_error = await client.api_healthcheck()\n if connect_error is not None:\n exit_method, msg = (\n exit_with_error,\n f\"Error connecting to Prefect Orion using profile {profile_name!r}\",\n )\n elif await client.using_ephemeral_app():\n # if the client is using an ephemeral Orion app, inform the user\n exit_method, msg = (\n exit_with_success,\n f\"No Prefect Orion instance specified using profile {profile_name!r}. 
\"\n f\"Flow run metadata will be stored at the locally configured database: {prefect.settings.PREFECT_ORION_DATABASE_CONNECTION_URL.value()}\",\n )\n else:\n exit_method, msg = (\n exit_with_success,\n f\"Connected to Prefect Orion using profile {profile_name!r}\",\n )\n except Exception as exc:\n exit_method, msg = (\n exit_with_error,\n f\"Error connecting to Prefect Orion using profile {profile_name!r}\",\n )\n else:\n exit_method, msg = (\n exit_with_error,\n f\"Error connecting to Prefect Cloud: {exc!r}\",\n )\n except TypeError:\n # if no Prefect Orion API URL has been set, httpx will throw a TypeError\n try:\n # try to connect with the client anyway, it will likely use an\n # ephemeral Orion instance\n client = get_client(httpx_settings=httpx_settings)\n connect_error = await client.api_healthcheck()\n if connect_error is not None:\n exit_method, msg = (\n exit_with_error,\n f\"Error connecting to Prefect Orion using profile {profile_name!r}\",\n )\n elif await client.using_ephemeral_app():\n exit_method, msg = (\n exit_with_success,\n f\"No Prefect Orion instance specified using profile {profile_name!r}. \"\n f\"Flow run metadata will be stored at the locally configured database: {prefect.settings.PREFECT_ORION_DATABASE_CONNECTION_URL.value()}\",\n )\n else:\n exit_method, msg = (\n exit_with_success,\n f\"Connected to Prefect Orion using profile {profile_name!r}\",\n )\n except Exception as exc:\n exit_method, msg = (\n exit_with_error,\n f\"Error connecting to Prefect Orion using profile {profile_name!r}\",\n )\n except (httpx.ConnectError, httpx.UnsupportedProtocol) as exc:\n exit_method, msg = exit_with_error, \"Invalid Prefect API URL\"\n\n return exit_method, msg\n\n\n@profile_app.command()", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "@profile_app.command()", "n_ast_errors": 1, "ast_levels": 26, "n_whitespaces": 1781, "n_words": 367, "vocab_size": 122, "complexity": 12, "nloc": 76, "token_counts": 298, "n_ast_nodes": 585, "n_identifiers": 38, "random_cut": "async def check_orion_connection(profile_name):\n with use_profile(profile_name, include_current_context=False):\n httpx_settings = dict(timeout=3)\n try:\n # attempt to infer Cloud 2.0 API from the connection URL\n cloud_client = get_cloud_client(\n httpx_settings=httpx_settings, infer_cloud_url=True\n )\n res = await cloud_client.api_healthcheck()\n exit_method, msg = (\n exit_with_success,\n f\"Connected to Prefect Cloud using profile {profile_name!r}\",\n )\n except CloudUnauthorizedError:\n # if the Cloud 2.0 API exists and fails to authenticate, notify the user\n exit_method, msg = (\n exit_with_error,\n f\"Error authenticating with Prefect Cloud using profile {profile_name!r}\",\n )\n except httpx.HTTPStatusError as exc:\n if exc.response.status_code == status.HTTP_404_NOT_FOUND:\n # if the route does not exist, attmpt to connect as a hosted Orion instance\n try:\n # inform the user if Prefect Orion endpoints exist, but there are\n # connection issues\n client = get_client(httpx_settings=httpx_settings)\n connect_error = await client.api_healthcheck()\n if connect_error is not None:\n exit_method, msg = (\n exit_with_error,\n f\"Error connecting to Prefect Orion using profile {profile_name!r}\",\n )\n elif await client.using_ephemeral_app():\n # if the client is using an ephemeral Orion app, inform the user\n exit_method, msg = (\n exit_with_success,\n f\"No Prefect Orion instance specified using profile {profile_name!r}. 
\"\n f\"Flow run metadata will be stored at the locally configured database: {prefect.settings.PREFECT_ORION_DATABASE_CONNECTION_URL.value()}\",\n )\n else:\n exit_method, msg = (\n exit_with_success,\n f\"Connected to Prefect Orion using profile {profile_name!r}\",\n )\n except Exception as exc:\n exit_method, msg = (\n exit_with_error,\n f\"Error connecting to Prefect Orion using profile {profile_name!r}\",\n )\n else:\n exit_method, msg = (\n exit_with_error,\n f\"Error connecting to Prefect Cloud: {exc!r}\",\n )\n except TypeError:\n # if no Prefect Orion API URL has been set, httpx will throw a TypeError\n try:\n # try to connect with the client anyway, it will likely use an\n # ephemeral Orion instance\n client = get_client(httpx_settings=httpx_settings)\n connect_error = await client.api_healthcheck()\n if connect_error is not None:\n exit_method, msg = (\n exit_with_error,\n f\"Error connecting to Prefect Orion using profile {profile_name!r}\",\n )\n elif await client.using_ephemeral_app():\n exit_method, msg = (\n exit_with_success,\n f\"No Prefect Orion instance specified using profile {profile_name!r}. \"\n f\"Flow run metadata will be stored at the locally configured database: {prefect.settings.PREFECT_ORION_DATABASE_CONNECTION_URL.value()}\",\n )\n else:\n exit_method, msg = (\n exit_with_success,\n f\"Connected to Prefect Orion using profile {profile_name!r}\",\n )\n except Exception as exc:\n exit_method, msg = (\n exit_with_er" }, { "id": 53044, "commit_id": "3a2d581ec0540dab8efc5e30c1bc10dfa321f2b5", "repo": "prefect", "path": "src/prefect/utilities/logging.py", "file_name": "logging.py", "fun_name": "process", "commit_message": "Update src/prefect/utilities/logging.py\n\nCo-authored-by: Michael Adkins ", "code": "def process(self, msg, kwargs):\n kwargs[\"extra\"] = {**self.extra, **(kwargs.get(\"extra\") or {})}\n return (msg, kwargs)\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 26, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 3, "token_counts": 39, "n_ast_nodes": 62, "n_identifiers": 6, "random_cut": "def process(self, msg, kwargs):\n kwargs[\"extra\"] = {**self.extra, **(kwarg" }, { "id": 274469, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/legacy_tf_layers/variable_scope_shim_test.py", "file_name": "variable_scope_shim_test.py", "fun_name": "call", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def call(self, inputs):\n with tf.compat.v1.variable_scope(\"foo\"):\n return self.scale_by_y(inputs)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 24, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 45, "n_identifiers": 8, "random_cut": "def call(self, inputs):\n with tf.compat.v1.variable_scope(" }, { "id": 194353, "commit_id": "a8007dcdfb5159a711fa343d2ac4bb7df826975f", "repo": "vision", "path": "test/test_prototype_transforms.py", "file_name": "test_prototype_transforms.py", "fun_name": "test__get_params", "commit_message": "rename features._Feature to datapoints._Datapoint (#7002)\n\n* rename features._Feature to datapoints.Datapoint\r\n\r\n* _Datapoint to Datapoint\r\n\r\n* move is_simple_tensor to transforms.utils\r\n\r\n* fix CI\r\n\r\n* move Datapoint out of public namespace", "code": "def 
test__get_params(self, padding, pad_if_needed, size, mocker):\n image = mocker.MagicMock(spec=datapoints.Image)\n image.num_channels = 3\n image.spatial_size = (24, 32)\n h, w = image.spatial_size\n\n transform = transforms.RandomCrop(size, padding=padding, pad_if_needed=pad_if_needed)\n params = transform._get_params([image])\n\n if padding is not None:\n if isinstance(padding, int):\n pad_top = pad_bottom = pad_left = pad_right = padding\n elif isinstance(padding, list) and len(padding) == 2:\n pad_left = pad_right = padding[0]\n pad_top = pad_bottom = padding[1]\n elif isinstance(padding, list) and len(padding) == 4:\n pad_left, pad_top, pad_right, pad_bottom = padding\n\n h += pad_top + pad_bottom\n w += pad_left + pad_right\n else:\n pad_left = pad_right = pad_top = pad_bottom = 0\n\n if pad_if_needed:\n if w < size[1]:\n diff = size[1] - w\n pad_left += diff\n pad_right += diff\n w += 2 * diff\n if h < size[0]:\n diff = size[0] - h\n pad_top += diff\n pad_bottom += diff\n h += 2 * diff\n\n padding = [pad_left, pad_top, pad_right, pad_bottom]\n\n assert 0 <= params[\"top\"] <= h - size[0] + 1\n assert 0 <= params[\"left\"] <= w - size[1] + 1\n assert params[\"height\"] == size[0]\n assert params[\"width\"] == size[1]\n assert params[\"needs_pad\"] is any(padding)\n assert params[\"padding\"] == padding\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 558, "n_words": 179, "vocab_size": 72, "complexity": 10, "nloc": 37, "token_counts": 308, "n_ast_nodes": 470, "n_identifiers": 30, "random_cut": "def test__get_params(self, padding, pad_if_needed, size, mocker):\n image = mocker.MagicMock(spec=datapoints.Image)\n image.num_channels = 3\n image.spatial_size = (24, 32)\n h, w = image.spatial_size\n\n transform = transforms.RandomCrop(size, padding=padding, pad_if_needed=pad_if_needed)\n params = transform._get_params([image])\n\n if padding is not None:\n if isinstance(padding, int):\n pad_top = pad_bottom = pad_left = pad_right = padding\n elif isinstance(padding, list) and len(padding) == 2:\n" }, { "id": 288080, "commit_id": "84b2c74746b694d217fe6d448a8dfff4bc2d7a9e", "repo": "core", "path": "tests/components/mqtt/test_discovery.py", "file_name": "test_discovery.py", "fun_name": "test_discovery_expansion", "commit_message": "Move MQTT discovery hass.data globals to dataclass (#78706)\n\n* Add MQTT discovery hass.data globals to dataclass\r\n\r\n* isort\r\n\r\n* Additional rework\r\n\r\n* Add hass.data[\"mqtt_tags\"] to dataclass\r\n\r\n* Follow-up comment\r\n\r\n* Corrections", "code": "async def test_discovery_expansion(hass, mqtt_mock_entry_no_yaml_config, caplog):\n \n await mqtt_mock_entry_no_yaml_config()\n data = (\n '{ \"~\": \"some/base/topic\",'\n ' \"name\": \"DiscoveryExpansionTest1\",'\n ' \"stat_t\": \"test_topic/~\",'\n ' \"cmd_t\": \"~/test_topic\",'\n ' \"availability\": ['\n \" {\"\n ' \"topic\":\"~/avail_item1\",'\n ' \"payload_available\": \"available\",'\n ' \"payload_not_available\": \"not_available\"'\n \" },\"\n \" {\"\n ' \"topic\":\"avail_item2/~\",'\n ' \"payload_available\": \"available\",'\n ' \"payload_not_available\": \"not_available\"'\n \" }\"\n \" ],\"\n ' \"dev\":{'\n ' \"ids\":[\"5706DF\"],'\n ' \"name\":\"DiscoveryExpansionTest1 Device\",'\n ' \"mdl\":\"Generic\",'\n ' \"hw\":\"rev1\",'\n ' \"sw\":\"1.2.3.4\",'\n ' \"mf\":\"None\",'\n ' \"sa\":\"default_area\"'\n \" }\"\n \"}\"\n )\n\n async_fire_mqtt_message(hass, \"homeassistant/switch/bla/config\", 
data)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"switch.DiscoveryExpansionTest1\")\n assert state.state == STATE_UNAVAILABLE\n\n async_fire_mqtt_message(hass, \"avail_item2/some/base/topic\", \"available\")\n await hass.async_block_till_done()\n\n state = hass.states.get(\"switch.DiscoveryExpansionTest1\")\n assert state is not None\n assert state.name == \"DiscoveryExpansionTest1\"\n assert (\"switch\", \"bla\") in hass.data[\"mqtt\"].discovery_already_discovered\n assert state.state == STATE_UNKNOWN\n\n async_fire_mqtt_message(hass, \"test_topic/some/base/topic\", \"ON\")\n\n state = hass.states.get(\"switch.DiscoveryExpansionTest1\")\n assert state.state == STATE_ON\n\n async_fire_mqtt_message(hass, \"some/base/topic/avail_item1\", \"not_available\")\n await hass.async_block_till_done()\n\n state = hass.states.get(\"switch.DiscoveryExpansionTest1\")\n assert state.state == STATE_UNAVAILABLE\n\n\n@patch(\"homeassistant.components.mqtt.PLATFORMS\", [Platform.SWITCH])", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@patch(\"homeassistant.components.mqtt.PLATFORMS\", [Platform.SWITCH])", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 451, "n_words": 134, "vocab_size": 73, "complexity": 1, "nloc": 48, "token_counts": 184, "n_ast_nodes": 375, "n_identifiers": 18, "random_cut": "async def test_discovery_expansion(hass, mqtt_mock_entry_no_yaml_config, caplog):\n \n await mqtt_mock_entry_no_yaml_config()\n data = (\n '{ \"~\": \"some/base/topic\",'\n ' \"name\": \"DiscoveryExpansionTest1\",'\n ' \"stat_t\": \"test_topic/~\",'\n ' \"cmd_t\": \"~/test_topic\",'\n ' \"availability\": ['\n \" {\"\n ' \"topic\":\"~/avail_item1\",'\n ' \"payload_available\": \"available\",'\n ' \"payload_not_available\": \"not_available\"'\n \" },\"\n \" {\"\n ' \"topic\":\"avail_item2/~\",'\n ' \"payload_available\": \"available\",'\n ' \"payload_not_ava" }, { "id": 21493, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py", "file_name": "tarfile.py", "fun_name": "__iter__", "commit_message": "Vendor in pip 22.1.2", "code": "def __iter__(self):\n \n while True:\n line = self.readline()\n if not line:\n break\n yield line\n#class ExFileObject\n\n#------------------\n# Exported Classes\n#------------------", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 78, "n_words": 20, "vocab_size": 18, "complexity": 3, "nloc": 6, "token_counts": 23, "n_ast_nodes": 47, "n_identifiers": 4, "random_cut": "def __iter__(self):\n \n while True:\n line = self.readline()\n if not line:\n break\n " }, { "id": 20198, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/platformdirs/android.py", "file_name": "android.py", "fun_name": "_android_folder", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of 
this test now in pip 22.0.4", "code": "def _android_folder() -> str:\n \n try:\n # First try to get path to android app via pyjnius\n from jnius import autoclass\n\n Context = autoclass(\"android.content.Context\") # noqa: N806\n result: str = Context.getFilesDir().getParentFile().getAbsolutePath()\n except Exception:\n # if fails find an android folder looking path on the sys.path\n pattern = re.compile(r\"/data/(data|user/\\d+)/(.+)/files\")\n for path in sys.path:\n if pattern.match(path):\n result = path.split(\"/files\")[0]\n break\n else:\n raise OSError(\"Cannot find path to android app folder\")\n return result\n\n\n@lru_cache(maxsize=1)", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "@lru_cache(maxsize=1)", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 189, "n_words": 69, "vocab_size": 53, "complexity": 4, "nloc": 15, "token_counts": 84, "n_ast_nodes": 163, "n_identifiers": 20, "random_cut": "def _android_folder() -> str:\n \n try:\n # First try to get path to android app via pyjnius\n from jnius import autoclass\n\n Context = autoclass(\"android.content.Context\") # noqa: N806\n result: str = Context.getFilesDir().getParentFile().getAbsolutePath()\n except Exception:\n # if fails find an android folder looking path on the sys.path\n pattern = re.compile(r\"/data/(data|user/\\d+)/(.+)/files\")\n for path in sys.path:\n if pattern.match(path):\n re" }, { "id": 129300, "commit_id": "4a55d10bb1b70971f50a3872421f2c1eebd84e64", "repo": "ray", "path": "python/ray/data/tests/test_dataset.py", "file_name": "test_dataset.py", "fun_name": "test_from_pandas_refs", "commit_message": "[Dataset] [DataFrame 2/n] Add pandas block format implementation (partial) (#20988)\n\nThis PR adds pandas block format support by implementing `PandasRow`, `PandasBlockBuilder`, `PandasBlockAccessor`.\r\n\r\nNote that `sort_and_partition`, `combine`, `merge_sorted_blocks`, `aggregate_combined_blocks` in `PandasBlockAccessor` redirects to arrow block format implementation for now. 
They'll be implemented in a later PR.\r\n\r\nCo-authored-by: Clark Zinzow \r\nCo-authored-by: Eric Liang ", "code": "def test_from_pandas_refs(ray_start_regular_shared, enable_pandas_block):\n ctx = ray.data.context.DatasetContext.get_current()\n old_enable_pandas_block = ctx.enable_pandas_block\n ctx.enable_pandas_block = enable_pandas_block\n try:\n df1 = pd.DataFrame({\"one\": [1, 2, 3], \"two\": [\"a\", \"b\", \"c\"]})\n df2 = pd.DataFrame({\"one\": [4, 5, 6], \"two\": [\"e\", \"f\", \"g\"]})\n ds = ray.data.from_pandas_refs([ray.put(df1), ray.put(df2)])\n assert ds._dataset_format(\n ) == \"pandas\" if enable_pandas_block else \"arrow\"\n values = [(r[\"one\"], r[\"two\"]) for r in ds.take(6)]\n rows = [(r.one, r.two) for _, r in pd.concat([df1, df2]).iterrows()]\n assert values == rows\n\n # test from single pandas dataframe ref\n ds = ray.data.from_pandas_refs(ray.put(df1))\n assert ds._dataset_format(\n ) == \"pandas\" if enable_pandas_block else \"arrow\"\n values = [(r[\"one\"], r[\"two\"]) for r in ds.take(3)]\n rows = [(r.one, r.two) for _, r in df1.iterrows()]\n assert values == rows\n finally:\n ctx.enable_pandas_block = old_enable_pandas_block\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 238, "n_words": 112, "vocab_size": 61, "complexity": 8, "nloc": 21, "token_counts": 269, "n_ast_nodes": 435, "n_identifiers": 27, "random_cut": "def test_from_pandas_refs(ray_start_regular_shared, enable_pandas_block):\n ctx = ray.data.context.DatasetContext.get_current()\n old_enable_pandas_block = ctx.enable_pandas_block\n ctx.enable_pandas_block = enable_pandas_block\n try:\n df1 = pd.DataFrame({\"one\": [1, 2, 3], \"two\": [\"a\", \"b\", \"c\"]})\n df2 = pd.DataFrame({\"one\": [4, 5, 6], \"two\": [\"e\", \"f\", \"g\"]})\n ds = ray.data.from_pandas_refs([ray.put(df1), ray.put(df2)])\n assert ds._dataset_format(\n ) == \"pandas\" if enable_pandas_block else \"arrow\"\n values = [(r[\"one\"], r[\"two\"]) for r in ds.take(6)]\n rows = [(r.one, r.two) for _, r in pd.concat([df1, df" }, { "id": 301326, "commit_id": "0c2f22d4780612545c483627da729e44d46ee9fd", "repo": "core", "path": "homeassistant/components/zha/switch.py", "file_name": "switch.py", "fun_name": "async_turn_on", "commit_message": "Add configurable zha switch entity (#71784)\n\n* add configurable zha switch entity\r\n\r\n* final zha configurable switch\r\n\r\n* fix codecov\r\n\r\n* replaced errorneous cluster with local quirk\r\n\r\n* test fix\r\n\r\n* minor changes", "code": "async def async_turn_on(self, **kwargs) -> None:\n \n await self.async_turn_on_off(True)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 33, "n_identifiers": 4, "random_cut": "async def async_turn_on(self, **kwargs) -> None:\n \n await self.async_turn_on_off(True)\n" }, { "id": 291300, "commit_id": "cd2377bc054ebe4c5c0432aac525d768dcfbe57a", "repo": "core", "path": "homeassistant/components/fibaro/climate.py", "file_name": "climate.py", "fun_name": "hvac_action", "commit_message": "Support hvacsystem in fibaro integration (#78234)\n\nfixes undefined", "code": "def hvac_action(self) -> HVACAction | None:\n \n if not self._op_mode_device:\n return None\n\n prop = self._op_mode_device.fibaro_device.properties\n if \"thermostatOperatingState\" in prop:\n with 
suppress(ValueError):\n return HVACAction(prop.thermostatOperatingState.lower())\n\n return None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 96, "n_words": 24, "vocab_size": 20, "complexity": 3, "nloc": 9, "token_counts": 51, "n_ast_nodes": 89, "n_identifiers": 11, "random_cut": "def hvac_action(self) -> HVACAction | None:\n \n if not self._op_mode_device:\n return None\n\n prop = self._op_mode_device.fibaro_device.properties\n if \"thermostatOperatingState\" in prop:\n with suppress(ValueError):\n" }, { "id": 59860, "commit_id": "1a6dee5e9eb71e6e6d1d3492002e9cd674ab9f9b", "repo": "prefect", "path": "tests/cli/test_cloud.py", "file_name": "test_cloud.py", "fun_name": "test_login_with_invalid_key", "commit_message": "Add login with a browser to `prefect cloud login` (#7334)", "code": "def test_login_with_invalid_key(key, expected_output, respx_mock):\n respx_mock.get(PREFECT_CLOUD_API_URL.value() + \"/me/workspaces\").mock(\n return_value=httpx.Response(status.HTTP_403_FORBIDDEN)\n )\n invoke_and_assert(\n [\"cloud\", \"login\", \"--key\", key, \"--workspace\", \"foo\"],\n expected_code=1,\n expected_output=expected_output,\n )\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 58, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 9, "token_counts": 60, "n_ast_nodes": 98, "n_identifiers": 15, "random_cut": "def test_login_with_invalid_key(key, expected_output, respx_mock):\n respx_mock.get(PREFECT_CLOUD_API_URL.value() + \"/me/workspaces\").mock(\n return_value=httpx.Response(status.HTTP_403_FORBIDDEN)\n )\n invoke_and_assert(\n [\"cloud\", \"login\", \"--key\", key, \"--workspace\", \"foo\"],\n expected_code=1,\n expe" }, { "id": 190254, "commit_id": "4fc3616712edb19179b17dd270ad6cf63abf99c2", "repo": "DeOldify", "path": "fastai/data_block.py", "file_name": "data_block.py", "fun_name": "_decode", "commit_message": "Upgrading to support latest Pytorch version", "code": "def _decode(df):\n return np.array([[df.columns[i] for i,t in enumerate(x) if t==1] for x in df.values], dtype=np.object)\n", "url": "https://github.com/jantic/DeOldify.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 17, "n_words": 15, "vocab_size": 13, "complexity": 4, "nloc": 2, "token_counts": 46, "n_ast_nodes": 68, "n_identifiers": 12, "random_cut": "def _decode(df):\n return np.array([[df.columns[i] for i,t in enumerate(x) if t==1] for x i" }, { "id": 269587, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "normalize_batch_in_training", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):\n \n if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:\n if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:\n return _broadcast_normalize_batch_in_training(\n x, gamma, beta, reduction_axes, epsilon=epsilon\n )\n return _fused_normalize_batch_in_training(\n x, gamma, beta, reduction_axes, epsilon=epsilon\n )\n else:\n if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:\n return _regular_normalize_batch_in_training(\n x, gamma, beta, reduction_axes, epsilon=epsilon\n )\n else:\n return 
_broadcast_normalize_batch_in_training(\n x, gamma, beta, reduction_axes, epsilon=epsilon\n )\n\n\n@keras_export(\"keras.backend.batch_normalization\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.backend.batch_normalization\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 232, "n_words": 69, "vocab_size": 35, "complexity": 6, "nloc": 18, "token_counts": 154, "n_ast_nodes": 245, "n_identifiers": 21, "random_cut": "def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):\n \n if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:\n if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:\n return _broadcast_normalize_batch_in_training(\n x, gamma, beta, reduction_axes, epsilon=epsilon\n )\n return _fused_" }, { "id": 252592, "commit_id": "f4dc2f2cfdb40e04022e4deb4aa67578deff5d23", "repo": "mitmproxy", "path": "mitmproxy/tools/console/grideditor/base.py", "file_name": "base.py", "fun_name": "set_current_value", "commit_message": "Replace blinker with custom implementation (#5528)\n\n* replace blinker with custom implementation\r\n\r\nThe major benefit here is type checking, followed by proper support for async receivers.\r\n\r\n* fix compatibility with Python 3.9\r\n\r\n* fix nits\r\n\r\n* try harder to force gc\r\n\r\n* try harderer\r\n\r\n* coverage++\r\n\r\n* coverage++\r\n\r\n* nits", "code": "def set_current_value(self, val) -> None:\n errors = self.lst[self.focus][1]\n emsg = self.editor.is_error(self.focus_col, val)\n if emsg:\n signals.status_message.send(message=emsg, expire=5)\n errors.add(self.focus_col)\n else:\n errors.discard(self.focus_col)\n self.set_value(val, self.focus, self.focus_col, errors)\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 90, "n_words": 23, "vocab_size": 21, "complexity": 2, "nloc": 9, "token_counts": 87, "n_ast_nodes": 132, "n_identifiers": 18, "random_cut": "def set_current_value(self, val) -> None:\n errors = self.lst[self.focus][1]\n emsg = self.editor.is_error(self.focus_col, val)\n if emsg:\n signals.status_message.send(message=emsg, expire=5)\n " }, { "id": 221244, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/calendar.py", "file_name": "calendar.py", "fun_name": "formatmonthname", "commit_message": "add python 3.10.4 for windows", "code": "def formatmonthname(self, theyear, themonth, withyear=True):\n \n if withyear:\n s = '%s %s' % (month_name[themonth], theyear)\n else:\n s = '%s' % month_name[themonth]\n return '%s' % (\n self.cssclass_month_head, s)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 89, "n_words": 28, "vocab_size": 24, "complexity": 2, "nloc": 7, "token_counts": 49, "n_ast_nodes": 80, "n_identifiers": 8, "random_cut": "def formatmonthname(self, theyear, themonth, withyear=True):\n \n if withyear:\n s = '%s %s' % (month_name[themonth], theyear)\n else:\n s = '%s' % month_name[themonth]\n return '%s' % (\n self.cssclass_month_head, s)\n" }, { "id": 52122, "commit_id": "ca09b195daa8033a6f85bccf27362d0b114f9706", "repo": "PaddleHub", "path": 
"modules/image/Image_editing/colorization/deoldify/test.py", "file_name": "test.py", "fun_name": "test_predict1", "commit_message": "update deoldify (#1992)\n\n* update deoldify\r\n\r\n* add clean func\r\n\r\n* update README\r\n\r\n* update format", "code": "def test_predict1(self):\n pred_img, out_path = self.module.predict(input='tests/test.jpg')\n self.assertIsInstance(pred_img, np.ndarray)\n self.assertIsInstance(out_path, str)\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 30, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 37, "n_ast_nodes": 60, "n_identifiers": 11, "random_cut": "def test_predict1(self):\n pr" }, { "id": 318150, "commit_id": "8181da70901c6b848ebc2efb2d39a7a3536599f3", "repo": "core", "path": "homeassistant/components/axis/device.py", "file_name": "device.py", "fun_name": "async_reset", "commit_message": "Improve type hints in axis (#75910)", "code": "async def async_reset(self) -> bool:\n \n self.disconnect_from_stream()\n\n return await self.hass.config_entries.async_unload_platforms(\n self.config_entry, PLATFORMS\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 51, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 29, "n_ast_nodes": 50, "n_identifiers": 9, "random_cut": "async def async_reset(self) -> bool:\n \n self.disconnect_from_stream()\n\n return await self.hass.config_entries.async_unload_platforms(\n self.config_entry, PLATFORMS\n " }, { "id": 104939, "commit_id": "d6ae1ea3f93a48d03eab78eecf7b6599144143e1", "repo": "datasets", "path": "tests/test_builder.py", "file_name": "test_builder.py", "fun_name": "test_cache_dir_for_features", "commit_message": "Set builder name from module instead of class (#4388)\n\n* Set builder name from module instead of class\r\n\r\n* Fix tests\r\n\r\n* Rename dummy_builder to builder in tests", "code": "def test_cache_dir_for_features(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n f1 = Features({\"id\": Value(\"int8\")})\n f2 = Features({\"id\": Value(\"int32\")})\n builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, name=\"dummy\", features=f1)\n other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, name=\"dummy\", features=f1)\n self.assertEqual(builder.cache_dir, other_builder.cache_dir)\n other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, name=\"dummy\", features=f2)\n self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 116, "n_words": 33, "vocab_size": 21, "complexity": 1, "nloc": 9, "token_counts": 112, "n_ast_nodes": 188, "n_identifiers": 17, "random_cut": "def test_cache_dir_for_features(self):\n with tempfile.TemporaryDirectory() as tmp_dir:\n f1 = Features({\"id\": Value(\"int8\")})\n f2 = Features({\"id\": Value(\"int32\")})\n builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, name=\"dummy\", features=f1)\n other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, name=\"dummy\", features=f1)\n self.assertEqual(builder.cache_dir, other_builder.cache_dir)\n other_builder = DummyGeneratorBasedBuilderWithIntegers(cache_dir=tmp_dir, name=\"dummy\", features=f2)\n 
self.assertNotEqual(builder.cache_dir, other_builder.cache_dir)\n" }, { "id": 99664, "commit_id": "1b1e1ed83fa3ee7da1009b927efbd7af94609301", "repo": "sentry", "path": "src/sentry/snuba/metrics/query_builder.py", "file_name": "query_builder.py", "fun_name": "_parse_limit", "commit_message": "ref(metrics): Honor snuba group limits without orderBy [TET-5] (#34287)\n\n* ref(metrics): Honor snuba group limits without orderBy\r\n\r\nThis PR fixes the else branch to apply similar session V2 limits without explicit orderBy. Essentially how we achieve this now is through the following logic:\r\nLet's say fields across the three different entities are requested with a limit of 3, groupBy project and no orderBy clause\r\n\r\n- If the results of query to entity 1, hits the limit then we use the project groups as filters for subsequent queries\r\n- If the results of query to entity 1 do not hit the limit, but results of query 2 does, then we nuke the groups from query 1 that do not exist in query 2 results and apply those as a filter to query 3\r\n- If the results of all three queries to all three entities don't hit the limit, then at the very end, we might end up with an extra number of groups greater than the limit, which is why we nuke the excess groups", "code": "def _parse_limit(self, paginator_kwargs) -> Optional[Limit]:\n if \"limit\" not in paginator_kwargs:\n return\n return Limit(paginator_kwargs[\"limit\"])\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 37, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 4, "token_counts": 27, "n_ast_nodes": 45, "n_identifiers": 5, "random_cut": "def _parse_limit(self, paginator_kwargs) -> Optional[Limit]:\n if \"limit\" not in paginator_kwargs:\n return\n return Limit(paginator_kwargs[\"limit\"])\n" }, { "id": 181237, "commit_id": "e6336d688259494205ff4616ff2c03d5460b36bc", "repo": "gradio", "path": "test/test_examples.py", "file_name": "test_examples.py", "fun_name": "test_caching_with_dict", "commit_message": "Fix bug with gr.update and interactive=True (#2639)\n\n* Fix update interactivity\r\n\r\n* Lint\r\n\r\n* CHANGELOG\r\n\r\n* Fix\r\n\r\n* Undo interactive=True\r\n\r\n* Do not call update twice\r\n\r\n* Add unit test\r\n\r\n* Revert change\r\n\r\n* Lint", "code": "async def test_caching_with_dict(self):\n text = gr.Textbox()\n out = gr.Label()\n\n io = gr.Interface(\n lambda _: {text: gr.update(lines=4, interactive=False), out: \"lion\"},\n \"textbox\",\n [text, out],\n examples=[\"abc\"],\n cache_examples=True,\n )\n prediction = await io.examples_handler.load_from_cache(0)\n assert not any(d[\"trigger\"] == \"fake_event\" for d in io.config[\"dependencies\"])\n assert prediction == [\n {\"lines\": 4, \"__type__\": \"update\", \"mode\": \"static\"},\n {\"label\": \"lion\"},\n ]\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 183, "n_words": 51, "vocab_size": 44, "complexity": 2, "nloc": 16, "token_counts": 124, "n_ast_nodes": 209, "n_identifiers": 21, "random_cut": "async def test_caching_with_dict(self):\n text = gr.Textbox()\n out = gr.Label()\n\n io = gr.Interface(\n lambda _: {text: gr.update(lines=4, interactive=False), out: \"lion\"},\n \"textbox\",\n [text, out],\n examples=[\"abc\"],\n cache_examples=True,\n )\n prediction = await io.examples_handler.load" }, { "id": 292531, "commit_id": "f30681dae7efffd8980b3ee3ae7f355c603b842c", 
"repo": "core", "path": "homeassistant/components/sonarr/sensor.py", "file_name": "sensor.py", "fun_name": "native_value", "commit_message": "Use aiopyarr for sonarr (#65349)", "code": "def native_value(self) -> StateType:\n \n key = self.entity_description.key\n\n if key == \"diskspace\" and self.data.get(key) is not None:\n total_free = sum(disk.freeSpace for disk in self.data[key])\n free = total_free / 1024**3\n return f\"{free:.2f}\"\n\n if key == \"commands\" and self.data.get(key) is not None:\n return len(self.data[key])\n\n if key == \"queue\" and self.data.get(key) is not None:\n return self.data[key].totalRecords\n\n if key == \"series\" and self.data.get(key) is not None:\n return len(self.data[key])\n\n if key == \"upcoming\" and self.data.get(key) is not None:\n return len(self.data[key])\n\n if key == \"wanted\" and self.data.get(key) is not None:\n return self.data[key].totalRecords\n\n return None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 238, "n_words": 87, "vocab_size": 34, "complexity": 14, "nloc": 18, "token_counts": 194, "n_ast_nodes": 316, "n_identifiers": 14, "random_cut": "def native_value(self) -> StateType:\n \n key = self.entity_description.key\n\n if key == \"diskspace\" and self.data.get(key) is not None:\n total_free = sum(disk.freeSpace for disk in self.data[key])\n free = total_free / 1024**3\n return f\"{free:.2f}\"\n\n if key == \"commands\" and self.data.get(key) is not None:\n return len(self.data[key])\n\n if key == \"queue\" and self.data.get(key) is not None:\n return self.data[key].totalRecords\n\n if key == \"series\" and self.data.get(key) is not No" }, { "id": 46141, "commit_id": "87c1246b79769f20214a339aadc6a8270d453953", "repo": "airflow", "path": "tests/providers/google/cloud/operators/test_dataplex.py", "file_name": "test_dataplex.py", "fun_name": "test_execute", "commit_message": "Add Dataplex operators (#20377)", "code": "def test_execute(self, hook_mock):\n op = DataplexDeleteTaskOperator(\n project_id=PROJECT_ID,\n region=REGION,\n lake_id=LAKE_ID,\n dataplex_task_id=DATAPLEX_TASK_ID,\n task_id=\"delete_dataplex_task\",\n api_version=API_VERSION,\n gcp_conn_id=GCP_CONN_ID,\n delegate_to=DELEGATE_TO,\n impersonation_chain=IMPERSONATION_CHAIN,\n )\n op.execute(context=None)\n hook_mock.assert_called_once_with(\n gcp_conn_id=GCP_CONN_ID,\n delegate_to=DELEGATE_TO,\n api_version=API_VERSION,\n impersonation_chain=IMPERSONATION_CHAIN,\n )\n hook_mock.return_value.delete_task.assert_called_once_with(\n project_id=PROJECT_ID,\n region=REGION,\n lake_id=LAKE_ID,\n dataplex_task_id=DATAPLEX_TASK_ID,\n retry=None,\n timeout=None,\n metadata=(),\n )\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 300, "n_words": 32, "vocab_size": 22, "complexity": 1, "nloc": 28, "token_counts": 115, "n_ast_nodes": 161, "n_identifiers": 30, "random_cut": "def test_execute(self, hook_mock):\n op = DataplexDeleteTaskOperator(\n project_id=PROJECT_ID,\n region=REGION,\n lake_id=LAKE_ID,\n dataplex_task_id=DATAPLEX_TASK_ID,\n task_id=\"delete_dataplex_task\",\n api_version=API_VERSION,\n gcp_conn_id=GCP_CONN_ID,\n delegate_to=DELEGATE_TO,\n impersonation_chain=IMPERSONATION_CHAIN,\n )\n op.execute(context=None)\n hook_mock.assert_called_once_with(\n gcp_conn_id=GCP_CONN_ID,\n delegate_to=DELEGATE_TO,\n ap" }, { "id": 281863, "commit_id": 
"379cf31cfe7473c6b5747861bb2ec2dbb9974b5d", "repo": "OpenBBTerminal", "path": "tests/gamestonk_terminal/stocks/insider/test_openinsider_view.py", "file_name": "test_openinsider_view.py", "fun_name": "test_print_insider_filter_no_table", "commit_message": "Tests : Stocks (#1240)\n\n* Updating tests : stocks/sector_industry_analysis\r\n\r\n* Updating tests : stocks/prediction_techniques\r\n\r\n* Updating tests : doc\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : stocks/sector_industry_analysis\r\n\r\n* Updating tests : stocks/technical_analysis\r\n\r\n* Updating tests : etf/technical_analysis\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : stocks/quantitative_analysis\r\n\r\n* Updating tests : stocks/quantitative_analysis\r\n\r\n* Updating tests : stocks/options\r\n\r\n* Updating tests : stocks/options\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : stocks/prediction_techniques\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating tests : etf\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : fundamental_analysis\r\n\r\n* Updating tests : dark_pool_shorts/finra_model\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : stocks/dark_pook_shorts\r\n\r\n* Updating tests : stocks/discovery\r\n\r\n* Updating tests : stocks/insider\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : stocks/options/yfinance_model\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating tests : stocks/insider", "code": "def test_print_insider_filter_no_table(mocker):\n # MOCK SOUP\n mocker.patch(\n target=\"gamestonk_terminal.stocks.insider.openinsider_view.get_open_insider_link\",\n return_value=None,\n )\n\n openinsider_view.print_insider_filter(\n preset_loaded=\"whales\",\n ticker=\"\",\n limit=10,\n links=False,\n export=\"\",\n )\n\n\n@pytest.mark.default_cassette(\"test_print_insider_data\")\n@pytest.mark.vcr\n@pytest.mark.parametrize(\n \"color\",\n [True, False],\n)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.default_cassette(\"test_print_insider_data\")\n@pytest.mark.vcr\n@pytest.mark.parametrize(\n \"color\",\n [True, False],\n)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 88, "n_words": 23, "vocab_size": 21, "complexity": 1, "nloc": 12, "token_counts": 43, "n_ast_nodes": 121, "n_identifiers": 17, "random_cut": "def test_print_insider_filter_no_table(mocker):\n # MOCK SOUP\n mocker.patch(\n target=\"gamestonk_terminal.stocks.insider.openinsider_view.get_open_insider_link\",\n return_value=None,\n )\n\n openinsider_vi" }, { "id": 169912, "commit_id": "90b4add77859d1349530fff3c8cadeef95f36f39", "repo": "pandas", "path": "pandas/tests/scalar/timedelta/test_constructors.py", "file_name": "test_constructors.py", "fun_name": "test_overflow_on_construction", "commit_message": "REF: _reso->_creso (#49107)", "code": "def test_overflow_on_construction():\n # GH#3374\n value = Timedelta(\"1day\").value * 20169940\n msg = \"Cannot cast 1742682816000000000000 from ns to 'ns' without overflow\"\n with pytest.raises(OutOfBoundsTimedelta, match=msg):\n Timedelta(value)\n\n # xref GH#17637\n msg = \"Cannot cast 139993 from D to 'ns' without overflow\"\n with pytest.raises(OutOfBoundsTimedelta, match=msg):\n Timedelta(7 * 19999, unit=\"D\")\n\n # used to overflow before non-ns support\n td = Timedelta(timedelta(days=13 * 19999))\n assert td._creso == 
NpyDatetimeUnit.NPY_FR_us.value\n assert td.days == 13 * 19999\n\n\n@pytest.mark.parametrize(\n \"val, unit\",\n [\n (3508, \"M\"),\n (15251, \"W\"), # 1\n (106752, \"D\"), # change from previous:\n (2562048, \"h\"), # 0 hours\n (153722868, \"m\"), # 13 minutes\n (9223372037, \"s\"), # 44 seconds\n ],\n)", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"val, unit\",\n [\n (3508, \"M\"),\n (15251, \"W\"), # 1\n (106752, \"D\"), # change from previous:\n (2562048, \"h\"), # 0 hours\n (153722868, \"m\"), # 13 minutes\n (9223372037, \"s\"), # 44 seconds\n ],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 200, "n_words": 100, "vocab_size": 71, "complexity": 1, "nloc": 11, "token_counts": 89, "n_ast_nodes": 236, "n_identifiers": 17, "random_cut": "def test_overflow_on_construction():\n # GH#3374\n value = Timedelta(\"1day\").value * 20169940\n msg = \"Cannot cast 1742682816000000000000 from ns to 'ns' without overflow\"\n with pytest.raises(OutOfBoundsTimedelta, match=msg):\n Timedelta(value)\n\n # xref GH#17637\n msg = \"Cannot cast 139993 from D to 'ns' without overflow\"\n with pytest.raises(OutOfBoundsTimedelta, match=msg):\n Timedelta(7 * 19999, unit=\"D\")\n\n # used to overflow before non-ns support\n td = Timedelta(timedelta(days=13 * 19999))\n assert td._creso == NpyDatetimeUnit.NPY_FR_us.value\n assert td.days == 13 * 19999\n\n\n@pytest.mark.parametrize(\n \"val, unit\",\n [\n (3508, \"M\"),\n (15251, \"W\"), # 1\n (106752, \"D\"), # change from previous:\n (2562048, \"h\"), # 0 hours\n (153722868, \"m\"), # 13 minutes\n (9223372037, \"s\"), # 44 " }, { "id": 294802, "commit_id": "425b825ae990b054838fea09b86202407d14dae1", "repo": "core", "path": "homeassistant/components/motion_blinds/cover.py", "file_name": "cover.py", "fun_name": "async_set_cover_position", "commit_message": "Motion Blinds API lock (#68587)", "code": "async def async_set_cover_position(self, **kwargs):\n \n position = kwargs[ATTR_POSITION]", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 7, "token_counts": 44, "n_ast_nodes": 27, "n_identifiers": 5, "random_cut": "async def async_set_cover_position(self, **kwargs):\n \n position = kwargs[ATTR_POSITION]" }, { "id": 204804, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/base.py", "file_name": "base.py", "fun_name": "dec_thread_sharing", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def dec_thread_sharing(self):\n with self._thread_sharing_lock:\n if self._thread_sharing_count <= 0:\n raise RuntimeError(\n \"Cannot decrement the thread sharing count below zero.\"\n )\n self._thread_sharing_count -= 1\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 99, "n_words": 22, "vocab_size": 21, "complexity": 2, "nloc": 7, "token_counts": 27, "n_ast_nodes": 48, "n_identifiers": 5, "random_cut": "def dec_thread_sharing(self):\n with self._thread_" }, { "id": 178706, "commit_id": "053c207229292b7f011937964a69cdf271d47532", "repo": "Nuitka", "path": "nuitka/OutputDirectories.py", "file_name": "OutputDirectories.py", "fun_name": "getResultFullpath", "commit_message": "macOS: Added support for mixing --onefile and --macos-create-app-bundle\n\n* For some software, e.g. 
PySide2 it will actually be the only way\n to get it working.", "code": "def getResultFullpath(onefile):\n \n\n result = getResultBasepath(onefile=onefile)\n\n if Options.shallMakeModule():\n result += getSharedLibrarySuffix(preferred=True)\n else:\n output_filename = Options.getOutputFilename()\n\n if Options.isOnefileMode() and output_filename is not None:\n if onefile:\n result = output_filename\n else:\n result = os.path.join(\n getStandaloneDirectoryPath(),\n os.path.basename(output_filename),\n )\n elif output_filename is not None:\n result = output_filename\n elif getOS() == \"Windows\":\n result += \".exe\"\n elif (\n not Options.isStandaloneMode()\n or onefile\n and not Options.shallCreateAppBundle()\n ):\n result += \".bin\"\n\n return result\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 298, "n_words": 63, "vocab_size": 35, "complexity": 10, "nloc": 25, "token_counts": 123, "n_ast_nodes": 211, "n_identifiers": 19, "random_cut": "def getResultFullpath(onefile):\n \n\n result = getResultBasepath(onefile=onefile)\n\n if Options.shallMakeModule():\n result += getSharedLibrarySuffix(preferred=True)\n else:\n output_filename = Options.getOutputFilename()\n\n if Options.isOnefileMode() and output_filename is not None:\n if onefile:\n result = output_filename\n else:\n result = os.path.join(\n getStandaloneDirectoryPath(),\n os.path.basename(output_filename),\n )\n elif output_filename is not None:\n result = output_filename\n elif getOS() == \"Windows\":\n result += \".exe\"\n elif (\n n" }, { "id": 149366, "commit_id": "39d925c2950aa3c734c454535fef70d89353211e", "repo": "freqtrade", "path": "tests/exchange/test_exchange.py", "file_name": "test_exchange.py", "fun_name": "test__async_kucoin_get_candle_history", "commit_message": "Change to precise casing instead of .lower()", "code": "async def test__async_kucoin_get_candle_history(default_conf, mocker, caplog):\n caplog.set_level(logging.INFO)\n api_mock = MagicMock()\n api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.DDoSProtection(\n \"kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?\"\n \"symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735\"\n \"429 Too Many Requests\" '{\"code\":\"429000\",\"msg\":\"Too Many Requests\"}'))\n exchange = get_patched_exchange(mocker, default_conf, api_mock, id=\"KuCoin\")\n mocker.patch('freqtrade.exchange.Exchange.name', PropertyMock(return_value='KuCoin'))\n\n msg = \"Kucoin 429 error, avoid triggering DDosProtection backoff delay\"\n assert not num_log_has_re(msg, caplog)\n\n for _ in range(3):\n with pytest.raises(DDosProtection, match=r'429 Too Many Requests'):\n await exchange._async_get_candle_history(\n \"ETH/BTC\", \"5m\", (arrow.utcnow().int_timestamp - 2000) * 1000, count=3)\n assert num_log_has_re(msg, caplog) == 3\n\n caplog.clear()\n # Test regular non-kucoin message\n api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.DDoSProtection(\n \"kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?\"\n \"symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735\"\n \"429 Too Many Requests\" '{\"code\":\"2222222\",\"msg\":\"Too Many Requests\"}'))\n\n msg = r'_async_get_candle_history\\(\\) returned exception: .*'\n msg2 = r'Applying DDosProtection backoff delay: .*'\n with patch('freqtrade.exchange.common.asyncio.sleep', get_mock_coro(None)):\n for _ in range(3):\n with pytest.raises(DDosProtection, match=r'429 Too Many Requests'):\n await 
exchange._async_get_candle_history(\n \"ETH/BTC\", \"5m\", (arrow.utcnow().int_timestamp - 2000) * 1000, count=3)\n # Expect the \"returned exception\" message 12 times (4 retries * 3 (loop))\n assert num_log_has_re(msg, caplog) == 12\n assert num_log_has_re(msg2, caplog) == 9\n\n\n@pytest.mark.asyncio", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "@pytest.mark.asyncio", "n_ast_errors": 1, "ast_levels": 20, "n_whitespaces": 341, "n_words": 150, "vocab_size": 89, "complexity": 3, "nloc": 30, "token_counts": 243, "n_ast_nodes": 423, "n_identifiers": 37, "random_cut": "async def test__async_kucoin_get_candle_history(default_conf, mocker, caplog):\n caplog.set_level(logging.INFO)\n api_mock = MagicMock()\n api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.DDoSProtection(\n \"kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?\"\n \"symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735\"\n \"429 Too Many Requests\" '{\"code\":\"429000\",\"msg\":\"Too Many Requests\"}'))\n exchange = get_patched_exchange(mocker, default_conf, api_mock, id=\"KuCoin\")\n mocker.patch('freqtrade.exchange.Exchange.name', PropertyMock(return_value='KuCoin'))\n\n msg = \"Kucoin 429 error, avoid triggering DDosProtection backoff delay\"\n assert not num_log_has_re(msg, caplog)\n\n for _ in range(3):\n with pytest.raises(DDosProtection, match=r'429 Too Many Requests'):\n await exchange._async_get_candle_history(\n \"ETH/BTC\", \"5m\", (arrow.utcnow().int_timestamp - 2000) * 1000, count=3)\n assert num_log_has_re(msg, caplog) == 3\n\n caplog.clear()\n # Test regular non-kucoin message\n api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.DDoSProtection(\n \"kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?\"\n \"symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735\"\n \"429 Too Many Requests\" '{\"code\":\"2222222\",\"msg\":\"Too Many Requests\"}'))\n\n msg = r'_async_get_candle_history\\(\\) returned exception: .*'\n msg2 = r'Applying DDosProtection backoff delay: .*'\n with patch('freqtrade.exchange.common.asyncio.sleep', get_mock_c" }, { "id": 300807, "commit_id": "69cc6ab5f1d58adc586c3b300a4f7f0cde2cd0c2", "repo": "core", "path": "homeassistant/components/risco/sensor.py", "file_name": "sensor.py", "fun_name": "async_added_to_hass", "commit_message": "Clean up accessing entity_registry.async_get_registry helper via hass (#72005)", "code": "async def async_added_to_hass(self):\n \n self._entity_registry = er.async_get(self.hass)\n self.async_on_remove(\n self.coordinator.async_add_listener(self._refresh_from_coordinator)\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 48, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 5, "token_counts": 33, "n_ast_nodes": 57, "n_identifiers": 10, "random_cut": "async def async_added_to_hass(self):\n \n self._entity_registry = er.async_get(self.hass)\n self.async_on_remo" }, { "id": 305438, "commit_id": "d1ecd74a1a153b85b829acf45b5c6a5ea79df5c1", "repo": "core", "path": "homeassistant/components/london_air/sensor.py", "file_name": "sensor.py", "fun_name": "update", "commit_message": "Improve entity type hints [l] (#77655)", "code": "def update(self) -> None:\n \n sites_status = []\n self._api_data.update()\n if self._api_data.data:\n self._site_data = self._api_data.data[self._name]\n self._updated = self._site_data[0][\"updated\"]\n for site in self._site_data:\n if 
site[\"pollutants_status\"] != \"no_species_data\":\n sites_status.append(site[\"pollutants_status\"])\n\n if sites_status:\n self._state = max(set(sites_status), key=sites_status.count)\n else:\n self._state = None\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 166, "n_words": 35, "vocab_size": 28, "complexity": 5, "nloc": 14, "token_counts": 104, "n_ast_nodes": 173, "n_identifiers": 15, "random_cut": "def update(self) -> None:\n \n sites_status = []\n self._api_data.update()\n if self._api_data.data:\n self._site_data = self._api_data.d" }, { "id": 220187, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ast.py", "file_name": "ast.py", "fun_name": "set_precedence", "commit_message": "add python 3.10.4 for windows", "code": "def set_precedence(self, precedence, *nodes):\n for node in nodes:\n self._precedences[node] = precedence\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 28, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 3, "token_counts": 23, "n_ast_nodes": 34, "n_identifiers": 6, "random_cut": "def set_precedence(self, precedence, *nodes):\n for node in nodes:\n self._precedences[nod" }, { "id": 135780, "commit_id": "2ed09c54459cc3f74e2dab13406018698559856c", "repo": "ray", "path": "rllib/algorithms/apex_dqn/apex_dqn.py", "file_name": "apex_dqn.py", "fun_name": "update_target_networks", "commit_message": "[RLlib] Move all config validation logic into AlgorithmConfig classes. (#29854)", "code": "def update_target_networks(self, num_new_trained_samples) -> None:\n \n self._num_ts_trained_since_last_target_update += num_new_trained_samples\n if (\n self._num_ts_trained_since_last_target_update\n >= self.config[\"target_network_update_freq\"]\n ):\n self._num_ts_trained_since_last_target_update = 0\n with self._timers[TARGET_NET_UPDATE_TIMER]:\n to_update = self.workers.local_worker().get_policies_to_train()\n self.workers.local_worker().foreach_policy_to_train(\n lambda p, pid: pid in to_update and p.update_target()\n )\n self._counters[NUM_TARGET_UPDATES] += 1\n self._counters[LAST_TARGET_UPDATE_TS] = self._counters[\n NUM_AGENT_STEPS_TRAINED\n if self.config.count_steps_by == \"agent_steps\"\n else NUM_ENV_STEPS_TRAINED\n ]\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 260, "n_words": 46, "vocab_size": 39, "complexity": 4, "nloc": 19, "token_counts": 111, "n_ast_nodes": 180, "n_identifiers": 21, "random_cut": "def update_target_networks(self, num_new_trained_samples) -> None:\n \n self._num_ts_trained_since_last_target_update += num_new_trained_samples\n if (\n self._num_ts_trained_since_last_target_update\n >= self.config[\"target_network_update_freq\"]\n ):\n self._num_ts_trained_since_last_target_update = 0\n with self._timers[TARGET_NET_UPDATE_TIMER]:\n to_update = self.workers.local_worker().get_policies_to_train()\n self.workers.local_worker().foreach_policy_to_train(\n lambda p, pid: pid in to_update and p.update_target()\n )\n self._counters[NUM_TARGET_UPDATES] += 1\n self._counters[LAST_TARGET_UPDATE_TS] = self._counters[\n " }, { "id": 26106, "commit_id": "bf654a5f958fcf0611b61cf43ac13c886761b80a", "repo": "saleor", "path": "saleor/payment/gateways/np_atobarai/tests/test_utils.py", "file_name": "test_utils.py", 
"fun_name": "test_get_fulfillment_for_order_no_refundable_fulfillment", "commit_message": "Port NP Atobarai gateway to 3.1 (#8684)\n\n* Port net protections (#8640) to 3.1\r\n\r\n* Add NP final code review feedback onto 3.1\r\n\r\n* Fix optional sku in NP payload & add docstrings\r\n\r\n* Refactor tracking_number_updated\r\n\r\n* Change NetProtections timeout value to 20\r\n\r\n* Do not use f-strings in logger warnings\r\n\r\n* Trace only http requests\r\n\r\n* Simplify code\r\n\r\n* Add comment about longer than usual timeout period\r\n\r\n* Remove order from process payment\r\n\r\n* Add comment for 400 status code\r\n\r\n* Reduce scope of Posuto context manager\r\n\r\n* Refactor voucher and shipping amount for payment lines data\r\n\r\n* Update PaymentResult.psp_reference type to Optional[str]\r\n\r\n* Add handler for report error in transaction reregistration\r\n\r\n* Add docstrings to goods functions\r\n\r\n* Add FOR_REREGISTRATION payment status\r\n\r\n* Refactor create_refund_data\r\n\r\n* Fix refund data\r\n\r\n* Add docstrings to goods functions\r\n\r\n* Add prefetch to _create_refund_manual_amount\r\n\r\n* Move refund logic to NP\r\n\r\n* Fix billing amount for partial refunds\r\n\r\n* Fix multiple shipping refunds\r\n\r\n* Set currency to JPY\r\n\r\n* WIP fix refunds\r\n\r\n* Clean up code\r\n\r\n* Refactor\r\n\r\n* Fix get_goods_with_refunds for all returned products\r\n\r\nCo-authored-by: Mateusz Grzyb ", "code": "def test_get_fulfillment_for_order_no_refundable_fulfillment(order):\n # given\n order.fulfillments.create(tracking_number=\"123\", status=FulfillmentStatus.REFUNDED)\n\n # then\n with pytest.raises(PaymentError, match=r\".* not exist .*\"):\n\n # when\n get_fulfillment_for_order(order)\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 42, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 38, "n_ast_nodes": 67, "n_identifiers": 13, "random_cut": "def test_get_fulfillment_for_order_no_refundable_fulfillment(order):\n # given\n order.fulfillments.create(tracking_number=\"123\", status=Fulfillmen" }, { "id": 60526, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/cli/parser.py", "file_name": "parser.py", "fun_name": "format_heading", "commit_message": "upd; format", "code": "def format_heading(self, heading):\n # type: (str) -> str\n if heading == \"Options\":\n return \"\"\n return heading + \":\\n\"\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 49, "n_words": 18, "vocab_size": 16, "complexity": 2, "nloc": 4, "token_counts": 18, "n_ast_nodes": 36, "n_identifiers": 3, "random_cut": "def format_heading(self, heading):\n # type: (str) -> str\n if heading == \"Options\":\n " }, { "id": 200799, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_widgets/tests.py", "file_name": "tests.py", "fun_name": "test_m2m_related_model_not_in_admin", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_m2m_related_model_not_in_admin(self):\n # M2M relationship with model not registered with admin site. Raw ID\n # widget should have no magnifying glass link. 
See #16542\n consultor1 = Advisor.objects.create(name=\"Rockstar Techie\")\n\n c1 = Company.objects.create(name=\"Doodle\")\n c2 = Company.objects.create(name=\"Pear\")\n consultor1.companies.add(c1, c2)\n rel = Advisor._meta.get_field(\"companies\").remote_field\n\n w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)\n self.assertHTMLEqual(\n w.render(\"company_widget1\", [c1.pk, c2.pk], attrs={}),\n ''\n % {\"c1pk\": c1.pk, \"c2pk\": c2.pk},\n )\n\n self.assertHTMLEqual(\n w.render(\"company_widget2\", [c1.pk]),\n ''\n % {\"c1pk\": c1.pk},\n )\n\n\n@override_settings(ROOT_URLCONF=\"admin_widgets.urls\")", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "@override_settings(ROOT_URLCONF=\"admin_widgets.urls\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 218, "n_words": 70, "vocab_size": 58, "complexity": 1, "nloc": 17, "token_counts": 144, "n_ast_nodes": 255, "n_identifiers": 26, "random_cut": "def test_m2m_related_model_not_in_admin(self):\n # M2M relationship with model not registered with admin site. Raw ID\n # widget should have no magnifying glass link. See #16542\n consultor1 = Advisor.objects.create(name=\"Rockstar Techie\")\n\n c1 = Company.objects.create(name=\"Doodle\")\n c2 = Company.objects.create(name=\"Pear\")\n consultor1.companies.add(c1, c2)\n rel = Advisor._meta.get_field(\"companies\").remote_field\n\n w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)\n self.assertHTMLEqual(\n w.render(\"co" }, { "id": 121070, "commit_id": "76fcf63fb4e53fd82faece677ed46db8b0c71707", "repo": "jax", "path": "tests/lobpcg_test.py", "file_name": "lobpcg_test.py", "fun_name": "_make_concrete_cases", "commit_message": "Add initial LOBPCG top-k eigenvalue solver (#3112)\n\nThis initial version is f32-only for accelerators, since it relies on an eigh call (which itself is f32 at most) in its inner loop.\n\nFor details, see jax.experimental.linalg.standard_lobpcg documentation.\n\nThis is a partial implementation of the similar [scipy lobpcg\nfunction](https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lobpcg.html).", "code": "def _make_concrete_cases(f64):\n dtype = np.float64 if f64 else np.float32\n example_names = list(_concrete_generators(dtype))\n cases = []\n for name in example_names:\n nkm = [(100, 10, 20)]\n if not flags.FLAGS.jax_skip_slow_tests:\n nkm.append((1000, 100, 200))\n for n, k, m in nkm:\n if name == 'ring laplacian':\n m *= 3\n if name.startswith('linear'):\n m *= 2\n if f64:\n m *= 2\n case = [('matrix_name', name), ('n', n), ('k', k), ('m', m)]\n clean_matrix_name = _clean_matrix_name(name)\n case.append(('testcase_name', f'{clean_matrix_name}_n{n}'))\n cases.append(dict(case))\n\n assert len({c['testcase_name'] for c in cases}) == len(cases)\n return cases\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 155, "n_words": 80, "vocab_size": 58, "complexity": 9, "nloc": 21, "token_counts": 176, "n_ast_nodes": 283, "n_identifiers": 26, "random_cut": "def _make_concrete_cases(f64):\n dtype = np.float64 if f64 else np.float32\n example_names = list(_concrete_generators(dtype))\n cases = []\n for name in example_names:\n nkm = [(100, 10, 20)]\n if not flags.FLAGS.jax_skip_slow_tests:\n nkm.append((1000, 100, 200))\n for n, k, m in nkm:\n if name == 'ring laplacian':\n m *= 3\n if name.startswith('linear'):\n m *= 2\n if f64:\n " }, { "id": 187049, "commit_id": "b2557361f734304fbd80b4985c753668fed00db5", 
"repo": "streamlink", "path": "src/streamlink/plugins/funimationnow.py", "file_name": "funimationnow.py", "fun_name": "login_csrf", "commit_message": "plugins.funimationnow: replace itertags", "code": "def login_csrf(self):\n return self.session.http.get(self.login_url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(f\".//input[@name='{self.CSRF_NAME}'][1]/@value\")\n ))\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 43, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 5, "token_counts": 39, "n_ast_nodes": 68, "n_identifiers": 12, "random_cut": "def login_csrf(self):\n return self.session.http.get(self.login_url, schema=validate.Schema(\n validate.p" }, { "id": 176144, "commit_id": "0dada08f4eedb104bfa40932b576e44d82218547", "repo": "edgedb", "path": "tests/test_edgeql_scope.py", "file_name": "test_edgeql_scope.py", "fun_name": "test_edgeql_scope_ref_outer_02a", "commit_message": "Always include the definition context namespace in computable contexts (#3331)\n\nWe need to include the *original* source namespace in our ctx\r\nnamespace when compiling computables. The current mechanism of trying\r\nto look up in view_sets or failing that using the source namespace\r\nfrom the computable use, but this was failing to find it in some cases\r\nwith FOR.\r\n\r\nFix this by instead directly pulling in the namespace from qlctx. The\r\ninclusion of qlctx's namespace nicely allows us to ditch so later\r\nlogic as well.\r\n\r\nAdditionally we need to merge the namespace into *both* sides in\r\nget_view_map_remapping, to handle cases like referencing a `FOR`\r\nvariable where the current ns doesn't get merged in.\r\n\r\nFixes #3323.", "code": "async def test_edgeql_scope_ref_outer_02a(self):\n await self.assert_query_result(\n ,\n [{\n \"cards\": [\n {\"tag\": [\"Alice\"]},\n {\"tag\": [\"Alice\"]},\n {\"tag\": [\"Alice\"]},\n {\"tag\": [\"Alice\"]}\n ]\n }],\n )\n", "url": "https://github.com/edgedb/edgedb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 172, "n_words": 20, "vocab_size": 15, "complexity": 1, "nloc": 18, "token_counts": 53, "n_ast_nodes": 99, "n_identifiers": 3, "random_cut": "async def test_edgeql_scope_ref_outer_02a(self):\n await self.assert_query_result(\n ,\n [{\n \"cards\": [\n {\"tag\": [\"Alice\"]},\n {\"tag\": [\"Alice\"]},\n {\"tag\": [\"Alice\"]},\n {\"tag\": [\"Alice\"]}\n ]\n }],\n )\n" }, { "id": 148035, "commit_id": "06a57b20de12c840406a3bac69751c83a44f008c", "repo": "ray", "path": "python/ray/ml/preprocessor.py", "file_name": "preprocessor.py", "fun_name": "_check_is_fitted", "commit_message": "[air - preprocessor] Add BatchMapper. (#23700)\n\nAdd BatchMapper preprocessor.\r\nUpdate the semantics of preprocessor.fit() to allow for multiple fit. 
This is to follow scikitlearn example.\r\nIntroduce FitStatus to explicitly incorporate Chain case.", "code": "def _check_is_fitted(self) -> bool:\n \n fitted_vars = [v for v in vars(self) if v.endswith(\"_\")]\n return bool(fitted_vars)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 36, "n_words": 15, "vocab_size": 15, "complexity": 3, "nloc": 8, "token_counts": 32, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def _check_is_fitted(self) -> bool:\n \n fitted_vars = [v for v in vars(self) " }, { "id": 225428, "commit_id": "ff8552a57abf2c32f2d0344ef12707b88e008493", "repo": "mkdocs", "path": "mkdocs/tests/config/config_options_tests.py", "file_name": "config_options_tests.py", "fun_name": "test_valid_dir", "commit_message": "Add tests for new class-based configs\n\nThe old-style tests are intentionally kept at config_options_legacy_tests.py", "code": "def test_valid_dir(self) -> None:\n for cls in c.Dir, c.FilesystemObject:\n with self.subTest(cls):\n d = os.path.dirname(__file__)\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 46, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 9, "token_counts": 82, "n_ast_nodes": 59, "n_identifiers": 12, "random_cut": "def test_valid_dir(self) -> None:\n for cls in c.Dir, c.FilesystemObject:\n with self.subTest(cls):\n d = os.path.dirname(__file__)\n" }, { "id": 281802, "commit_id": "d8ca7556edde9a700706c7802a229cb4439304c5", "repo": "OpenBBTerminal", "path": "tests/gamestonk_terminal/etf/test_yfinance_model.py", "file_name": "test_yfinance_model.py", "fun_name": "test_get_etf_summary_description", "commit_message": "ETF tests (#1208)\n\n* etf tests for stockanalysis\r\n\r\n* add financedatabase etf tests\r\n\r\n* fix financedatabase etf documentation\r\n\r\n* yfinance etf tests\r\n\r\n* add etf/discovery tests\r\n\r\n* add tests to etf/screener\r\n\r\n* add etf controller tests\r\n\r\n* add etf/ta tests\r\n\r\n* remove tabulate and use rich table\r\n\r\n* etf/pred\r\n\r\n* add more etf tests, thanks Chavi\r\n\r\n* update about us website\r\n\r\n* Updating tests : etf\r\n\r\nCo-authored-by: Chavithra PARANA ", "code": "def test_get_etf_summary_description(recorder, name):\n result = yfinance_model.get_etf_summary_description(name)\n\n recorder.capture(result)\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 12, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 21, "n_ast_nodes": 34, "n_identifiers": 7, "random_cut": "def test_get_etf_summary_description(recorder, name):\n result = yfinance_model.get" }, { "id": 38499, "commit_id": "5a9957358cebd616e58b2d1ab3b887c2f2793b45", "repo": "transformers", "path": "src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py", "file_name": "modeling_wav2vec2_conformer.py", "fun_name": "_compute_perplexity", "commit_message": "Add Wav2Vec2Conformer (#16812)\n\n* save intermediate\r\n\r\n* add wav2vec2 conformer\r\n\r\n* add more code\r\n\r\n* more\r\n\r\n* first test passes\r\n\r\n* make all checkpoints work\r\n\r\n* update\r\n\r\n* up\r\n\r\n* more clean ups\r\n\r\n* save clean-up\r\n\r\n* save clean-up\r\n\r\n* save more\r\n\r\n* remove bogus\r\n\r\n* finalize design conformer\r\n\r\n* remove vision\r\n\r\n* finish all tests\r\n\r\n* more changes\r\n\r\n* 
finish code\r\n\r\n* add doc tests\r\n\r\n* add slow tests\r\n\r\n* fix autoconfig test\r\n\r\n* up\r\n\r\n* correct docstring\r\n\r\n* up\r\n\r\n* update\r\n\r\n* fix\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Anton Lozhkov \r\n\r\n* Update docs/source/en/model_doc/wav2vec2-conformer.mdx\r\n\r\n* upload\r\n\r\n* save copied from\r\n\r\n* correct configs\r\n\r\n* fix model outputs\r\n\r\n* add to docs\r\n\r\n* fix imports\r\n\r\n* finish\r\n\r\n* finish code\r\n\r\n* correct copied from\r\n\r\n* correct again\r\n\r\n* correct make fix\r\n\r\n* improve make fix copies\r\n\r\n* save\r\n\r\n* correct fix copy from\r\n\r\n* correct init structure\r\n\r\n* correct\r\n\r\n* fix import\r\n\r\n* apply suggestions\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Anton Lozhkov ", "code": "def _compute_perplexity(probs, mask=None):\n if mask is not None:\n mask_extended = mask.flatten()[:, None, None].expand(probs.shape)\n probs = torch.where(mask_extended, probs, torch.zeros_like(probs))\n marginal_probs = probs.sum(dim=0) / mask.sum()\n else:\n marginal_probs = probs.mean(dim=0)\n\n perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()\n return perplexity\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 108, "n_words": 37, "vocab_size": 31, "complexity": 2, "nloc": 9, "token_counts": 117, "n_ast_nodes": 180, "n_identifiers": 17, "random_cut": "def _compute_perplexity(probs, mask=None):\n if mask is not None:\n mask_extended = mask.flatten()[:, None," }, { "id": 119207, "commit_id": "086a607d8c8ea8487a59d6ced8aaf59834b8846c", "repo": "jax", "path": "jax/experimental/maps.py", "file_name": "maps.py", "fun_name": "_ensure_supports_manual_and", "commit_message": "Add experimental support for SPMD lowering of xmap via MANUAL sharding annotations\n\nNote that it's still limited and turns out to be a bit hard (partly due to\nunclear XLA semantics at this point). Using constants that are not xmap inputs\nis likely to cause SPMD partitioner errors and cross-replica collectives don't seem\nto work either.\n\nIn any case, the next step will be to allow nesting those xmaps inside pjits.\n\nPiperOrigin-RevId: 426447989", "code": "def _ensure_supports_manual_and(f):\n def update(v):\n if v and not hasattr(xc.OpSharding.Type, \"MANUAL\"):\n raise RuntimeError(\"This flag requires a version of jaxlib that supports MANUAL sharding type\")\n return f(v)\n return update\n\ntry:\n config.define_bool_state(\n name=\"experimental_xmap_spmd_lowering\",\n default=False,\n help=(\"When set, multi-device xmap computations will be compiled through \"\n \"the XLA SPMD partitioner instead of explicit cross-replica collectives. \"\n \"Not supported on CPU!\"),\n update_global_hook=_clear_compilation_cache,\n update_thread_local_hook=_thread_local_flag_unsupported)\n config.define_bool_state(\n name=\"experimental_xmap_spmd_lowering_manual\",\n default=False,\n help=(\"When set, multi-device xmap computations will be compiled using \"\n \"the MANUAL partitioning feature of the XLA SPMD partitioner instead of \"\n \"sharding constraints on vectorized code. 
\"\n \"Requires experimental_xmap_spmd_lowering!\"),\n update_global_hook=_ensure_supports_manual_and(_ensure_spmd_and(_clear_compilation_cache)),\n update_thread_local_hook=_thread_local_flag_unsupported)\n config.define_bool_state(\n name=\"experimental_xmap_ensure_fixed_sharding\",\n default=False,\n help=(\"When set and `experimental_xmap_spmd_lowering` is enabled, the lowering will \"\n \"try to limit the flexibility of the automated SPMD partitioner heuristics \"\n \"by emitting additional sharding annotations for program intermediates.\"),\n update_global_hook=_ensure_spmd_and(_clear_compilation_cache),\n update_thread_local_hook=_thread_local_flag_unsupported)\nexcept Exception:\n raise ImportError(\"jax.experimental.maps has to be imported before JAX flags \"\n \"are parsed\")\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 326, "n_words": 141, "vocab_size": 95, "complexity": 1, "nloc": 3, "token_counts": 9, "n_ast_nodes": 246, "n_identifiers": 21, "random_cut": "def _ensure_supports_manual_and(f):\n def update(v):\n if v and not hasattr(xc.OpSharding.Type, \"MANUAL\"):\n raise RuntimeError(\"This flag requires a version of jaxlib that supports MANUAL sharding type\")\n return f(v)\n return update\n\ntry:\n config.define_bool_state(\n name=\"experimental_xmap_spmd_lowering\",\n default=False,\n help=(\"When set, multi-device xmap computations will be compiled through \"\n \"the XLA SPMD partitioner instead of explicit cross-replica collectives. \"\n \"Not supported on CPU!\"),\n update_global_hook=_clear_compilation_cache,\n update_thread_local_hook=_thread_local_flag_unsupported)" }, { "id": 106199, "commit_id": "2c2c2bd348b7dce0aad55a6fc37a18c6f9a000e3", "repo": "youtube-dl", "path": "test/test_youtube_lists.py", "file_name": "test_youtube_lists.py", "fun_name": "test_youtube_mix", "commit_message": "Fix test_youtube_mix", "code": "def test_youtube_mix(self):\n dl = FakeYDL()\n dl.params['format'] = 'best'\n ie = YoutubeTabIE(dl)\n result = dl.extract_info('https://www.youtube.com/watch?v=uVJ0Il5WvbE&list=PLhQjrBD2T381k8ul4WQ8SQ165XqY149WW',\n download=False, ie_key=ie.ie_key(), process=True)\n entries = (result or {}).get('entries', [{'id': 'not_found', }])\n self.assertTrue(len(entries) >= 50)\n original_video = entries[0]\n self.assertEqual(original_video['id'], 'uVJ0Il5WvbE')\n", "url": "https://github.com/ytdl-org/youtube-dl.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 120, "n_words": 33, "vocab_size": 28, "complexity": 2, "nloc": 10, "token_counts": 98, "n_ast_nodes": 167, "n_identifiers": 18, "random_cut": "def test_youtube_mix(self):\n dl = FakeYDL()\n" }, { "id": 137459, "commit_id": "64d744b4750b749cede563b04c5d32396470a236", "repo": "ray", "path": "rllib/__init__.py", "file_name": "__init__.py", "fun_name": "_register_all", "commit_message": "[RLlib] Deprecate (delete) `contrib` folder. 
(#30992)", "code": "def _register_all():\n from ray.rllib.algorithms.registry import ALGORITHMS, _get_algorithm_class\n\n for key, get_trainable_class_and_config in ALGORITHMS.items():\n register_trainable(key, get_trainable_class_and_config()[0])\n\n for key in [\"__fake\", \"__sigmoid_fake_data\", \"__parameter_tuning\"]:\n register_trainable(key, _get_algorithm_class(key))\n\n\n_setup_logger()\n\nusage_lib.record_library_usage(\"rllib\")\n\n__all__ = [\n \"Policy\",\n \"TFPolicy\",\n \"TorchPolicy\",\n \"RolloutWorker\",\n \"SampleBatch\",\n \"BaseEnv\",\n \"MultiAgentEnv\",\n \"VectorEnv\",\n \"ExternalEnv\",\n]\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 82, "n_words": 37, "vocab_size": 34, "complexity": 3, "nloc": 6, "token_counts": 58, "n_ast_nodes": 153, "n_identifiers": 15, "random_cut": "def _register_all():\n from ray.rllib.algorithms.registry import ALGORITHMS, _get_algorithm_class\n\n for key, get_trainable_class_and_config in ALGORITHMS.items():\n register_trainable(key, get_trainable_class_and_config()[0])\n\n for key in [\"__fake\", \"__sigmoid_fake_data\", \"__parameter_tuning\"]:\n register_trainable(key, _get_algorithm_class(key))\n\n\n_setup_logger()\n\nusage_lib.record_library_usage(\"rllib\")\n\n__all__ = [\n \"Policy\",\n \"TFPolic" }, { "id": 119763, "commit_id": "6355fac8822bced4bfa657187a7284477f373c52", "repo": "jax", "path": "jax/_src/numpy/ufuncs.py", "file_name": "ufuncs.py", "fun_name": "_sinc_maclaurin", "commit_message": "lax_numpy.py: factor ufuncs into their own private submodule\n\nRe-lands part of #9724\n\nPiperOrigin-RevId: 434629548", "code": "def _sinc_maclaurin(k, x):\n # compute the kth derivative of x -> sin(x)/x evaluated at zero (since we\n # compute the monomial term in the jvp rule)\n if k % 2:\n return lax.full_like(x, 0)\n else:\n return lax.full_like(x, (-1) ** (k // 2) / (k + 1))\n\n@_sinc_maclaurin.defjvp", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@_sinc_maclaurin.defjvp", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 54, "n_words": 46, "vocab_size": 39, "complexity": 2, "nloc": 5, "token_counts": 38, "n_ast_nodes": 81, "n_identifiers": 6, "random_cut": "def _sinc_maclaurin(k, x):\n # compute the kth derivative of x -> sin(x)/x evaluated at zero (since we\n # compute the monomial term in the jvp rule)\n if k % 2:\n return lax.full_like(x, 0)\n else:\n return lax.full_like(" }, { "id": 158403, "commit_id": "b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2", "repo": "d2l-zh", "path": "d2l/torch.py", "file_name": "torch.py", "fun_name": "evaluate_loss", "commit_message": "[PaddlePaddle] Merge master into Paddle branch (#1186)\n\n* change 15.2 title in chinese version (#1109)\r\n\r\nchange title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 
情感分析:使用循环神经网络‘\r\n\r\n* 修改部分语义表述 (#1105)\r\n\r\n* Update r0.17.5 (#1120)\r\n\r\n* Bump versions in installation\r\n\r\n* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)\r\n\r\n* line 313: \"bert.mall\" -> \"bert.small\" (#1130)\r\n\r\n* fix: update language as native reader (#1114)\r\n\r\n* Fix the translation of \"stride\" (#1115)\r\n\r\n* Update index.md (#1118)\r\n\r\n修改部分语义表述\r\n\r\n* Update self-attention-and-positional-encoding.md (#1133)\r\n\r\n依照本书的翻译习惯,将pooling翻译成汇聚\r\n\r\n* maybe a comment false (#1149)\r\n\r\n* maybe a little false\r\n\r\n* maybe a little false\r\n\r\n* A minor bug in the rcnn section (Chinese edition) (#1148)\r\n\r\n* Update bert.md (#1137)\r\n\r\n一个笔误\r\n# 假设batch_size=2,num_pred_positions=3\r\n# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]\r\n\r\n* Update calculus.md (#1135)\r\n\r\n* fix typo in git documentation (#1106)\r\n\r\n* fix: Update the Chinese translation in lr-scheduler.md (#1136)\r\n\r\n* Update lr-scheduler.md\r\n\r\n* Update chapter_optimization/lr-scheduler.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* fix translation for kaggle-house-price.md (#1107)\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\nSigned-off-by: sunhaizhou \r\n\r\n* Update weight-decay.md (#1150)\r\n\r\n* Update weight-decay.md\r\n\r\n关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解\r\n关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。\r\n并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释\r\n解释为何会增加复杂性以及为何需要细粒度工具。\r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Fix a spelling error (#1161)\r\n\r\n* Update gru.md (#1152)\r\n\r\nThe key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.\r\n翻译错误\r\n\r\n* Unify the function naming (#1113)\r\n\r\nUnify naming of the function 'init_xavier()'.\r\n\r\n* Update mlp-concise.md (#1166)\r\n\r\n* Update mlp-concise.md\r\n\r\n语句不通顺\r\n\r\n* Update environment.md\r\n\r\n语序异常\r\n\r\n* Update config.ini\r\n\r\n* fix the imprecise description (#1168)\r\n\r\nCo-authored-by: yuande \r\n\r\n* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)\r\n\r\n* Fix some typos. (#1163)\r\n\r\n* Update batch-norm.md (#1170)\r\n\r\nfixing typos u->x in article\r\n\r\n* Update linear-regression.md (#1090)\r\n\r\nWe invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that\r\n\r\n原译文把who也直接翻译出来了。\r\n\r\n* Update mlp.md (#1117)\r\n\r\n* Update mlp.md\r\n\r\n修改部分语义表述\r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: goldmermaid \r\n\r\n* Correct a translation error. 
(#1091)\r\n\r\n* Correct a translation error.\r\n\r\n* Update chapter_computer-vision/image-augmentation.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update aws.md (#1121)\r\n\r\n* Update aws.md\r\n\r\n* Update chapter_appendix-tools-for-deep-learning/aws.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update image-augmentation.md (#1093)\r\n\r\n* Update anchor.md (#1088)\r\n\r\nfix a minor issue in code\r\n\r\n* Update anchor.md\r\n\r\n* Update image-augmentation.md\r\n\r\n* fix typo and improve translation in chapter_linear-networks\\softmax-regression.md (#1087)\r\n\r\n* Avoid `torch.meshgrid` user warning (#1174)\r\n\r\nAvoids the following user warning:\r\n```python\r\n~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\n```\r\n\r\n* bump to 2.0.0-beta1\r\n\r\n* Update sequence.md\r\n\r\n* bump beta1 on readme\r\n\r\n* Add latex code block background to config\r\n\r\n* BLD: Bump python support version 3.9 (#1183)\r\n\r\n* BLD: Bump python support version 3.9\r\n\r\n* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4\r\n\r\n* BLD: Bump torch and tensorflow\r\n\r\n* Update Jenkinsfile\r\n\r\n* Update chapter_installation/index.md\r\n\r\n* Update chapter_installation/index.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update config.ini\r\n\r\n* Update INFO.md\r\n\r\n* Update INFO.md\r\n\r\n* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)\r\n\r\n* resolve the conflicts\r\n\r\n* revise from publisher (#1089)\r\n\r\n* revise from publisher\r\n\r\n* d2l api\r\n\r\n* post_latex\r\n\r\n* revise from publisher\r\n\r\n* revise ch11\r\n\r\n* Delete d2l-Copy1.bib\r\n\r\n* clear cache\r\n\r\n* rm d2lbook clear\r\n\r\n* debug anchor\r\n\r\n* keep original d2l doc\r\n\r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\n\r\n* 重复语句 (#1188)\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improve expression for chapter_preliminaries/pandas.md (#1184)\r\n\r\n* Update pandas.md\r\n\r\n* Improve expression\r\n\r\n* Improve expression\r\n\r\n* Update chapter_preliminaries/pandas.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)\r\n\r\n* Improce expression\r\n\r\n* Improve code comments\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Fix multibox_detection bugs\r\n\r\n* Update d2l to 0.17.5 version\r\n\r\n* restore older version\r\n\r\n* Upgrade pandas\r\n\r\n* change to python3.8\r\n\r\n* Test warning log\r\n\r\n* relocate warning log\r\n\r\n* test logs filtering\r\n\r\n* Update gru.md\r\n\r\n* Add DeprecationWarning filter\r\n\r\n* Test warning log\r\n\r\n* Update attention mechanisms & computational performance\r\n\r\n* Update multilayer 
perceptron& linear & convolution networks & computer vision\r\n\r\n* Update recurrent&optimition&nlp pretraining & nlp applications\r\n\r\n* ignore warnings\r\n\r\n* Update index.md\r\n\r\n* Update linear networks\r\n\r\n* Update multilayer perceptrons&deep learning computation\r\n\r\n* Update preliminaries\r\n\r\n* Check and Add warning filter\r\n\r\n* Update kaggle-cifar10.md\r\n\r\n* Update object-detection-dataset.md\r\n\r\n* Update ssd.md fcn.md\r\n\r\n* Update hybridize.md\r\n\r\n* Update hybridize.md\r\n\r\nSigned-off-by: sunhaizhou \r\nCo-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>\r\nCo-authored-by: Xinwei Liu \r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>\r\nCo-authored-by: gyro永不抽风 <1247006353@qq.com>\r\nCo-authored-by: CanChengZheng \r\nCo-authored-by: linlin \r\nCo-authored-by: iuk \r\nCo-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>\r\nCo-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>\r\nCo-authored-by: Chiyuan Fu \r\nCo-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>\r\nCo-authored-by: Haiker Sun \r\nCo-authored-by: Ming Liu \r\nCo-authored-by: goldmermaid \r\nCo-authored-by: silenceZheng66 <13754430639@163.com>\r\nCo-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>\r\nCo-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>\r\nCo-authored-by: Krahets \r\nCo-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>\r\nCo-authored-by: Jameson \r\nCo-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com>\r\nCo-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>\r\nCo-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>\r\nCo-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>\r\nCo-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>\r\nCo-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>\r\nCo-authored-by: VigourJiang \r\nCo-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>\r\nCo-authored-by: LYF <27893441+liyufan@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\nCo-authored-by: xiaotinghe \r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>\r\nCo-authored-by: HinGwenWoong \r\nCo-authored-by: Shuai Zhang ", "code": "def evaluate_loss(net, data_iter, loss):\n \n metric = d2l.Accumulator(2) # Sum of losses, no. of examples\n for X, y in data_iter:\n out = net(X)\n y = d2l.reshape(y, out.shape)\n l = loss(out, y)\n metric.add(d2l.reduce_sum(l), d2l.size(l))\n return metric[0] / metric[1]\n\nDATA_HUB = dict()\nDATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 81, "n_words": 42, "vocab_size": 35, "complexity": 2, "nloc": 8, "token_counts": 79, "n_ast_nodes": 139, "n_identifiers": 19, "random_cut": "def evaluate_loss(net, data_iter, loss):\n \n metric = d2l.Accumulator(2) # Sum of losses, no. 
of examples\n for X, y in data_iter:\n out = net(X)\n y = d2l.reshape(y, out.shape)\n l = loss(out, y)\n metric.add(d2l.reduce_sum(l), d2l.size(l))\n return metric[0] / metric[1]\n\nDATA_HUB = dict()\nDATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'\n" }, { "id": 197117, "commit_id": "cba899d4137b0b65f6850120ee42cd4fcd4f9dbf", "repo": "sympy", "path": "sympy/tensor/tensor.py", "file_name": "tensor.py", "fun_name": "__iter__", "commit_message": "Update the various tensor deprecations", "code": "def __iter__(self):\n deprecate_data()\n with ignore_warnings(SymPyDeprecationWarning):\n return self.data.__iter__()\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 31, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 4, "token_counts": 22, "n_ast_nodes": 40, "n_identifiers": 6, "random_cut": "def __iter__(self):\n deprecate_data()\n with ignore_warnings(SymPyDeprecationWarning" }, { "id": 51961, "commit_id": "2e373966a7fd3119c205350fb14d0b7bfe74185d", "repo": "PaddleHub", "path": "modules/image/Image_editing/super_resolution/swinir_l_real_sr_x4/test.py", "file_name": "test.py", "fun_name": "test_real_sr4", "commit_message": "add swinir_l_real_sr_x4 (#2076)\n\n* git add swinir_l_real_sr_x4\r\n\r\n* fix typo\r\n\r\n* fix typo\r\n\r\nCo-authored-by: chenjian ", "code": "def test_real_sr4(self):\n self.assertRaises(Exception, self.module.real_sr, image=['tests/test.jpg'])\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 37, "n_identifiers": 7, "random_cut": "def test_real_sr4(self):\n " }, { "id": 167948, "commit_id": "ad7dcef6f0dbdbb14240dd13db51f4d8892ad808", "repo": "pandas", "path": "pandas/core/groupby/groupby.py", "file_name": "groupby.py", "fun_name": "cummax", "commit_message": "BUG: numeric_only with axis=1 in DataFrame.corrwith and DataFrameGroupBy.cummin/max (#47724)\n\n* BUG: DataFrame.corrwith and DataFrameGroupBy.cummin/cummax with numeric_only=True\r\n\r\n* test improvements", "code": "def cummax(self, axis=0, numeric_only=False, **kwargs) -> NDFrameT:\n \n skipna = kwargs.get(\"skipna\", True)\n if axis != 0:\n f = lambda x: np.maximum.accumulate(x, axis)\n numeric_only_bool = self._resolve_numeric_only(\"cummax\", numeric_only, axis)\n obj = self._selected_obj\n if numeric_only_bool:\n obj = obj._get_numeric_data()\n return self._python_apply_general(f, obj, is_transform=True)\n\n return self._cython_transform(\n \"cummax\", numeric_only=numeric_only, skipna=skipna\n )\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 160, "n_words": 44, "vocab_size": 36, "complexity": 3, "nloc": 19, "token_counts": 104, "n_ast_nodes": 163, "n_identifiers": 21, "random_cut": "def cummax(self, axis=0, numeric_only=False, **kwargs) -> NDFrameT:\n \n skipna = kwargs.get(\"skipna\", True)\n if axis != 0:\n f = lambda x: np.maximum.accumulate(x, axis)\n numeric_only_bool = self." 
}, { "id": 122702, "commit_id": "c4d590b1b640cc9fcfdbe91bf3fe34c47bcde917", "repo": "jax", "path": "jax/tools/colab_tpu.py", "file_name": "colab_tpu.py", "fun_name": "setup_tpu", "commit_message": "Update values for release 0.4.1\n\nPiperOrigin-RevId: 494889744", "code": "def setup_tpu(tpu_driver_version='tpu_driver_20221212'):\n \n global TPU_DRIVER_MODE\n\n if not TPU_DRIVER_MODE:\n colab_tpu_addr = os.environ['COLAB_TPU_ADDR'].split(':')[0]\n url = f'http://{colab_tpu_addr}:8475/requestversion/{tpu_driver_version}'\n requests.post(url)\n TPU_DRIVER_MODE = 1\n\n # The following is required to use TPU Driver as JAX's backend.\n config.FLAGS.jax_xla_backend = \"tpu_driver\"\n config.FLAGS.jax_backend_target = \"grpc://\" + os.environ['COLAB_TPU_ADDR']\n # TODO(skyewm): Remove this after SPMD is supported for colab tpu.\n config.update('jax_array', False)\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 70, "n_words": 50, "vocab_size": 43, "complexity": 2, "nloc": 10, "token_counts": 72, "n_ast_nodes": 140, "n_identifiers": 15, "random_cut": "def setup_tpu(tpu_driver_version='tpu_driver_20221212'):\n \n global TPU_DRIVER_MODE\n\n if not TPU_DRIVER_MODE:\n colab_tpu_addr = os.environ['COLAB_TPU_ADDR'].split(':')[0]\n url = f'http://{colab_tpu_addr}:8475/requestversion/{tpu_driver_version}'\n requests.post(url)\n TPU_DRIVER_MODE = 1\n\n # The following is required to use TPU Driver as JAX's backend.\n config.FLAGS.jax_xla_backend = \"tpu_driver\"\n config.FLAGS.jax_backend_target = \"grpc://\" + os.environ['COLAB_TPU_ADDR']\n # TODO(skyewm): Remove this after SPMD is supported for colab tpu.\n config." }, { "id": 145653, "commit_id": "1752f17c6d6fceac3d7902d3220a756b8424b7da", "repo": "ray", "path": "dashboard/modules/job/tests/test_cli_integration.py", "file_name": "test_cli_integration.py", "fun_name": "test_list", "commit_message": "[Job submission] Add `list_jobs` API (#22679)\n\nAdds an API to the REST server, the SDK, and the CLI for listing all jobs that have been submitted, along with their information.\r\n\r\nCo-authored-by: Edward Oakes ", "code": "def test_list(self, ray_start_stop):\n _run_cmd(\"ray job submit --job-id='hello_id' -- echo hello\")\n\n runtime_env = {\"env_vars\": {\"TEST\": \"123\"}}\n _run_cmd(\n \"ray job submit --job-id='hi_id' \"\n f\"--runtime-env-json='{json.dumps(runtime_env)}' -- echo hi\"\n )\n stdout, _ = _run_cmd(\"ray job list\")\n assert \"JobInfo\" in stdout\n assert \"123\" in stdout\n assert \"hello_id\" in stdout\n assert \"hi_id\" in stdout\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 132, "n_words": 48, "vocab_size": 32, "complexity": 1, "nloc": 12, "token_counts": 52, "n_ast_nodes": 115, "n_identifiers": 9, "random_cut": "def test_list(self, ray_start_stop):\n _run_cmd(\"ray job submit --job-id='hello_id' -- echo hello\")\n\n runtime_env = {\"env_vars\": {\"TEST\": \"123\"}}\n _run_cmd(\n \"ray job submit --job-id='hi_id' \"\n f\"--runtime-env-json='{json.dumps(runtime_env)}' -- echo hi\"\n )\n stdout, _ = _run_" }, { "id": 222394, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/datetime.py", "file_name": "datetime.py", "fun_name": "isoformat", "commit_message": "add python 3.10.4 for windows", "code": "def isoformat(self, timespec='auto'):\n \n s = _format_time(self._hour, self._minute, 
self._second,\n self._microsecond, timespec)\n tz = self._tzstr()\n if tz:\n s += tz\n return s\n\n __str__ = isoformat\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 97, "n_words": 23, "vocab_size": 18, "complexity": 2, "nloc": 7, "token_counts": 47, "n_ast_nodes": 80, "n_identifiers": 12, "random_cut": "def isoformat(self, timespec='auto'):\n \n s = _format_time(self._hour, self._minute, self._second,\n " }, { "id": 165932, "commit_id": "361021b56f3159afb71d690fac3a1f3b381b0da6", "repo": "pandas", "path": "pandas/tests/reshape/concat/test_index.py", "file_name": "test_index.py", "fun_name": "test_concat_with_duplicated_levels", "commit_message": "TST: add validation checks on levels keyword from pd.concat (#46654)", "code": "def test_concat_with_duplicated_levels(self):\n # keyword levels should be unique\n df1 = DataFrame({\"A\": [1]}, index=[\"x\"])\n df2 = DataFrame({\"A\": [1]}, index=[\"y\"])\n msg = r\"Level values not unique: \\['x', 'y', 'y'\\]\"\n with pytest.raises(ValueError, match=msg):\n concat([df1, df2], keys=[\"x\", \"y\"], levels=[[\"x\", \"y\", \"y\"]])\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 82, "n_words": 37, "vocab_size": 33, "complexity": 1, "nloc": 6, "token_counts": 85, "n_ast_nodes": 146, "n_identifiers": 14, "random_cut": "def test_concat_with_duplicated_levels(self):\n # keyword levels should be unique\n df1 = DataFrame({\"A\": [1]}, index=[\"x\"])\n df2 = DataFrame({\"A\": [1]}, index=[\"y\"])\n msg = r\"Level values not unique: \\['x', 'y', 'y'\\]\"\n with pytest.raises(ValueError, match=msg):\n concat([df1, df2], keys=[\"x\", \"y\"], levels=[[\"x\", \"y\", \"y\"" }, { "id": 75598, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/search/index.py", "file_name": "index.py", "fun_name": "class_is_indexed", "commit_message": "Reformat with black", "code": "def class_is_indexed(cls):\n return (\n issubclass(cls, Indexed)\n and issubclass(cls, models.Model)\n and not cls._meta.abstract\n )\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 39, "n_words": 13, "vocab_size": 11, "complexity": 3, "nloc": 6, "token_counts": 30, "n_ast_nodes": 46, "n_identifiers": 8, "random_cut": "def class_is_indexed(cls):\n return (\n issubclass(cls, " }, { "id": 223903, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/encodings/bz2_codec.py", "file_name": "bz2_codec.py", "fun_name": "bz2_encode", "commit_message": "add python 3.10.4 for windows", "code": "def bz2_encode(input, errors='strict'):\n assert errors == 'strict'\n return (bz2.compress(input), len(input))\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 15, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 27, "n_ast_nodes": 45, "n_identifiers": 6, "random_cut": "def bz2_encode(input, errors='strict'):\n assert errors == 's" }, { "id": 90353, "commit_id": "3a1d4f5105f9b01e70efa92af651107399e76f99", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events_v2.py", "file_name": "test_organization_events_v2.py", "fun_name": "test_equation_simple", "commit_message": "fix(discover): Equation change 
and meta conflict tests (#34889)\n\n- This fixes this test which broke cause the meta changed in one PR, and\r\n the equation format in another", "code": "def test_equation_simple(self):\n event_data = load_data(\"transaction\", timestamp=before_now(minutes=1))\n event_data[\"breakdowns\"][\"span_ops\"][\"ops.http\"][\"value\"] = 1500\n self.store_event(data=event_data, project_id=self.project.id)\n\n query = {\n \"field\": [\"spans.http\", \"equation|spans.http / 3\"],\n \"project\": [self.project.id],\n \"query\": \"event.type:transaction\",\n }\n response = self.do_request(\n query,\n {\n \"organizations:discover-basic\": True,\n },\n )\n assert response.status_code == 200, response.content\n assert len(response.data[\"data\"]) == 1\n assert (\n response.data[\"data\"][0][\"equation|spans.http / 3\"]\n == event_data[\"breakdowns\"][\"span_ops\"][\"ops.http\"][\"value\"] / 3\n )\n assert response.data[\"meta\"][\"fields\"][\"equation|spans.http / 3\"] == \"number\"\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 244, "n_words": 58, "vocab_size": 42, "complexity": 1, "nloc": 22, "token_counts": 161, "n_ast_nodes": 278, "n_identifiers": 18, "random_cut": "def test_equation_simple(self):\n event_data = " }, { "id": 259568, "commit_id": "b4da3b406379b241bf5e81d0f60bbcddd424625b", "repo": "scikit-learn", "path": "sklearn/linear_model/tests/test_sgd.py", "file_name": "test_sgd.py", "fun_name": "test_sgd_random_state", "commit_message": "MNT ensure creation of dataset is deterministic in SGD (#19716)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def test_sgd_random_state(Estimator, global_random_seed):\n # Train the same model on the same data without converging and check that we\n # get reproducible results by fixing the random seed.\n if Estimator == linear_model.SGDRegressor:\n X, y = datasets.make_regression(random_state=global_random_seed)\n else:\n X, y = datasets.make_classification(random_state=global_random_seed)\n\n # Fitting twice a model with the same hyper-parameters on the same training\n # set with the same seed leads to the same results deterministically.\n\n est = Estimator(random_state=global_random_seed, max_iter=1)\n with pytest.warns(ConvergenceWarning):\n coef_same_seed_a = est.fit(X, y).coef_\n assert est.n_iter_ == 1\n\n est = Estimator(random_state=global_random_seed, max_iter=1)\n with pytest.warns(ConvergenceWarning):\n coef_same_seed_b = est.fit(X, y).coef_\n assert est.n_iter_ == 1\n\n assert_allclose(coef_same_seed_a, coef_same_seed_b)\n\n # Fitting twice a model with the same hyper-parameters on the same training\n # set but with different random seed leads to different results after one\n # epoch because of the random shuffling of the dataset.\n\n est = Estimator(random_state=global_random_seed + 1, max_iter=1)\n with pytest.warns(ConvergenceWarning):\n coef_other_seed = est.fit(X, y).coef_\n assert est.n_iter_ == 1\n\n assert np.abs(coef_same_seed_a - coef_other_seed).max() > 1.0\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 259, "n_words": 153, "vocab_size": 75, "complexity": 2, "nloc": 19, "token_counts": 179, "n_ast_nodes": 287, "n_identifiers": 26, "random_cut": "def test_sgd_random_state(Estimator, global_random_seed):\n # 
Train the same model on the same data without converging and check that we\n # get reproducible results by fixing the random seed.\n if Estimator == linear_model.SGDR" }, { "id": 53055, "commit_id": "08e580acf95963a2579971eb0ff4514233b5e7ea", "repo": "prefect", "path": "src/prefect/logging/loggers.py", "file_name": "loggers.py", "fun_name": "process", "commit_message": "Move logging into separate modules at 'prefect.logging'", "code": "def process(self, msg, kwargs):\n kwargs[\"extra\"] = {**self.extra, **(kwargs.get(\"extra\") or {})}\n return (msg, kwargs)\n\n\n@lru_cache()", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "@lru_cache()", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 26, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 3, "token_counts": 39, "n_ast_nodes": 70, "n_identifiers": 7, "random_cut": "def process(self, msg, kwargs):\n kwargs[\"extra\"] = {**self.extra, **(kwargs.get(\"extra\") or {})}\n return (msg, kwargs)\n\n\n@lru_cache()" }, { "id": 13199, "commit_id": "cdaf7f87ececf9e13b517379ca183b17f0d7b007", "repo": "jina", "path": "tests/unit/test_yamlparser.py", "file_name": "test_yamlparser.py", "fun_name": "test_load_gateway_override_with", "commit_message": "feat: allow passing custom gateway in Flow (#5189)", "code": "def test_load_gateway_override_with():\n with Gateway.load_config(\n 'yaml/test-custom-gateway.yml',\n uses_with={'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},\n ) as gateway:\n assert gateway.__class__.__name__ == 'DummyGateway'\n assert gateway.arg1 == 'arg1'\n assert gateway.arg2 == 'arg2'\n assert gateway.arg3 == 'arg3'\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 77, "n_words": 30, "vocab_size": 24, "complexity": 1, "nloc": 9, "token_counts": 57, "n_ast_nodes": 110, "n_identifiers": 10, "random_cut": "def test_load_gateway_override_with():\n with Gateway.load_config(\n 'yaml/test-custom-gateway.yml',\n " }, { "id": 169003, "commit_id": "54347fe684e0f7844bf407b1fb958a5269646825", "repo": "pandas", "path": "pandas/core/base.py", "file_name": "base.py", "fun_name": "__iter__", "commit_message": "TYP: Autotyping (#48191)\n\n* annotate-magics\r\n\r\n* annotate-imprecise-magics\r\n\r\n* none-return\r\n\r\n* scalar-return\r\n\r\n* pyi files\r\n\r\n* ignore vendored file\r\n\r\n* manual changes\r\n\r\n* ignore pyright in pickle_compat (these errors would be legit if the current __new__ methods were called but I think these pickle tests call older __new__ methods which allowed providing multiple positional arguments)\r\n\r\n* run autotyping in pre-commit\r\n\r\n* remove final and expand safe (and add annotate-imprecise-magics)", "code": "def __iter__(self) -> Iterator:\n \n # We are explicitly making element iterators.\n if not isinstance(self._values, np.ndarray):\n # Check type instead of dtype to catch DTA/TDA\n return iter(self._values)\n else:\n return map(self._values.item, range(self._values.size))\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 91, "n_words": 30, "vocab_size": 28, "complexity": 2, "nloc": 16, "token_counts": 48, "n_ast_nodes": 80, "n_identifiers": 12, "random_cut": "def __iter__(self) -> Iterator:\n \n # We are explicitly making element iterators.\n if not isinstance(self._values, np.ndarray):\n # Check type instead of dtype to catch DTA/TDA\n return " }, { "id": 165307, 
"commit_id": "6caefb19f4d7c05451fafca182c6eb39fe9901ed", "repo": "pandas", "path": "pandas/tests/window/test_base_indexer.py", "file_name": "test_base_indexer.py", "fun_name": "test_rolling_forward_window", "commit_message": "ENH: Rolling window with step size (GH-15354) (#45765)", "code": "def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs, step):\n # GH 32865\n values = np.arange(10.0)\n values[5] = 100.0\n\n indexer = FixedForwardWindowIndexer(window_size=3)\n\n match = \"Forward-looking windows can't have center=True\"\n with pytest.raises(ValueError, match=match):\n rolling = constructor(values).rolling(window=indexer, center=True)\n getattr(rolling, func)()\n\n match = \"Forward-looking windows don't support setting the closed argument\"\n with pytest.raises(ValueError, match=match):\n rolling = constructor(values).rolling(window=indexer, closed=\"right\")\n getattr(rolling, func)()\n\n rolling = constructor(values).rolling(window=indexer, min_periods=2, step=step)\n result = getattr(rolling, func)()\n\n # Check that the function output matches the explicitly provided array\n expected = constructor(expected)[::step]\n tm.assert_equal(result, expected)\n\n # Check that the rolling function output matches applying an alternative\n # function to the rolling window object\n expected2 = constructor(rolling.apply(lambda x: np_func(x, **np_kwargs)))\n tm.assert_equal(result, expected2)\n\n # Check that the function output matches applying an alternative function\n # if min_periods isn't specified\n # GH 39604: After count-min_periods deprecation, apply(lambda x: len(x))\n # is equivalent to count after setting min_periods=0\n min_periods = 0 if func == \"count\" else None\n rolling3 = constructor(values).rolling(window=indexer, min_periods=min_periods)\n result3 = getattr(rolling3, func)()\n expected3 = constructor(rolling3.apply(lambda x: np_func(x, **np_kwargs)))\n tm.assert_equal(result3, expected3)\n\n\n@pytest.mark.parametrize(\"constructor\", [Series, DataFrame])", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"constructor\", [Series, DataFrame])", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 270, "n_words": 166, "vocab_size": 97, "complexity": 2, "nloc": 23, "token_counts": 262, "n_ast_nodes": 441, "n_identifiers": 36, "random_cut": "def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs, step):\n # GH 32865\n values = np.arange(10.0)\n values[5] = 100.0\n\n indexer = FixedForwardWindowIndexer(window_size=3)\n\n match = \"Forward-looking windows can't have center=True\"\n with pytest.raises(ValueError, match=match):\n rolling = constructor(values).rolling(window=indexer, center=True)\n getattr(rolling, func)()\n\n match = \"Forward-looking windows don't support setting the closed argument\"\n with pytest.raises(ValueError, match=match):\n rolling = constructor(values).rolling(window=indexer, closed=\"right\")\n getattr(rolling, func)()\n\n rolling = constructor(values).rolling(window=indexer, min_periods=2, step=step)\n result = getattr(rolling, func)()\n\n # Check that the function output matches the explicitly provided array\n expected = constructor(expected)[::step]\n tm.assert_equal(result, expected)\n\n # Check that the rolling function output matches applying an alternative\n # function to the rolling window object\n expected2 = constructor(rolling.apply(lambda x: np_func(x, **np_kwargs)))\n tm.assert_equal(result, expected2)\n\n # Check that the 
function output matches applying an alternative function\n # if min_periods isn't specified\n # GH 39604: After count-min_periods deprecation, apply(lambda x: len(x))\n # is" }, { "id": 17706, "commit_id": "50ff6d21431b2f87bc0d7a7c671c34b52d01ef99", "repo": "ccxt", "path": "python/ccxt/async_support/okx.py", "file_name": "okx.py", "fun_name": "set_sandbox_mode", "commit_message": "1.72.35\n\n[ci skip]", "code": "def set_sandbox_mode(self, enable):\n super(okx, self).set_sandbox_mode(enable)\n if enable:\n self.headers['x-simulated-trading'] = '1'\n elif 'x-simulated-trading' in self.headers:\n self.headers = self.omit(self.headers, 'x-simulated-trading')\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 60, "n_words": 18, "vocab_size": 17, "complexity": 3, "nloc": 6, "token_counts": 50, "n_ast_nodes": 85, "n_identifiers": 7, "random_cut": "def set_sandbox_mode(self, enable):\n super(okx, self).set_sandbox_mode(enable)\n if enable:\n self.headers['x-simulated-trading'] = '1'\n elif 'x-simulated-trading' in self.headers:\n self.heade" }, { "id": 266214, "commit_id": "bd29d1581461f1b97cf0bcdaa10752d89e3ac0ae", "repo": "netbox", "path": "netbox/dcim/models/cables.py", "file_name": "cables.py", "fun_name": "from_origin", "commit_message": "Fixes #10579: Mark cable traces terminating to a provider network as complete", "code": "def from_origin(cls, terminations):\n \n from circuits.models import CircuitTermination\n\n if not terminations:\n return None\n\n # Ensure all originating terminations are attached to the same link\n if len(terminations) > 1:\n assert all(t.link == terminations[0].link for t in terminations[1:])\n\n path = []\n position_stack = []\n is_complete = False\n is_active = True\n is_split = False\n\n while terminations:\n\n # Terminations must all be of the same type\n assert all(isinstance(t, type(terminations[0])) for t in terminations[1:])\n\n # Check for a split path (e.g. 
rear port fanning out to multiple front ports with\n # different cables attached)\n if len(set(t.link for t in terminations)) > 1:\n is_split = True\n break\n\n # Step 1: Record the near-end termination object(s)\n path.append([\n object_to_path_node(t) for t in terminations\n ])\n\n # Step 2: Determine the attached link (Cable or WirelessLink), if any\n link = terminations[0].link\n if link is None and len(path) == 1:\n # If this is the start of the path and no link exists, return None\n return None\n elif link is None:\n # Otherwise, halt the trace if no link exists\n break\n assert type(link) in (Cable, WirelessLink)\n\n # Step 3: Record the link and update path status if not \"connected\"\n path.append([object_to_path_node(link)])\n if hasattr(link, 'status') and link.status != LinkStatusChoices.STATUS_CONNECTED:\n is_active = False\n\n # Step 4: Determine the far-end terminations\n if isinstance(link, Cable):\n termination_type = ContentType.objects.get_for_model(terminations[0])\n local_cable_terminations = CableTermination.objects.filter(\n termination_type=termination_type,\n termination_id__in=[t.pk for t in terminations]\n )\n # Terminations must all belong to same end of Cable\n local_cable_end = local_cable_terminations[0].cable_end\n assert all(ct.cable_end == local_cable_end for ct in local_cable_terminations[1:])\n remote_cable_terminations = CableTermination.objects.filter(\n cable=link,\n cable_end='A' if local_cable_end == 'B' else 'B'\n )\n remote_terminations = [ct.termination for ct in remote_cable_terminations]\n else:\n # WirelessLink\n remote_terminations = [link.interface_b] if link.interface_a is terminations[0] else [link.interface_a]\n\n # Step 5: Record the far-end termination object(s)\n path.append([\n object_to_path_node(t) for t in remote_terminations\n ])\n\n # Step 6: Determine the \"next hop\" terminations, if applicable\n if not remote_terminations:\n break\n\n if isinstance(remote_terminations[0], FrontPort):\n # Follow FrontPorts to their corresponding RearPorts\n rear_ports = RearPort.objects.filter(\n pk__in=[t.rear_port_id for t in remote_terminations]\n )\n if len(rear_ports) > 1:\n assert all(rp.positions == 1 for rp in rear_ports)\n elif rear_ports[0].positions > 1:\n position_stack.append([fp.rear_port_position for fp in remote_terminations])\n\n terminations = rear_ports\n\n elif isinstance(remote_terminations[0], RearPort):\n\n if len(remote_terminations) > 1 or remote_terminations[0].positions == 1:\n front_ports = FrontPort.objects.filter(\n rear_port_id__in=[rp.pk for rp in remote_terminations],\n rear_port_position=1\n )\n elif position_stack:\n front_ports = FrontPort.objects.filter(\n rear_port_id=remote_terminations[0].pk,\n rear_port_position__in=position_stack.pop()\n )\n else:\n # No position indicated: path has split, so we stop at the RearPorts\n is_split = True\n break\n\n terminations = front_ports\n\n elif isinstance(remote_terminations[0], CircuitTermination):\n # Follow a CircuitTermination to its corresponding CircuitTermination (A to Z or vice versa)\n term_side = remote_terminations[0].term_side\n assert all(ct.term_side == term_side for ct in remote_terminations[1:])\n circuit_termination = CircuitTermination.objects.filter(\n circuit=remote_terminations[0].circuit,\n term_side='Z' if term_side == 'A' else 'A'\n ).first()\n if circuit_termination is None:\n break\n elif circuit_termination.provider_network:\n # Circuit terminates to a ProviderNetwork\n path.extend([\n [object_to_path_node(circuit_termination)],\n 
[object_to_path_node(circuit_termination.provider_network)],\n ])\n is_complete = True\n break\n elif circuit_termination.site and not circuit_termination.cable:\n # Circuit terminates to a Site\n path.extend([\n [object_to_path_node(circuit_termination)],\n [object_to_path_node(circuit_termination.site)],\n ])\n break\n\n terminations = [circuit_termination]\n\n # Anything else marks the end of the path\n else:\n is_complete = True\n break\n\n return cls(\n path=path,\n is_complete=is_complete,\n is_active=is_active,\n is_split=is_split\n )\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 2280, "n_words": 496, "vocab_size": 226, "complexity": 40, "nloc": 104, "token_counts": 683, "n_ast_nodes": 1077, "n_identifiers": 64, "random_cut": "def from_origin(cls, terminations):\n \n from circuits.models import CircuitTermination\n\n if not terminations:\n return None\n\n # Ensure all originating terminations are attached to the same link\n if len(terminations) > 1:\n assert all(t.link == terminations[0].link for t in terminations[1:])\n\n path = []\n position_stack = []\n is_complete = False\n is_active = True\n is_split = False\n\n while terminations:\n\n # Terminations must all be of the same type\n assert all(isinstance(t, type(terminations[0])) for t in terminations[1:])\n\n # Check for a split path (e.g. rear port fanning out to multiple front ports with\n # different cables attached)\n if len(set(t.link for t in terminations)) > 1:\n is_split = True\n break\n\n # Step 1: Record the near-end termination object(s)\n path.append([\n object_to_path_node(t) for t in terminations\n ])\n\n # Step 2: Determine the attached link (Cable or WirelessLink), if any\n link = terminations[0].link\n if link is None and len(path) == 1:\n # If this is the start of the path and no link exists, return None\n return None\n elif link is None:\n # Otherwise, halt the trace if no link exists\n break\n assert type(link) in (Cable, WirelessLink)\n\n # Step 3: Record the link and update path status if not \"connected\"\n path.append([object_to_path_node(link)])\n if hasattr(link, 'status') and link.status != LinkStatusChoices.STATUS_CONNECTED:\n is_active = False\n\n # Step 4: Determine the far-end terminations\n if isinstance(link, Cable):\n termination_type = ContentType.objects.get_for_model(terminations[0])\n local_cable_terminations = CableTermination.objects.filter(\n termination_type=termination_type,\n termination_id__in=[t.pk for t in terminations]\n )\n # Terminations must all belong to same end of Cable\n local_cable_end = local_cable_terminations[0].cable_end\n assert all(ct.cable_end == local_cable_end for ct in local_cable_terminations[1:])\n remote_cable_terminations = CableTermination.objects.filter(\n cable=link,\n cable_end='A' if local_cable_end == 'B' else 'B'\n )\n remote_terminations = [ct.termination for ct in remote_cable_terminations]\n else:\n # WirelessLink\n remote_terminations = [link.interface_b] if link.interface_a is terminations[0] else [link.interface_a]\n\n # Step 5: Record the far-end termination object(s)\n path.append([\n object_to_path_node(t) for t in remote_terminations\n ])\n\n # Step 6: Determine the \"next hop\" terminations, if applicable\n if not remote_terminations:\n break\n\n if isinstance(remote_terminations[0], FrontPort):\n # Follow FrontPorts to their corresponding RearPorts\n rear_ports = RearPort.objects.filter(\n pk__in=[t.rear_port_id for t in 
remote_terminations]\n )\n if len(rear_ports) > 1:\n assert all(rp.positions == 1 for rp in rear_ports)\n elif rear_ports[0].positions > 1:\n position_stack.append([fp.rear_port_position for fp in remote_terminations])\n\n terminations = rear_ports\n\n elif isinstance(remote_terminations[0], RearPort):\n\n if len(remote_terminations) > 1 or remote_terminations[0].positions == 1:\n front_ports = FrontPort.objects.filter(\n rear_port_id__in=[rp.pk for rp in remote_terminations],\n rear_port_position=1\n )\n elif position_stack:\n front_ports = FrontPort.objects.filter(\n rear_port_id=remote_terminations[0].pk,\n rear_port_position__in=position_stack.pop()\n )\n else:\n # No position indicated: path has split, so we stop at the RearPor" }, { "id": 67535, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/setup/setup_wizard/operations/install_fixtures.py", "file_name": "install_fixtures.py", "fun_name": "add_uom_data", "commit_message": "style: format code with black", "code": "def add_uom_data():\n\t# add UOMs\n\tuoms = json.loads(\n\t\topen(frappe.get_app_path(\"erpnext\", \"setup\", \"setup_wizard\", \"data\", \"uom_data.json\")).read()\n\t)\n\tfor d in uoms:\n\t\tif not frappe.db.exists(\"UOM\", _(d.get(\"uom_name\"))):\n\t\t\tuom_doc = frappe.get_doc(\n\t\t\t\t{\n\t\t\t\t\t\"doctype\": \"UOM\",\n\t\t\t\t\t\"uom_name\": _(d.get(\"uom_name\")),\n\t\t\t\t\t\"name\": _(d.get(\"uom_name\")),\n\t\t\t\t\t\"must_be_whole_number\": d.get(\"must_be_whole_number\"),\n\t\t\t\t\t\"enabled\": 1,\n\t\t\t\t}\n\t\t\t).db_insert()\n\n\t# bootstrap uom conversion factors\n\tuom_conversions = json.loads(\n\t\topen(\n\t\t\tfrappe.get_app_path(\"erpnext\", \"setup\", \"setup_wizard\", \"data\", \"uom_conversion_data.json\")\n\t\t).read()\n\t)\n\tfor d in uom_conversions:\n\t\tif not frappe.db.exists(\"UOM Category\", _(d.get(\"category\"))):\n\t\t\tfrappe.get_doc({\"doctype\": \"UOM Category\", \"category_name\": _(d.get(\"category\"))}).db_insert()\n\n\t\tif not frappe.db.exists(\n\t\t\t\"UOM Conversion Factor\", {\"from_uom\": _(d.get(\"from_uom\")), \"to_uom\": _(d.get(\"to_uom\"))}\n\t\t):\n\t\t\tuom_conversion = frappe.get_doc(\n\t\t\t\t{\n\t\t\t\t\t\"doctype\": \"UOM Conversion Factor\",\n\t\t\t\t\t\"category\": _(d.get(\"category\")),\n\t\t\t\t\t\"from_uom\": _(d.get(\"from_uom\")),\n\t\t\t\t\t\"to_uom\": _(d.get(\"to_uom\")),\n\t\t\t\t\t\"value\": d.get(\"value\"),\n\t\t\t\t}\n\t\t\t).insert(ignore_permissions=True)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 60, "n_words": 97, "vocab_size": 69, "complexity": 6, "nloc": 35, "token_counts": 294, "n_ast_nodes": 533, "n_identifiers": 20, "random_cut": "def add_uom_data():\n\t# add UOMs\n\tuoms = json.loads(\n\t\topen(frappe.get_app_path(\"erpnext\", \"setup\", \"setup_wizard\", \"data\", \"uom_data.json\")).read()\n\t)\n\tfor d in uoms:\n\t\tif not frappe.db.exists(\"UOM\", _(d.get(\"uom_name\"))):\n\t\t\tuom_doc = frappe.get_doc(\n\t\t\t\t{\n\t\t\t\t\t\"doctype\": \"UOM\",\n\t\t\t\t\t\"uom_name\": _(d.get(\"uom_name\")),\n\t\t\t\t\t\"name\": _(d.get(\"uom_name\")),\n\t\t\t\t\t\"must_be_whole_number\": d.get(\"must_be_whole_number\"),\n\t\t\t\t\t\"enabled\": 1,\n\t\t\t\t}\n\t\t\t).db_insert()\n\n\t# bootstrap uom conversion factors\n\tuom_conversions = json.loads(\n\t\topen(\n\t\t\tfrappe.get_app_path(\"erpnext\", \"setup\", \"setup_wizard\", \"data\", \"uom_conversion_data.json\")\n\t\t).read()\n\t)\n\tfor d in 
uom_conversions:\n\t\tif not frappe.db.exists(\"UOM Category\", _(d.get(\"category\"))):\n\t\t\tfrappe.get_doc({\"doctype\": \"UOM Category\", \"category_name\": _(d.get(\"category\"))}).db_insert()\n\n\t\tif not f" }, { "id": 202016, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/cache/tests.py", "file_name": "tests.py", "fun_name": "test_set_many_invalid_key", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_set_many_invalid_key(self):\n msg = KEY_ERRORS_WITH_MEMCACHED_MSG % \":1:key with spaces\"\n with self.assertWarnsMessage(CacheKeyWarning, msg):\n cache.set_many({\"key with spaces\": \"foo\"})\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 40, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 56, "n_identifiers": 8, "random_cut": "def test_set_many_invalid_key(self):\n msg = KEY_ERRORS_WITH_MEMCACHED_MSG % \":1:key with spaces\"\n with self.assertWarnsMessage(CacheKeyWarning, msg):\n cache.set_many({\"key with spaces\":" }, { "id": 336334, "commit_id": "df90f0ce989dcccd7ef2fe9ff085da3197b2f2ad", "repo": "diffusers", "path": "utils/check_repo.py", "file_name": "check_repo.py", "fun_name": "ignore_undocumented", "commit_message": "Add `is_torch_available`, `is_flax_available` (#204)\n\n* Add is__available, refactor import utils\r\n\r\n* deps\r\n\r\n* quality", "code": "def ignore_undocumented(name):\n \n # NOT DOCUMENTED ON PURPOSE.\n # Constants uppercase are not documented.\n if name.isupper():\n return True\n # ModelMixins / Encoders / Decoders / Layers / Embeddings / Attention are not documented.\n if (\n name.endswith(\"ModelMixin\")\n or name.endswith(\"Decoder\")\n or name.endswith(\"Encoder\")\n or name.endswith(\"Layer\")\n or name.endswith(\"Embeddings\")\n or name.endswith(\"Attention\")\n ):\n return True\n # Submodules are not documented.\n if os.path.isdir(os.path.join(PATH_TO_DIFFUSERS, name)) or os.path.isfile(\n os.path.join(PATH_TO_DIFFUSERS, f\"{name}.py\")\n ):\n return True\n # All load functions are not documented.\n if name.startswith(\"load_tf\") or name.startswith(\"load_pytorch\"):\n return True\n # is_xxx_available functions are not documented.\n if name.startswith(\"is_\") and name.endswith(\"_available\"):\n return True\n # Deprecated objects are not documented.\n if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS:\n return True\n # MMBT model does not really work.\n if name.startswith(\"MMBT\"):\n return True\n if name in SHOULD_HAVE_THEIR_OWN_PAGE:\n return True\n return False\n\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 288, "n_words": 123, "vocab_size": 61, "complexity": 18, "nloc": 27, "token_counts": 166, "n_ast_nodes": 298, "n_identifiers": 14, "random_cut": "def ignore_undocumented(name):\n \n # NOT DOCUMENTED ON PURPOSE.\n # Constants uppercase are not documented.\n if name.isupper():\n return True\n # ModelMixins / Encoders / Decoders /" }, { "id": 306636, "commit_id": "050cb275ffd51891fa58121643086dad304776a3", "repo": "core", "path": "homeassistant/components/volumio/media_player.py", "file_name": "media_player.py", "fun_name": "async_media_pause", "commit_message": "Improve entity type hints [v] (#77885)", "code": "async def async_media_pause(self) -> None:\n \n if self._state.get(\"trackType\") == 
\"webradio\":\n await self._volumio.stop()\n else:\n await self._volumio.pause()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 57, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 6, "token_counts": 38, "n_ast_nodes": 72, "n_identifiers": 7, "random_cut": "async def async_media_pause(self) -> None:\n \n if self._state.get(\"trackType\") == \"webradio\":\n await self._volumio" }, { "id": 45157, "commit_id": "3c4524b4ec2b42a8af0a8c7b9d8f1d065b2bfc83", "repo": "airflow", "path": "tests/providers/microsoft/azure/hooks/test_azure_cosmos.py", "file_name": "test_azure_cosmos.py", "fun_name": "test_delete_database", "commit_message": "(AzureCosmosDBHook) Update to latest Cosmos API (#21514)\n\n* Bumping the ms azure cosmos providers to work with the 4.x azure python sdk api\r\n\r\nCo-authored-by: gatewoodb ", "code": "def test_delete_database(self, mock_cosmos):\n hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')\n hook.delete_database(self.test_database_name)\n expected_calls = [mock.call().delete_database('test_database_name')]\n mock_cosmos.assert_any_call(self.test_end_point, {'masterKey': self.test_master_key})\n mock_cosmos.assert_has_calls(expected_calls)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 48, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 6, "token_counts": 59, "n_ast_nodes": 100, "n_identifiers": 15, "random_cut": "def test_delete_database(self, mock_cosmos):\n hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_test_key_id')\n hook.delete_database(self.test_database_name)\n expected_calls = [mock.call().delete_database('test_database_name')]\n mock_cosmos.assert_any_call(self.test_end_point, {'masterKey': self.test_master_key})\n mock_cosmos.assert_has_calls(expected_calls)\n" }, { "id": 163950, "commit_id": "5e40ff55ae2a4e2a1eaab0c924e5c369c591523d", "repo": "pandas", "path": "pandas/tests/arrays/sparse/test_arithmetics.py", "file_name": "test_arithmetics.py", "fun_name": "test_float_same_index_comparison", "commit_message": "TST/CLN: organize SparseArray tests (#45693)", "code": "def test_float_same_index_comparison(self, kind):\n # when sp_index are the same\n values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])\n rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])\n\n a = SparseArray(values, kind=kind)\n b = SparseArray(rvalues, kind=kind)\n self._check_comparison_ops(a, b, values, rvalues)\n\n values = np.array([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])\n rvalues = np.array([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])\n\n a = SparseArray(values, kind=kind, fill_value=0)\n b = SparseArray(rvalues, kind=kind, fill_value=0)\n self._check_comparison_ops(a, b, values, rvalues)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 159, "n_words": 83, "vocab_size": 39, "complexity": 1, "nloc": 11, "token_counts": 243, "n_ast_nodes": 268, "n_identifiers": 13, "random_cut": "def test_float_same_index_comparison(self, kind):\n # when sp_index are the same\n values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])\n rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])\n\n a = SparseArray(values, kind=kind)\n b = SparseArray(rvalues, kind=kind)\n self._check_comparison_ops(a, 
b, values, rvalues)\n\n values = np.array([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])\n rvalues = np.array([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])\n\n a = SparseArray(values, kind=kind, fill_value=0)\n b = SparseArray(rvalues, kind=kind, fill_value=0)\n self._check_comparison_ops(a, b, values, rvalues)\n" }, { "id": 115913, "commit_id": "6eb408a9973fbc24c973d6524dc34cb9b1e0ee05", "repo": "mindsdb", "path": "mindsdb/api/mongo/responders/coll_stats.py", "file_name": "coll_stats.py", "fun_name": "result", "commit_message": "del model interface", "code": "def result(self, query, request_env, mindsdb_env, session):\n db = query['$db']\n collection = query['collStats']\n\n scale = query.get('scale')\n\n if db != 'mindsdb' or collection == 'predictors' or scale is None:\n # old behavior\n # NOTE real answer is huge, i removed most data from it.\n res = {\n 'ns': \"db.collection\",\n 'size': 1,\n 'count': 0,\n 'avgObjSize': 1,\n 'storageSize': 16384,\n 'capped': False,\n 'wiredTiger': {\n },\n 'nindexes': 1,\n 'indexDetails': {\n },\n 'totalIndexSize': 16384,\n 'indexSizes': {\n '_id_': 16384\n },\n 'ok': 1\n }\n\n res['ns'] = f\"{db}.{collection}\"\n if db == 'mindsdb' and collection == 'predictors':\n res['count'] = len(mindsdb_env['model_controller'].get_models())\n else:\n\n ident_parts = [collection]\n if scale is not None:\n ident_parts.append(scale)\n\n ast_query = Describe(Identifier(\n parts=ident_parts\n ))\n\n data = run_sql_command(mindsdb_env, ast_query)\n res = {\n 'data': data\n }\n\n res['ns'] = f\"{db}.{collection}\"\n\n return res\n\n\nresponder = Responce()\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 616, "n_words": 122, "vocab_size": 79, "complexity": 7, "nloc": 39, "token_counts": 189, "n_ast_nodes": 359, "n_identifiers": 23, "random_cut": "def result(self, query, request_env, mindsdb_env, session):\n db = query['$db']\n collection = query['collStats']\n\n scale = query.get('scale')\n\n if db != 'mindsdb' or collection == 'predictors' or scale is None:\n # old behavior\n # NOTE real answer is huge, i removed most da" }, { "id": 51078, "commit_id": "0ea0f8e8757c3844a98d74013ae3708836bd6355", "repo": "PaddleHub", "path": "modules/image/Image_editing/colorization/user_guided_colorization/test.py", "file_name": "test.py", "fun_name": "setUpClass", "commit_message": "update user_guided_colorization (#1994)\n\n* update user_guided_colorization\r\n\r\n* add clean func", "code": "def setUpClass(cls) -> None:\n img_url = 'https://unsplash.com/photos/1sLIu1XKQrY/download?ixid=MnwxMjA3fDB8MXxhbGx8MTJ8fHx8fHwyfHwxNjYyMzQxNDUx&force=true&w=640'\n if not os.path.exists('tests'):\n os.makedirs('tests')\n response = requests.get(img_url)\n assert response.status_code == 200, 'Network Error.'\n with open('tests/test.jpg', 'wb') as f:\n f.write(response.content)\n cls.module = hub.Module(name=\"user_guided_colorization\")\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 92, "n_words": 29, "vocab_size": 27, "complexity": 2, "nloc": 9, "token_counts": 73, "n_ast_nodes": 133, "n_identifiers": 19, "random_cut": "def setUpClass(cls) -> None:\n img_url = 'https://unsplash.com/photos/1sLIu1XKQrY/download?ixid=MnwxMjA3fDB8MXxhbGx8MTJ8fHx8fHwyfHwxNjYyMzQxNDUx&force=true&w=640'\n if not os.path.exists('tests'):" }, { "id": 160402, "commit_id": 
"54a7b0b9843e2e89b217eaa38550752bb4754119", "repo": "numpy", "path": "numpy/core/setup_common.py", "file_name": "setup_common.py", "fun_name": "check_api_version", "commit_message": "make MismatchCAPIWarnining into MismatchCAPIError", "code": "def check_api_version(apiversion, codegen_dir):\n \n curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)\n\n # If different hash, it means that the api .txt files in\n # codegen_dir have been updated without the API version being\n # updated. Any modification in those .txt files should be reflected\n # in the api and eventually abi versions.\n # To compute the checksum of the current API, use numpy/core/cversions.py\n if not curapi_hash == api_hash:\n msg = (\"API mismatch detected, the C API version \"\n \"numbers have to be updated. Current C api version is \"\n f\"{apiversion}, with checksum {curapi_hash}, but recorded \"\n f\"checksum in core/codegen_dir/cversions.txt is {api_hash}. If \"\n \"functions were added in the C API, you have to update \"\n f\"C_API_VERSION in {__file__}.\"\n )\n raise MismatchCAPIError(msg)\n\n\nFUNC_CALL_ARGS = {}\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 242, "n_words": 121, "vocab_size": 82, "complexity": 2, "nloc": 11, "token_counts": 42, "n_ast_nodes": 102, "n_identifiers": 10, "random_cut": "def check_api_version(apiversion, codegen_dir):\n \n curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)\n\n # If different hash, it means that the api .txt files in\n # codegen_dir have been updated without the API version being\n # updated. Any modification in those .txt files should be reflected\n # in the api and eventually abi versions.\n # To compute the checksum of the current API, use numpy/core/cversions.py\n if not curapi_hash == api_hash:\n msg = (\"API mismatc" }, { "id": 265038, "commit_id": "537383e0713645564ba2949e37dc2cbf41eb3317", "repo": "netbox", "path": "netbox/dcim/tests/test_cablepaths.py", "file_name": "test_cablepaths.py", "fun_name": "test_214_interface_to_providernetwork_via_circuit", "commit_message": "Add cable topology tests", "code": "def test_214_interface_to_providernetwork_via_circuit(self):\n \n interface1 = Interface.objects.create(device=self.device, name='Interface 1')\n providernetwork = ProviderNetwork.objects.create(name='Provider Network 1', provider=self.circuit.provider)\n circuittermination1 = CircuitTermination.objects.create(circuit=self.circuit, site=self.site, term_side='A')\n circuittermination2 = CircuitTermination.objects.create(circuit=self.circuit, provider_network=providernetwork, term_side='Z')\n\n # Create cable 1\n cable1 = Cable(\n a_terminations=[interface1],\n b_terminations=[circuittermination1]\n )\n cable1.save()\n self.assertPathExists(\n (interface1, cable1, circuittermination1, circuittermination2, providernetwork),\n is_active=True\n )\n self.assertEqual(CablePath.objects.count(), 1)\n\n # Delete cable 1\n cable1.delete()\n self.assertEqual(CablePath.objects.count(), 0)\n interface1.refresh_from_db()\n self.assertPathIsNotSet(interface1)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 216, "n_words": 53, "vocab_size": 43, "complexity": 1, "nloc": 19, "token_counts": 175, "n_ast_nodes": 278, "n_identifiers": 31, "random_cut": "def test_214_interface_to_providernetwork_via_circuit(self):\n \n interface1 = 
Interface.objects.create(device=self.device, name='Interface 1')\n providernetwork = ProviderNetwork.objects.create(name='Provider Network 1', provider=self.circuit.provider)\n circuittermination1 = CircuitTermination.objects.create(circuit=self.circuit, site=self.site, term_side='A')\n circuittermination2 = CircuitTerminati" }, { "id": 248152, "commit_id": "116a4c8340b729ffde43be33df24d417384cb28b", "repo": "synapse", "path": "tests/handlers/test_receipts.py", "file_name": "test_receipts.py", "fun_name": "test_filters_out_event_with_only_hidden_receipts_and_ignores_the_rest", "commit_message": "Implement changes to MSC2285 (hidden read receipts) (#12168)\n\n* Changes hidden read receipts to be a separate receipt type\r\n (instead of a field on `m.read`).\r\n* Updates the `/receipts` endpoint to accept `m.fully_read`.", "code": "def test_filters_out_event_with_only_hidden_receipts_and_ignores_the_rest(self):\n self._test_filters_hidden(\n [\n {\n \"content\": {\n \"$14356419edgd14394fHBLK:matrix.org\": {\n ReceiptTypes.READ_PRIVATE: {\n \"@rikj:jki.re\": {\n \"ts\": 1436451550453,\n },\n }\n },\n \"$1435641916114394fHBLK:matrix.org\": {\n ReceiptTypes.READ: {\n \"@user:jki.re\": {\n \"ts\": 1436451550453,\n }\n }\n },\n },\n \"room_id\": \"!jEsUZKDJdhlrceRyVU:example.org\",\n \"type\": \"m.receipt\",\n }\n ],\n [\n {\n \"content\": {\n \"$1435641916114394fHBLK:matrix.org\": {\n ReceiptTypes.READ: {\n \"@user:jki.re\": {\n \"ts\": 1436451550453,\n }\n }\n }\n },\n \"room_id\": \"!jEsUZKDJdhlrceRyVU:example.org\",\n \"type\": \"m.receipt\",\n }\n ],\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 919, "n_words": 59, "vocab_size": 22, "complexity": 1, "nloc": 40, "token_counts": 103, "n_ast_nodes": 185, "n_identifiers": 6, "random_cut": "def test_filters_out_event_with_only_hidden_receipts_and_ignores_the_rest(self):\n self._test_filters_hidden(\n [\n {\n \"content\": {\n \"$14356419edgd14394fHBLK:matrix.org\": {\n ReceiptTypes.READ_PRIVATE: {\n \"@rikj:jki.re\": {\n \"ts\": 1436451550453,\n },\n }\n },\n \"$1435641916114394fHBLK:matrix.org\": {\n ReceiptTypes.READ: {\n \"@user:jki.re\": {\n \"ts\": 1436451550453,\n }\n }\n },\n },\n \"room_id\": \"!jEsUZKDJdhlrceRyVU:example.org\",\n " }, { "id": 46907, "commit_id": "34d2dd8853849d00de2e856b1f79cffe4da6d990", "repo": "airflow", "path": "tests/api_connexion/endpoints/test_dag_endpoint.py", "file_name": "test_dag_endpoint.py", "fun_name": "test_should_response_200_for_null_start_date", "commit_message": "Add more fields to REST API dags/dag_id/details endpoint (#22756)\n\nAdded more fields to the DAG details endpoint, which is the endpoint for\r\ngetting DAG `object` details", "code": "def test_should_response_200_for_null_start_date(self):\n response = self.client.get(\n f\"/api/v1/dags/{self.dag3_id}/details\", environ_overrides={'REMOTE_USER': \"test\"}\n )\n assert response.status_code == 200\n last_parsed = response.json[\"last_parsed\"]\n expected = {\n \"catchup\": True,\n \"concurrency\": 16,\n \"max_active_tasks\": 16,\n \"dag_id\": \"test_dag3\",\n \"dag_run_timeout\": None,\n \"default_view\": \"grid\",\n \"description\": None,\n \"doc_md\": None,\n \"fileloc\": __file__,\n \"file_token\": FILE_TOKEN,\n \"is_paused\": None,\n \"is_active\": None,\n \"is_subdag\": False,\n \"orientation\": \"LR\",\n \"owners\": ['airflow'],\n \"params\": {},\n \"schedule_interval\": {\n \"__type\": \"TimeDelta\",\n \"days\": 1,\n 
\"microseconds\": 0,\n \"seconds\": 0,\n },\n \"start_date\": None,\n \"tags\": [],\n \"timezone\": \"Timezone('UTC')\",\n \"max_active_runs\": 16,\n \"pickle_id\": None,\n \"end_date\": None,\n 'is_paused_upon_creation': None,\n 'last_parsed': last_parsed,\n 'render_template_as_native_obj': False,\n }\n assert response.json == expected\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 501, "n_words": 85, "vocab_size": 67, "complexity": 1, "nloc": 40, "token_counts": 173, "n_ast_nodes": 319, "n_identifiers": 13, "random_cut": "def test_should_response_200_for_null_start_date(self):\n response = self.client.get(\n f\"/api/v1/dags/{self.dag3_id}/details\", environ_overrides={'REMOTE_USER': \"test\"}\n )\n assert response.status_code == 200\n last_parsed = response.json[\"last_parsed\"]\n expected = {\n \"catchup\": True,\n \"concurrency\": 16,\n \"max_active_tasks\": 16,\n \"dag_id\": \"test_dag3\",\n \"dag_run_timeout\": None,\n \"default_view\": \"grid\",\n \"description\": None,\n \"doc_md\": None,\n \"fileloc\": __file__,\n \"file_token\": FILE_TOKEN,\n \"is_paused\": None,\n \"is_active\": None,\n \"is_subdag\": False,\n \"orientation\": \"LR\",\n \"owners\": ['airflow'],\n \"params\": {},\n \"schedule_interval\": {\n \"__type\": \"TimeDelta\",\n \"days\": 1,\n \"microseconds\": 0,\n \"seconds\": 0,\n },\n \"start_date\": None,\n \"tags\": [],\n \"timez" }, { "id": 268893, "commit_id": "c223693db91473c9a71c330d4e38a751d149f93c", "repo": "keras", "path": "keras/applications/resnet_rs.py", "file_name": "resnet_rs.py", "fun_name": "decode_predictions", "commit_message": "KERAS application addition of Resnet-RS model", "code": "def decode_predictions(preds, top=5):\n return imagenet_utils.decode_predictions(preds, top=top)\n\npreprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(\n mode='',\n ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,\n error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)\ndecode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__\n\nDOC = \n\nsetattr(ResNetRS50, '__doc__', ResNetRS50.__doc__ + DOC)\nsetattr(ResNetRS152, '__doc__', ResNetRS152.__doc__ + DOC)\nsetattr(ResNetRS200, '__doc__', ResNetRS200.__doc__ + DOC)\nsetattr(ResNetRS270, '__doc__', ResNetRS270.__doc__ + DOC)\nsetattr(ResNetRS350, '__doc__', ResNetRS350.__doc__ + DOC)\nsetattr(ResNetRS420, '__doc__', ResNetRS420.__doc__ + DOC)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 48, "n_words": 47, "vocab_size": 30, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 205, "n_identifiers": 21, "random_cut": "def decode_predictions(preds, top=5):\n return imagenet_utils.decode_predictions(preds, top=top)\n\npreprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(\n mode='',\n ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,\n error=imagenet_uti" }, { "id": 169461, "commit_id": "159a91754159545df743ff89fc51e83d5421993b", "repo": "pandas", "path": "pandas/tests/generic/test_frame.py", "file_name": "test_frame.py", "fun_name": "test_validate_bool_args", "commit_message": "fix pylint bad-super-call (#48896)\n\n* fix pylint bad-super-call\r\n\r\n* fix black pre commit\r\n\r\n* Update pyproject.toml\r\n\r\nCo-authored-by: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com>\r\n\r\n* change super() to df.copy()\r\n\r\nCo-authored-by: Marco Edward Gorelli 
<33491632+MarcoGorelli@users.noreply.github.com>", "code": "def test_validate_bool_args(self, value):\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n\n msg = 'For argument \"inplace\" expected type bool, received type'\n with pytest.raises(ValueError, match=msg):\n df.copy().rename_axis(mapper={\"a\": \"x\", \"b\": \"y\"}, axis=1, inplace=value)\n\n with pytest.raises(ValueError, match=msg):\n df.copy().drop(\"a\", axis=1, inplace=value)\n\n with pytest.raises(ValueError, match=msg):\n df.copy().fillna(value=0, inplace=value)\n\n with pytest.raises(ValueError, match=msg):\n df.copy().replace(to_replace=1, value=7, inplace=value)\n\n with pytest.raises(ValueError, match=msg):\n df.copy().interpolate(inplace=value)\n\n with pytest.raises(ValueError, match=msg):\n df.copy()._where(cond=df.a > 2, inplace=value)\n\n with pytest.raises(ValueError, match=msg):\n df.copy().mask(cond=df.a > 2, inplace=value)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 206, "n_words": 67, "vocab_size": 38, "complexity": 1, "nloc": 17, "token_counts": 254, "n_ast_nodes": 412, "n_identifiers": 24, "random_cut": "def test_validate_bool_args(self, value):\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n\n msg = 'For argument \"inplace\" expected type bool, received type'\n with pytest.raises(ValueError, match=msg):\n df.copy().rename_axis(mapper={\"a\": \"x\", \"b\": \"y\"}, axis=1, inplace=value)\n\n with pytest.raises(ValueError, match=msg):\n df.copy().drop(\"a\", axis=1, inplace=value)\n\n with pytest.raises(ValueError, match=msg):\n df.copy().fillna(value=0, inplace=value)\n\n with pytest.raises(ValueError, match=msg):\n df.copy().replace(to_replace=1, value=7, inplace=value)\n\n with pytest.raises(ValueError, match=msg):\n df.copy().interpolate(inplace=value)\n\n" }, { "id": 164333, "commit_id": "419331c598a097896edae40bc0687e4127f97b6b", "repo": "pandas", "path": "pandas/tests/arrays/datetimes/test_constructors.py", "file_name": "test_constructors.py", "fun_name": "test_from_pandas_array", "commit_message": "⬆️ UPGRADE: Autoupdate pre-commit config (#45752)\n\nCo-authored-by: MarcoGorelli ", "code": "def test_from_pandas_array(self):\n arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9\n\n result = DatetimeArray._from_sequence(arr)._with_freq(\"infer\")\n\n expected = pd.date_range(\"1970-01-01\", periods=5, freq=\"H\")._data\n tm.assert_datetime_array_equal(result, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 47, "n_words": 20, "vocab_size": 17, "complexity": 1, "nloc": 5, "token_counts": 69, "n_ast_nodes": 112, "n_identifiers": 20, "random_cut": "def test_from_pandas_array(self):\n arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9\n\n result = Da" }, { "id": 167058, "commit_id": "b99ec4a9c92e288ace6b63072ffc4c296f8e5dc9", "repo": "pandas", "path": "pandas/tests/frame/indexing/test_setitem.py", "file_name": "test_setitem.py", "fun_name": "test_setitem_partial_column_inplace", "commit_message": "REF: Add Manager.column_setitem to set values into a single column (without intermediate series) (#47074)", "code": "def test_setitem_partial_column_inplace(self, consolidate, using_array_manager):\n # This setting should be in-place, regardless of whether frame is\n # single-block or multi-block\n # GH#304 this used to be incorrectly not-inplace, in which case\n # we 
needed to ensure _item_cache was cleared.\n\n df = DataFrame(\n {\"x\": [1.1, 2.1, 3.1, 4.1], \"y\": [5.1, 6.1, 7.1, 8.1]}, index=[0, 1, 2, 3]\n )\n df.insert(2, \"z\", np.nan)\n if not using_array_manager:\n if consolidate:\n df._consolidate_inplace()\n assert len(df._mgr.blocks) == 1\n else:\n assert len(df._mgr.blocks) == 2\n\n zvals = df[\"z\"]._values\n\n df.loc[2:, \"z\"] = 42\n\n expected = Series([np.nan, np.nan, 42, 42], index=df.index, name=\"z\")\n tm.assert_series_equal(df[\"z\"], expected)\n\n # check setting occurred in-place\n tm.assert_numpy_array_equal(zvals, expected.values)\n assert np.shares_memory(zvals, df[\"z\"]._values)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 285, "n_words": 101, "vocab_size": 86, "complexity": 4, "nloc": 19, "token_counts": 210, "n_ast_nodes": 280, "n_identifiers": 25, "random_cut": "def test_setitem_partial_column_inplace(self, consolidate, using_array_manager):\n # This setting should be in-place, regardless of whether frame is\n # single-block or multi-block\n # GH#304 this used to be i" }, { "id": 248902, "commit_id": "efee345b454ac5e6aeb4b4128793be1fbc308b91", "repo": "synapse", "path": "tests/rest/client/test_rooms.py", "file_name": "test_rooms.py", "fun_name": "test_search_filter_not_labels", "commit_message": "Remove unnecessary `json.dumps` from tests (#13303)", "code": "def test_search_filter_not_labels(self) -> None:\n \n request_data = {\n \"search_categories\": {\n \"room_events\": {\n \"search_term\": \"label\",\n \"filter\": self.FILTER_NOT_LABELS,\n }\n }\n }\n\n self._send_labelled_messages_in_room()\n\n channel = self.make_request(\n \"POST\", \"/search?access_token=%s\" % self.tok, request_data\n )\n\n results = channel.json_body[\"search_categories\"][\"room_events\"][\"results\"]\n\n self.assertEqual(\n len(results),\n 4,\n [result[\"result\"][\"content\"] for result in results],\n )\n self.assertEqual(\n results[0][\"result\"][\"content\"][\"body\"],\n \"without label\",\n results[0][\"result\"][\"content\"][\"body\"],\n )\n self.assertEqual(\n results[1][\"result\"][\"content\"][\"body\"],\n \"without label\",\n results[1][\"result\"][\"content\"][\"body\"],\n )\n self.assertEqual(\n results[2][\"result\"][\"content\"][\"body\"],\n \"with wrong label\",\n results[2][\"result\"][\"content\"][\"body\"],\n )\n self.assertEqual(\n results[3][\"result\"][\"content\"][\"body\"],\n \"with two wrong labels\",\n results[3][\"result\"][\"content\"][\"body\"],\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 452, "n_words": 67, "vocab_size": 42, "complexity": 2, "nloc": 40, "token_counts": 231, "n_ast_nodes": 404, "n_identifiers": 13, "random_cut": "def test_search_filter_not_labels(self) -> None:\n \n request_data = {\n \"search_categories\": {\n \"room_events\": {\n \"search_term\": \"label\",\n \"filter\": self.FILTER_NOT_LABELS,\n }\n }\n }\n\n self._send_labelled_messages_in_room()\n\n channel = self.make_request(\n \"POST\", \"/search?access_token=%s\" % self.tok, request_data\n )\n\n results = channel.json_body[\"search_categories\"][\"room_events\"][\"results\"]\n\n self.assertEqual(\n len(results),\n 4,\n [result[\"result\"][\"content\"] for result in results],\n )\n self.assertEqual(\n results[0][\"result\"][\"content\"][\"body\"],\n \"without label\",\n results[0][\"result\"][\"content\"][\"body\"],\n )\n 
self.assertEqual(\n results[1][\"result\"][\"content\"][\"body\"],\n \"without label\",\n results[1][\"result\"][\"content\"][\"body\"],\n )\n self.assertEqual(\n results[2][\"result\"][\"content\"][\"body\"],\n \"with wrong label\",\n results[2][\"result\"][\"content\"][\"body\"],\n )\n self.assertEqual(\n results[3" }, { "id": 157549, "commit_id": "ca86da3a30c4e080d4db8c25fca73de843663cb4", "repo": "stablediffusion", "path": "ldm/modules/image_degradation/utils_image.py", "file_name": "utils_image.py", "fun_name": "tensor2img", "commit_message": "release more models", "code": "def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):\n \n tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp\n tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]\n n_dim = tensor.dim()\n if n_dim == 4:\n n_img = len(tensor)\n img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()\n img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR\n elif n_dim == 3:\n img_np = tensor.numpy()\n img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR\n elif n_dim == 2:\n img_np = tensor.numpy()\n else:\n raise TypeError(\n 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))\n if out_type == np.uint8:\n img_np = (img_np * 255.0).round()\n # Important. Unlike matlab, numpy.unit8() WILL NOT round by default.\n return img_np.astype(out_type)\n\n\n\n\n", "url": "https://github.com/Stability-AI/stablediffusion.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 225, "n_words": 117, "vocab_size": 77, "complexity": 5, "nloc": 24, "token_counts": 228, "n_ast_nodes": 358, "n_identifiers": 27, "random_cut": "def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):\n \n tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp\n tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]\n n_dim = tensor.dim()\n if n_dim == 4:\n n_img = len(tensor)\n img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()\n img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR\n elif n_dim == 3:\n img_np = tensor.numpy()\n img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR\n elif n_dim == 2:\n img_np = tensor.numpy()\n else:\n raise TypeError(\n 'Only support 4D, 3D and 2D tensor. 
But received with dimension: {:d}'.format(n_dim))\n if out_type == np.uint8" }, { "id": 285650, "commit_id": "3d0190e35bae4092f52025377d8604b3a6a17bfa", "repo": "OpenBBTerminal", "path": "openbb_terminal/featflags_controller.py", "file_name": "featflags_controller.py", "fun_name": "call_color", "commit_message": "New path for .env (#2508)\n\n* add log path\r\n\r\n* add test to check if log file is in correct dir\r\n\r\n* env path\r\n\r\n* black\r\n\r\n* mypy fix\r\n\r\n* linting\r\n\r\n* add make_paths and change references\r\n\r\n* terminal change\r\n\r\n* change constants to paths\r\n\r\n* change names\r\n\r\n* black\r\n\r\n* mypy\r\n\r\n* mypy\r\n\r\n* pylint else\r\n\r\n* add make paths\r\n\r\n* remove custom user dir name\r\n\r\nCo-authored-by: Chavithra ", "code": "def call_color(self, _):\n \n obbff.USE_COLOR = not obbff.USE_COLOR\n set_key(obbff.USER_ENV_FILE, \"OPENBB_USE_COLOR\", str(obbff.USE_COLOR))\n console.print(\"\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 39, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 37, "n_ast_nodes": 64, "n_identifiers": 10, "random_cut": "def call_color(self, _):\n \n obbff.USE_COLOR = not obbff.USE_COLOR\n set_key(obbff.USER_ENV_" }, { "id": 33345, "commit_id": "d4dbd7ca59bd50dd034e7995cb36e5efed3d9512", "repo": "transformers", "path": "tests/generation/test_generation_beam_search.py", "file_name": "test_generation_beam_search.py", "fun_name": "check_beam_scorer_update", "commit_message": "Generate: get the correct beam index on eos token (#18851)", "code": "def check_beam_scorer_update(self, input_ids, next_tokens, next_indices, next_scores):\n # check too many eos tokens\n beam_scorer = self.prepare_beam_scorer()\n\n tokens = next_tokens.clone()\n tokens[0, :] = self.eos_token_id\n\n with self.parent.assertRaises(ValueError):\n beam_scorer.process(input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id)\n\n # check all batches are done\n beam_scorer = self.prepare_beam_scorer()\n\n tokens = next_tokens.clone()\n tokens[:, : self.num_beams] = self.eos_token_id\n beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device)\n beam_indices = tuple(tuple(b) for b in beam_indices)\n beam_scorer.process(\n input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices\n )\n # beam scorer should be done\n self.parent.assertTrue(beam_scorer.is_done)\n\n # check\n beam_scorer = self.prepare_beam_scorer()\n\n tokens = next_tokens.clone()\n tokens[:, 1] = self.eos_token_id\n beam_outputs = beam_scorer.process(\n input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices\n )\n output_scores = beam_outputs[\"next_beam_scores\"]\n output_tokens = beam_outputs[\"next_beam_tokens\"]\n output_indices = beam_outputs[\"next_beam_indices\"]\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 305, "n_words": 105, "vocab_size": 59, "complexity": 3, "nloc": 44, "token_counts": 432, "n_ast_nodes": 356, "n_identifiers": 30, "random_cut": "def check_beam_scorer_update(self, input_ids, next_tokens, next_indices, next_scores):\n # check too many eos tokens\n beam_scorer = self.prepare_beam_scorer()\n\n tokens = next_tokens.clone()\n tokens[0, :] = self.eos_token_id\n\n with 
self.parent.assertRaises(ValueError):\n beam_scorer.process(input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id)\n\n # check all batches are done\n beam_scorer = self.prepare_beam_scorer()\n\n tokens = next_tokens.clone()\n tokens[:, : self.num_beams] = self.eos_token_id\n beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device)\n beam_indices = tuple(tuple(b) for b in beam_indices)\n beam_scorer.process(\n input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices\n )\n # beam scorer should be done\n self.parent.assertTrue(beam_scorer.is_don" }, { "id": 706, "commit_id": "530a1aa0eb7f10555b7dcf61c27e3230e019e9c6", "repo": "PySyft", "path": "packages/syft/tests/syft/core/tensor/adp/entity_list_test.py", "file_name": "entity_list_test.py", "fun_name": "test_entity_list_serde", "commit_message": "Added capnp to sy.serialize / sy.deserialize interface\n\n- Added np.array utf-8 string serialization", "code": "def test_entity_list_serde() -> None:\n entities = [\"🥒pickles\", \"madhava\", \"short\", \"muchlongername\", \"a\", \"🌶\"]\n entity_list = EntityList.from_objs([Entity(name=entity) for entity in entities])\n ser = sy.serialize(entity_list, to_bytes=True)\n de = sy.deserialize(ser, from_bytes=True)\n\n de.one_hot_lookup == entity_list.one_hot_lookup\n assert entity_list == de\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 51, "n_words": 34, "vocab_size": 28, "complexity": 2, "nloc": 7, "token_counts": 75, "n_ast_nodes": 123, "n_identifiers": 16, "random_cut": "def test_entity_list_serde() -> None:\n entities = [\"🥒pickles\", \"madhava\", \"short\", \"muchlongername\", \"a\", \"🌶\"]\n entity_list = EntityList.from_objs([Entity(name=entity) for entity in entities])\n ser = sy.serialize(entity_list, to_bytes=True)\n " }, { "id": 20246, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/platformdirs/windows.py", "file_name": "windows.py", "fun_name": "_pick_get_win_folder", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def _pick_get_win_folder() -> Callable[[str], str]:\n if hasattr(ctypes, \"windll\"):\n return get_win_folder_via_ctypes\n try:\n import winreg # noqa: F401\n except ImportError:\n return get_win_folder_from_env_vars\n else:\n return get_win_folder_from_registry\n\n\nget_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder())\n\n__all__ = [\n \"Windows\",\n]\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 71, "n_words": 31, "vocab_size": 28, "complexity": 3, "nloc": 9, "token_counts": 36, "n_ast_nodes": 94, "n_identifiers": 14, "random_cut": "def _pick_get_win_folder() -> Callable[[str], str]:\n if hasattr(ctypes, \"windll\"):\n return 
get_win_folder_via_ctypes\n try:\n import winreg # noqa: F401\n except ImportErr" }, { "id": 129152, "commit_id": "5a7f6e4fddabd151baf96d64d6c45e5964766653", "repo": "ray", "path": "python/ray/autoscaler/_private/fake_multi_node/node_provider.py", "file_name": "node_provider.py", "fun_name": "_save_node_state", "commit_message": "[rfc][ci] create fake docker-compose cluster environment (#20256)\n\nFollowing #18987 this PR adds a docker-compose based local multi node cluster.\r\n\r\nThe fake multinode docker comprises two parts. The docker_monitor.py script is a watch script calling docker compose up whenever the docker-compose.yaml changes. The node provider creates and updates the docker compose according to the autoscaling requirements.\r\n\r\nThis mode fully supports autoscaling and comes with test utilities to start and connect to docker-compose autoscaling environments. There's also a sample test case showing how this can be used.", "code": "def _save_node_state(self):\n with open(self._node_state_path, \"wt\") as f:\n json.dump(self._nodes, f)\n\n # Make sure this is always writeable from inside the containers\n if not self.in_docker_container:\n # Only chmod from the outer container\n os.chmod(self._node_state_path, 0o777)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 85, "n_words": 32, "vocab_size": 29, "complexity": 2, "nloc": 5, "token_counts": 43, "n_ast_nodes": 74, "n_identifiers": 11, "random_cut": "def _save_node_state(self):\n with open(self._node_state_path, \"wt\") as f:\n json.dump(s" }, { "id": 200142, "commit_id": "86975d1b114689b68dd9f7b953602f318c4497ec", "repo": "sympy", "path": "sympy/physics/continuum_mechanics/truss.py", "file_name": "truss.py", "fun_name": "_draw_nodes", "commit_message": "rectified non-alignment of nodes", "code": "def _draw_nodes(self, subs_dict):\n node_markers = []\n\n for node in list(self._node_coordinates):\n if (type(self._node_coordinates[node][0]) in (Symbol, Quantity)):\n if self._node_coordinates[node][0] in list(subs_dict):\n self._node_coordinates[node][0] = subs_dict[self._node_coordinates[node][0]]\n else:\n raise ValueError(\"provided substituted dictionary is not adequate\")\n elif (type(self._node_coordinates[node][0]) == Mul):\n objects = self._node_coordinates[node][0].as_coeff_Mul()\n for object in objects:\n if type(object) in (Symbol, Quantity):\n if subs_dict==None or object not in list(subs_dict):\n raise ValueError(\"provided substituted dictionary is not adequate\")\n else:\n self._node_coordinates[node][0] /= object\n self._node_coordinates[node][0] *= subs_dict[object]\n\n if (type(self._node_coordinates[node][1]) in (Symbol, Quantity)):\n if self._node_coordinates[node][1] in list(subs_dict):\n self._node_coordinates[node][1] = subs_dict[self._node_coordinates[node][1]]\n else:\n raise ValueError(\"provided substituted dictionary is not adequate\")\n elif (type(self._node_coordinates[node][1]) == Mul):\n objects = self._node_coordinates[node][1].as_coeff_Mul()\n for object in objects:\n if type(object) in (Symbol, Quantity):\n if subs_dict==None or object not in list(subs_dict):\n raise ValueError(\"provided substituted dictionary is not adequate\")\n else:\n self._node_coordinates[node][1] /= object\n self._node_coordinates[node][1] *= subs_dict[object]\n\n for node in list(self._node_coordinates):\n node_markers.append(\n {\n 'args':[[self._node_coordinates[node][0]], [self._node_coordinates[node][1]]],\n 
'marker':'o',\n 'markersize':5,\n 'color':'black'\n }\n )\n return node_markers\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 826, "n_words": 139, "vocab_size": 53, "complexity": 17, "nloc": 41, "token_counts": 407, "n_ast_nodes": 619, "n_identifiers": 16, "random_cut": "def _draw_nodes(self, subs_dict):\n node_markers = []\n\n for node in list(self._node_coordinates):\n if (type(self._node_coordinates[node][0]) in (Symbol, Quantity)):\n if self._node_coordinates[node][0] in list(subs_dict):\n self._node_coordinates[node][0] = subs_dict[self._node_coordinates[node][0]]\n else:\n raise ValueError(\"provided substituted dictionary is not adequate\")\n elif (type(self._node_coordinates[node][0]) == Mul):\n objects = self._node_coordinates[node][0].as_coeff_Mul()\n for object " }, { "id": 132340, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/syncer.py", "file_name": "syncer.py", "fun_name": "sync_up_to_new_location", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def sync_up_to_new_location(self, worker_ip):\n if worker_ip != self.worker_ip:\n logger.debug(\"Setting new worker IP to %s\", worker_ip)\n self.set_worker_ip(worker_ip)\n self.reset()\n if not self.sync_up():\n logger.warning(\n \"Sync up to new location skipped. This should not occur.\"\n )\n else:\n logger.warning(\"Sync attempted to same IP %s.\", worker_ip)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 156, "n_words": 39, "vocab_size": 32, "complexity": 3, "nloc": 11, "token_counts": 57, "n_ast_nodes": 99, "n_identifiers": 9, "random_cut": "def sync_up_to_new_location(self, worker_ip):\n if worker_ip != self.worker_ip:\n logger.debug(\"Setting new worker IP to %s\", worker_ip)\n self.set_worker" }, { "id": 78476, "commit_id": "84662031294740d59eee60af37e69c3735de1117", "repo": "wagtail", "path": "wagtail/admin/tests/pages/test_delete_page.py", "file_name": "test_delete_page.py", "fun_name": "test_confirm_delete_scenario_1", "commit_message": "review fixes", "code": "def test_confirm_delete_scenario_1(self):\n # If the number of pages to be deleted are less than\n # WAGTAILADMIN_UNSAFE_PAGE_DELETION_LIMIT then don't need\n # for confirmation\n child_1 = SimplePage(title=\"child 1\", slug=\"child-1\", content=\"hello\")\n self.child_page.add_child(instance=child_1)\n child_2 = SimplePage(title=\"child 2\", slug=\"child-2\", content=\"hello\")\n self.child_page.add_child(instance=child_2)\n response = self.client.get(\n reverse(\"wagtailadmin_pages:delete\", args=(self.child_page.id,))\n )\n self.assertEqual(response.status_code, 200)\n self.assertNotContains(\n response,\n '',\n )\n # deletion should not actually happen on GET\n self.assertTrue(SimplePage.objects.filter(id=self.child_page.id).exists())\n\n # And admin should be able to delete page without any confirmation\n response = self.client.post(\n reverse(\"wagtailadmin_pages:delete\", args=(self.child_page.id,))\n )\n # Check that page is deleted\n self.assertFalse(SimplePage.objects.filter(id=self.child_page.id).exists())\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 263, "n_words": 87, "vocab_size": 66, "complexity": 1, "nloc": 18, 
"token_counts": 170, "n_ast_nodes": 285, "n_identifiers": 26, "random_cut": "def test_confirm_delete_scenario_1(self):\n # If the number of pages to be deleted are less than\n # WAGTAILADMIN_UNSAFE_PAGE_DELETION_LIMIT then don't need\n # for confirmation\n child_1 = SimplePage(title=\"child 1\", slug=\"child-1\", content=\"hello\")\n self.child_page.add_child(instance=child_1)\n child_2 = SimplePage(title=\"child 2\", slug=\"child-2\", content=\"hello\")\n self.child_page.add_child(instance=child_2)\n response = self.client.get(\n reverse(\"wagtailadmin_pages:delete\", args=(self.child_page.id,))\n )\n self.assertEqual(response.status_code, 200)\n self.assertNotContains(\n response,\n '',\n )\n # deletion should not actually happen on GET\n self.assertTrue(SimplePage.objects.filter(id=self.child_page.id).exists())\n\n # And admin should be able to delete page without any confirmation\n response = self.client.post(\n reverse(\"wagtailadmin_pages:delete\", args=(self.child_page.id,))\n " }, { "id": 152352, "commit_id": "87e8b9a2ab3f033e7fdadbb2fe258857915980ac", "repo": "stable-diffusion-webui", "path": "modules/sd_samplers.py", "file_name": "sd_samplers.py", "fun_name": "randn_like", "commit_message": "prevent replacing torch_randn globally (instead replacing k_diffusion.sampling.torch) and add a setting to disable this all", "code": "def randn_like(self, x):\r\n noise = self.sampler_noises[self.sampler_noise_index] if self.sampler_noises is not None and self.sampler_noise_index < len(self.sampler_noises) else None\r\n\r\n if noise is not None and x.shape == noise.shape:\r\n res = noise\r\n else:\r\n res = torch.randn_like(x)\r\n\r\n self.sampler_noise_index += 1\r\n return res\r\n\r", "url": "https://github.com/AUTOMATIC1111/stable-diffusion-webui.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 94, "n_words": 38, "vocab_size": 25, "complexity": 5, "nloc": 8, "token_counts": 71, "n_ast_nodes": 109, "n_identifiers": 10, "random_cut": "def randn_like(self, x):\r\n noise = self.sampler_noises[self.sampler_noise_index] if self.sampler_noises is not None and self.sampler_noise_index < len(self.sampler_noises) else None\r\n\r\n if noise is not None and x.shape == noise.shape:\r\n res = noise\r\n else:\r\n res = torch.randn_like(x)\r\n\r\n self.sampler_noise_index += 1\r\n return res" }, { "id": 245612, "commit_id": "8405ad9bfce4867f552f2f7a643c9e78a97eb0b6", "repo": "mmdetection", "path": "tests/test_evaluation/test_metrics/test_coco_metric.py", "file_name": "test_coco_metric.py", "fun_name": "test_format_only", "commit_message": "[Refactor] refactor dataflow and sync the latest mmengine (#8620)\n\n* refactor dataflow\r\n\r\n* fix docstr\r\n\r\n* fix commit\r\n\r\n* fix commit\r\n\r\n* fix visualizer hook\r\n\r\n* fix UT\r\n\r\n* fix UT\r\n\r\n* fix UT error\r\n\r\n* fix bug\r\n\r\n* update to mmengine main\r\n\r\n* update typehint\r\n\r\n* replace data preprocess output type to dict\r\n\r\n* update\r\n\r\n* fix typehint", "code": "def test_format_only(self):\n # create dummy data\n fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n self._create_dummy_coco_json(fake_json_file)\n dummy_pred = self._create_dummy_results()\n\n with self.assertRaises(AssertionError):\n CocoMetric(\n ann_file=fake_json_file,\n classwise=False,\n format_only=True,\n outfile_prefix=None)\n\n coco_metric = CocoMetric(\n ann_file=fake_json_file,\n metric='bbox',\n classwise=False,\n format_only=True,\n outfile_prefix=f'{self.tmp_dir.name}/test')\n 
coco_metric.dataset_meta = dict(CLASSES=['car', 'bicycle'])\n coco_metric.process(\n {},\n [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n eval_results = coco_metric.evaluate(size=1)\n self.assertDictEqual(eval_results, dict())\n self.assertTrue(osp.exists(f'{self.tmp_dir.name}/test.bbox.json'))\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 269, "n_words": 45, "vocab_size": 37, "complexity": 1, "nloc": 23, "token_counts": 157, "n_ast_nodes": 269, "n_identifiers": 32, "random_cut": "def test_format_only(self):\n # create dummy data\n fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')\n self._create_dummy_coco_json(fake_json_file)\n dummy_pred = self._create_dummy_results()\n\n with self.assertRaises(AssertionError):\n CocoMetric(\n ann_file=fake_json_file,\n classwise=False,\n format_only=True,\n outfile_prefix=None)\n\n coco_metric = CocoMetric(\n ann_file=fake_json_file,\n metric='bbox',\n classwise=False,\n format_only=True,\n outfile_prefix=f'{self.tmp_dir.name}/test')\n coco_metric.dataset_meta = dict(CLASSES=['car', 'bicycle'])\n coco_metric.process(\n {},\n [dict(pred_instances=dummy_pred, img_id=0, ori_shape=(640, 640))])\n eval_results = coco_metric.evaluate(size=1)\n self.assertDictEqual(eval_results, dict())\n self.assertTrue(osp.exists(f'{self.tmp_dir.name}/test.b" }, { "id": 150142, "commit_id": "c43935e82ad9b627875a61d02a2923ac101b7374", "repo": "freqtrade", "path": "tests/freqai/conftest.py", "file_name": "conftest.py", "fun_name": "freqai_conf", "commit_message": "create dedicated minimal freqai test strat", "code": "def freqai_conf(default_conf):\n freqaiconf = deepcopy(default_conf)\n freqaiconf.update(\n {\n \"datadir\": Path(default_conf[\"datadir\"]),\n \"strategy\": \"freqai_test_strat\",\n \"strategy-path\": \"freqtrade/tests/strategy/strats\",\n \"freqaimodel\": \"LightGBMPredictionModel\",\n \"freqaimodel_path\": \"freqai/prediction_models\",\n \"timerange\": \"20180110-20180115\",\n \"freqai\": {\n \"startup_candles\": 10000,\n \"purge_old_models\": True,\n \"train_period_days\": 5,\n \"backtest_period_days\": 2,\n \"live_retrain_hours\": 0,\n \"expiration_hours\": 1,\n \"identifier\": \"uniqe-id100\",\n \"live_trained_timestamp\": 0,\n \"feature_parameters\": {\n \"include_timeframes\": [\"5m\"],\n \"include_corr_pairlist\": [\"ADA/BTC\", \"DASH/BTC\"],\n \"label_period_candles\": 20,\n \"include_shifted_candles\": 1,\n \"DI_threshold\": 0.9,\n \"weight_factor\": 0.9,\n \"principal_component_analysis\": False,\n \"use_SVM_to_remove_outliers\": True,\n \"stratify_training_data\": 0,\n \"indicator_max_period_candles\": 10,\n \"indicator_periods_candles\": [10],\n },\n \"data_split_parameters\": {\"test_size\": 0.33, \"random_state\": 1},\n \"model_training_parameters\": {\"n_estimators\": 100},\n },\n \"config_files\": [Path('config_examples', 'config_freqai_futures.example.json')]\n }\n )\n freqaiconf['exchange'].update({'pair_whitelist': ['ADA/BTC', 'DASH/BTC', 'ETH/BTC', 'LTC/BTC']})\n return freqaiconf\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 600, "n_words": 84, "vocab_size": 75, "complexity": 1, "nloc": 40, "token_counts": 201, "n_ast_nodes": 365, "n_identifiers": 6, "random_cut": "def freqai_conf(default_conf):\n freqaiconf = deepcopy(default_conf)\n freqaiconf.update(\n {\n \"datadir\": 
Path(default_conf[\"datadir\"]),\n \"strategy\": \"freqai_test_strat\",\n \"strategy-path\": \"freqtrade/tests/strategy/strats\",\n \"freqaimodel\": \"LightGBMPre" }, { "id": 172169, "commit_id": "b1c5b5d9517da7269163566892ba230ebf14afea", "repo": "pandas", "path": "pandas/tests/io/parser/dtypes/test_dtypes_basic.py", "file_name": "test_dtypes_basic.py", "fun_name": "test_use_nullable_dtypes_pyarrow_backend", "commit_message": "Add pyarrow support to python engine in read_csv (#50318)", "code": "def test_use_nullable_dtypes_pyarrow_backend(all_parsers, request):\n # GH#36712\n pa = pytest.importorskip(\"pyarrow\")\n parser = all_parsers\n engine = parser.engine\n\n data = \n with pd.option_context(\"mode.nullable_backend\", \"pyarrow\"):\n if engine == \"c\":\n request.node.add_marker(\n pytest.mark.xfail(\n raises=NotImplementedError,\n reason=f\"Not implemented with engine={parser.engine}\",\n )\n )\n result = parser.read_csv(\n StringIO(data), use_nullable_dtypes=True, parse_dates=[\"i\"]\n )\n expected = DataFrame(\n {\n \"a\": pd.Series([1, 3], dtype=\"int64[pyarrow]\"),\n \"b\": pd.Series([2.5, 4.5], dtype=\"float64[pyarrow]\"),\n \"c\": pd.Series([True, False], dtype=\"bool[pyarrow]\"),\n \"d\": pd.Series([\"a\", \"b\"], dtype=pd.ArrowDtype(pa.string())),\n \"e\": pd.Series([pd.NA, 6], dtype=\"int64[pyarrow]\"),\n \"f\": pd.Series([pd.NA, 7.5], dtype=\"float64[pyarrow]\"),\n \"g\": pd.Series([pd.NA, True], dtype=\"bool[pyarrow]\"),\n \"h\": pd.Series(\n [pd.NA if engine == \"python\" else \"\", \"a\"],\n dtype=pd.ArrowDtype(pa.string()),\n ),\n \"i\": pd.Series([Timestamp(\"2019-12-31\")] * 2),\n \"j\": pd.Series([pd.NA, pd.NA], dtype=\"null[pyarrow]\"),\n }\n )\n tm.assert_frame_equal(result, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 477, "n_words": 95, "vocab_size": 75, "complexity": 3, "nloc": 37, "token_counts": 312, "n_ast_nodes": 511, "n_identifiers": 33, "random_cut": "def test_use_nullable_dtypes_pyarrow_backend(all_parsers, request):\n # GH#36712\n pa = pytest.importorskip(\"pyarrow\")\n parser = all_parsers\n engine = parser.engine\n\n data = \n with pd.option_context(\"mode.nullable_backend\", \"pyarrow\"):\n if engine == \"c\":\n request.node.add_marker(\n pytest.mark.xfail(\n raises=NotImplementedError,\n reason=f\"Not implemented with engine={parser.engine}\",\n )\n )\n result = parser.read_csv(" }, { "id": 247297, "commit_id": "2ffaf30803f93273a4d8a65c9e6c3110c8433488", "repo": "synapse", "path": "tests/rest/client/test_rooms.py", "file_name": "test_rooms.py", "fun_name": "test_rooms_messages_sent", "commit_message": "Add type hints to `tests/rest/client` (#12108)\n\n* Add type hints to `tests/rest/client`\r\n\r\n* newsfile\r\n\r\n* fix imports\r\n\r\n* add `test_account.py`\r\n\r\n* Remove one type hint in `test_report_event.py`\r\n\r\n* change `on_create_room` to `async`\r\n\r\n* update new functions in `test_third_party_rules.py`\r\n\r\n* Add `test_filter.py`\r\n\r\n* add `test_rooms.py`\r\n\r\n* change to `assertEquals` to `assertEqual`\r\n\r\n* lint", "code": "def test_rooms_messages_sent(self) -> None:\n path = \"/rooms/%s/send/m.room.message/mid1\" % (urlparse.quote(self.room_id))\n\n content = b'{\"body\":\"test\",\"msgtype\":{\"type\":\"a\"}}'\n channel = self.make_request(\"PUT\", path, content)\n self.assertEqual(400, channel.code, msg=channel.result[\"body\"])\n\n # custom message types\n content = 
b'{\"body\":\"test\",\"msgtype\":\"test.custom.text\"}'\n channel = self.make_request(\"PUT\", path, content)\n self.assertEqual(200, channel.code, msg=channel.result[\"body\"])\n\n # m.text message type\n path = \"/rooms/%s/send/m.room.message/mid2\" % (urlparse.quote(self.room_id))\n content = b'{\"body\":\"test2\",\"msgtype\":\"m.text\"}'\n channel = self.make_request(\"PUT\", path, content)\n self.assertEqual(200, channel.code, msg=channel.result[\"body\"])\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 145, "n_words": 55, "vocab_size": 28, "complexity": 1, "nloc": 12, "token_counts": 140, "n_ast_nodes": 227, "n_identifiers": 13, "random_cut": "def test_rooms_messages_sent(self) -> None:\n path = \"/rooms/%s/send/m.room.message/mid1\" % (urlparse.quote(self.room_id))\n\n content = b'{\"body\":\"test\",\"msgtype\":{\"type\":\"a\"}}'\n channel = self.make_request(\"PUT\", p" }, { "id": 27586, "commit_id": "7ea7916c65357741c3911e307acb58d547a5e91a", "repo": "saleor", "path": "saleor/webhook/observability/tests/test_payloads.py", "file_name": "test_payloads.py", "fun_name": "test_serialize_gql_operation_result_when_no_operation_data", "commit_message": "Observability reporter (#9803)\n\n* Initial commit\r\n\r\n* Add observability celery beat task\r\n\r\n* Add observability_reporter_task and observability_send_events\r\n\r\n* Convert payload to camel case\r\n\r\n* Add fakeredis to dev dependencies\r\n\r\n* Add redis buffer tests\r\n\r\n* Refactor buffer\r\n\r\n* Update\r\n\r\n* Optimize buffer\r\n\r\n* Add tests\r\n\r\n* Add types-redis to dev dependencies\r\n\r\n* Refactor\r\n\r\n* Fix after rebase\r\n\r\n* Refactor opentracing\r\n\r\n* Add opentracing to observability tasks\r\n\r\n* Add more tests\r\n\r\n* Fix buffer fixtures\r\n\r\n* Report dropped events\r\n\r\n* Fix buffer tests\r\n\r\n* Refactor get_buffer\r\n\r\n* Refactor unit tests\r\n\r\n* Set Redis connection client_name\r\n\r\n* Refactor redis tests\r\n\r\n* Fix test_get_or_create_connection_pool\r\n\r\n* Fix JsonTruncText comparison\r\n\r\n* Add more generate_event_delivery_attempt_payload tests", "code": "def test_serialize_gql_operation_result_when_no_operation_data():\n bytes_limit = 1024\n result = GraphQLOperationResponse()\n payload, _ = serialize_gql_operation_result(result, bytes_limit)\n assert payload == GraphQLOperation(\n name=None, operation_type=None, query=None, result=None, result_invalid=False\n )\n assert len(dump_payload(payload)) <= bytes_limit\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 51, "n_words": 27, "vocab_size": 23, "complexity": 1, "nloc": 8, "token_counts": 57, "n_ast_nodes": 87, "n_identifiers": 14, "random_cut": "def test_serialize_gql_operation_result_when_no_operation_data():\n bytes_limit = 1024\n result = G" }, { "id": 30493, "commit_id": "2d0ac4707c6b19614bf56bede0892656cd0e1f0c", "repo": "OCRmyPDF", "path": "tests/test_optimize.py", "file_name": "test_optimize.py", "fun_name": "test_multiple_pngs", "commit_message": "Use better img2pdf settings where possible while supporting old versions\n\nFixes #894", "code": "def test_multiple_pngs(resources, outdir):\n with Path.open(outdir / 'in.pdf', 'wb') as inpdf:\n img2pdf.convert(\n fspath(resources / 'baiona_colormapped.png'),\n fspath(resources / 'baiona_gray.png'),\n outputstream=inpdf,\n **IMG2PDF_KWARGS,\n )\n", "url": 
"https://github.com/ocrmypdf/OCRmyPDF.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 80, "n_words": 20, "vocab_size": 17, "complexity": 2, "nloc": 32, "token_counts": 192, "n_ast_nodes": 81, "n_identifiers": 11, "random_cut": "def test_multiple_pngs(resources, outdir):\n with Path.open(outdir / 'in.pdf', 'wb') as inpdf:\n img2pdf.co" }, { "id": 79174, "commit_id": "e0fd8e1a473d154c7ec154958e8c334db5a39a6d", "repo": "wagtail", "path": "wagtail/snippets/tests/test_bulk_actions/test_bulk_delete.py", "file_name": "test_bulk_delete.py", "fun_name": "test_delete_with_limited_permissions", "commit_message": "Fix plural handling for \"no permission to delete these snippets\" errors\n\n`./manage.py compilemessages` does not allow variables to differ between the singular and plural forms - it fails with\n\n a format specification for argument 'snippet_type_name', as in 'msgstr[0]', doesn't exist in 'msgid_plural'\n\nIt's not possible to use the gettext pluralisation mechanism properly here, because we're using Django's verbose_name and verbose_name_plural properties which don't cover the requirements of languages with complex pluralisation rules. Since we can only hope to support English-style (`if n == 1`) pluralisation, use an n==1 test directly (as we have elsewhere in the template) rather than trying to shoehorn this into gettext pluralisation.\n\nWhile we're at it, remove the capitalisation of the snippet name - it makes no sense here (especially when only done for the plural).", "code": "def test_delete_with_limited_permissions(self):\n self.user.is_superuser = False\n self.user.user_permissions.add(\n Permission.objects.get(\n content_type__app_label=\"wagtailadmin\", codename=\"access_admin\"\n )\n )\n self.user.save()\n\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n\n html = response.content.decode()\n self.assertInHTML(\n \"
<p>You don't have permission to delete these standard snippets</p>\",\n html,\n )\n\n for snippet in self.test_snippets:\n self.assertInHTML(f\"<li>{snippet.text}</li>\", html)\n\n response = self.client.post(self.url)\n # User should be redirected back to the index\n self.assertEqual(response.status_code, 302)\n\n # Documents should not be deleted\n for snippet in self.test_snippets:\n self.assertTrue(self.snippet_model.objects.filter(pk=snippet.pk).exists())\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 248, "n_words": 63, "vocab_size": 48, "complexity": 3, "nloc": 21, "token_counts": 150, "n_ast_nodes": 249, "n_identifiers": 30, "random_cut": "def test_delete_with_limited_permissions(self):\n self.user.is_superuser = False\n self.user.user_permissions.add(\n Permission.objects.get(\n content_type__app_label=\"wagtailadmin\", codename=\"access_admin\"\n )\n )\n self.user.save()\n\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n\n html = response.content.decode()\n self.assertInHTML(\n \"<p>You don't have permission to delete these standard snippets</p>\",\n html,\n )\n\n for snippet in self.test_snippets:\n self.assertInHTML(f\"<li>{snippet.text}
  • \", html)\n\n response = self.client.post" }, { "id": 121964, "commit_id": "7fbf8ec669c03ce0e1014aaf010dabdf5985509f", "repo": "jax", "path": "jax/interpreters/pxla.py", "file_name": "pxla.py", "fun_name": "_pmap_dce_rule", "commit_message": "Fix Forward. The fix is on the user's end. Original PR: https://github.com/google/jax/pull/12217\n\nCo-authored-by: Matthew Johnson \nCo-authored-by: Yash Katariya \nPiperOrigin-RevId: 472999907", "code": "def _pmap_dce_rule(used_outputs, eqn):\n # just like pe.dce_jaxpr_call_rule, except handles in_axes / out_axes\n new_jaxpr, used_inputs = pe.dce_jaxpr(eqn.params['call_jaxpr'], used_outputs)\n _, donated_invars = partition_list(used_inputs, eqn.params['donated_invars'])\n # TODO(yashkatariya,mattjj): Handle global_arg_shapes here too.\n _, in_axes = partition_list(used_inputs, eqn.params['in_axes'])\n _, out_axes = partition_list(used_outputs, eqn.params['out_axes'])\n new_params = dict(eqn.params, call_jaxpr=new_jaxpr,\n donated_invars=tuple(donated_invars),\n in_axes=tuple(in_axes), out_axes=tuple(out_axes))\n if not any(used_inputs) and not any(used_outputs) and not new_jaxpr.effects:\n return used_inputs, None\n else:\n new_eqn = pe.new_jaxpr_eqn(\n [v for v, used in zip(eqn.invars, used_inputs) if used],\n [v for v, used in zip(eqn.outvars, used_outputs) if used],\n eqn.primitive, new_params, new_jaxpr.effects, eqn.source_info)\n return used_inputs, new_eqn\n\n\n# Set param update handlers to update `donated_invars` just like xla_call_p\npe.call_param_updaters[xla_pmap_p] = pe.call_param_updaters[xla.xla_call_p]\npe.partial_eval_jaxpr_custom_rules[xla_pmap_p] = \\\n partial(pe.call_partial_eval_custom_rule,\n 'call_jaxpr', _pmap_partial_eval_custom_params_updater,\n res_aval=_pmap_partial_eval_custom_res_maker)\npe.dce_rules[xla_pmap_p] = _pmap_dce_rule\nad.call_param_updaters[xla_pmap_p] = ad.call_param_updaters[xla.xla_call_p]\nad.call_transpose_param_updaters[xla_pmap_p] = \\\n ad.call_transpose_param_updaters[xla.xla_call_p]\n\nad.primitive_transposes[xla_pmap_p] = partial(ad.map_transpose, xla_pmap_p)\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 218, "n_words": 121, "vocab_size": 84, "complexity": 8, "nloc": 16, "token_counts": 188, "n_ast_nodes": 418, "n_identifiers": 43, "random_cut": "def _pmap_dce_rule(used_outputs, eqn):\n # just like pe.dce_jaxpr_call_rule, except handles in_axes / out_axes\n new_jaxpr, used_inputs = pe.dce_jaxpr(eqn.params['call_jaxpr'], used_outputs)\n _, donated_invars = partition_list(used_inputs, eqn.params['donated_invars'])\n # TODO(yashkatariya,mattjj): Handle global_arg_shapes here too.\n _, in_axes = partition_list(used_inputs, eqn.params['in_axes'])\n _, out_axes = partition_list(used_outputs, eqn.params['out_axes'])\n new_params = dict(eqn.params, call_jaxpr=new_jaxpr,\n donated_invars=tuple(donated_invars),\n in_axes=tuple(in_axes), out_axes=tuple(out_axes))\n if not any(used_inputs) and not any(used_outputs) and not new_jaxpr.effects:\n return used_inputs, None\n else:\n new_eqn = pe.new_jaxpr_eqn(\n [v for v, used in zip(eqn.invars, used_inputs) if used],\n [v for v, used in zip(eqn.outvars, used_outputs) if used],\n eqn.primitive, new_params, new_jaxpr.effects, eqn.source_info)\n return used_inputs, new_eqn\n\n\n# Set param update handlers to update `donated_invars` just like xla_call_p\npe.call_param_updaters[xla_pmap_p] = 
pe.call_param_updaters[xla.xla_call_p]\npe.partial_eval_jaxpr_custom_rules[xla_pmap_p] =" }, { "id": 136578, "commit_id": "c51b0c9a5664e5c6df3d92f9093b56e61b48f514", "repo": "ray", "path": "python/ray/tests/test_batch_node_provider_unit.py", "file_name": "test_batch_node_provider_unit.py", "fun_name": "_add_node", "commit_message": "[autoscaler][kuberay] Batching node provider (#29933)\n\nImplements the abstract subclass of NodeProvider proposed in\r\nhttps://docs.google.com/document/d/1JyQINBFirZw7YenA_14zize0R3hIII1_fnfQytIXTPo/\r\n\r\nThe goal is to simplify the autoscaler's interactions with external cluster managers like the KubeRay operator.\r\n\r\nA follow-up PR will implement KuberayNodeProvider as a subclass of the BatchingNodeProvider added here.\r\n\r\nSigned-off-by: Dmitri Gekhtman ", "code": "def _add_node(self, node_type, node_kind):\n new_node_id = str(uuid4())\n self._node_data_dict[new_node_id] = NodeData(\n kind=node_kind, ip=str(uuid4()), status=STATUS_UP_TO_DATE, type=node_type\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 46, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 47, "n_ast_nodes": 71, "n_identifiers": 14, "random_cut": "def _add_node(self, node_type, node_kind):\n new_node_id = str(uuid4())\n self._n" }, { "id": 67566, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/startup/leaderboard.py", "file_name": "leaderboard.py", "fun_name": "get_all_sales_partner", "commit_message": "style: format code with black", "code": "def get_all_sales_partner(date_range, company, field, limit=None):\n\tif field == \"total_sales_amount\":\n\t\tselect_field = \"sum(`base_net_total`)\"\n\telif field == \"total_commission\":\n\t\tselect_field = \"sum(`total_commission`)\"\n\n\tfilters = {\"sales_partner\": [\"!=\", \"\"], \"docstatus\": 1, \"company\": company}\n\tif date_range:\n\t\tdate_range = frappe.parse_json(date_range)\n\t\tfilters[\"transaction_date\"] = [\"between\", [date_range[0], date_range[1]]]\n\n\treturn frappe.get_list(\n\t\t\"Sales Order\",\n\t\tfields=[\n\t\t\t\"`sales_partner` as name\",\n\t\t\t\"{} as value\".format(select_field),\n\t\t],\n\t\tfilters=filters,\n\t\tgroup_by=\"sales_partner\",\n\t\torder_by=\"value DESC\",\n\t\tlimit=limit,\n\t)\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 36, "n_words": 57, "vocab_size": 48, "complexity": 4, "nloc": 20, "token_counts": 117, "n_ast_nodes": 209, "n_identifiers": 15, "random_cut": "def get_all_sales_partner(date_range, company, field, limit=None):\n\tif field == \"total_sales_amount\":\n\t\tselect_field = \"sum(`base_net_total`)\"\n\telif field == \"total_commission\":\n\t\tselect_field = \"sum(`total_commission`)\"\n\n\tfilters = {\"sales_partner\": [\"!=\", \"\"], \"d" }, { "id": 156326, "commit_id": "bbd1d2f16b5ac4784d758252188047b7816c7fa4", "repo": "dask", "path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "fun_name": "test_from_delayed_dataframe", "commit_message": "Stringify BlockwiseDepDict mapping values when produces_keys=True (#8972)", "code": "def test_from_delayed_dataframe(c):\n # Check that Delayed keys in the form of a tuple\n # are properly serialized in `from_delayed`\n pd = pytest.importorskip(\"pandas\")\n dd = 
pytest.importorskip(\"dask.dataframe\")\n\n df = pd.DataFrame({\"x\": range(20)})\n ddf = dd.from_pandas(df, npartitions=2)\n ddf = dd.from_delayed(ddf.to_delayed())\n dd.utils.assert_eq(ddf, df, scheduler=c)\n\n\n@pytest.mark.parametrize(\"fuse\", [True, False])", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"fuse\", [True, False])", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 64, "n_words": 42, "vocab_size": 35, "complexity": 1, "nloc": 7, "token_counts": 74, "n_ast_nodes": 149, "n_identifiers": 19, "random_cut": "def test_from_delayed_dataframe(c):\n # Check that Delayed keys in the form of a tuple\n # are properly serialized in `from_delayed`\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"d" }, { "id": 311925, "commit_id": "5a34feb7de440e0df748c9db500facc72a4c2646", "repo": "core", "path": "tests/util/test_async.py", "file_name": "test_async.py", "fun_name": "test_check_loop_async_custom", "commit_message": "Don't warn on time.sleep injected by the debugger (#65420)", "code": "async def test_check_loop_async_custom(caplog):\n \n with pytest.raises(RuntimeError), patch(\n \"homeassistant.util.async_.extract_stack\",\n return_value=[\n Mock(\n filename=\"/home/paulus/homeassistant/core.py\",\n lineno=\"23\",\n line=\"do_something()\",\n ),\n Mock(\n filename=\"/home/paulus/config/custom_components/hue/light.py\",\n lineno=\"23\",\n line=\"self.light.is_on\",\n ),\n Mock(\n filename=\"/home/paulus/aiohue/lights.py\",\n lineno=\"2\",\n line=\"something()\",\n ),\n ],\n ):\n hasync.check_loop(banned_function)\n assert (\n \"Detected blocking call inside the event loop. This is causing stability issues. \"\n \"Please report issue to the custom component author for hue doing blocking calls \"\n \"at custom_components/hue/light.py, line 23: self.light.is_on\" in caplog.text\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 328, "n_words": 63, "vocab_size": 55, "complexity": 1, "nloc": 27, "token_counts": 89, "n_ast_nodes": 159, "n_identifiers": 15, "random_cut": "async def test_check_loop_async_custom(caplog):\n \n with pytest.raises(RuntimeError), patch(\n \"homeassistant.util.async_.extract_stack\",\n return_value=[\n Mock(\n filename=\"/home/paulus/homeassistant/core.py\",\n lineno=\"23\",\n line=\"do_something()\",\n ),\n Mock(\n filename=\"/home/paulus/config/custom_compo" }, { "id": 149841, "commit_id": "c499a92f57cccf520f3d6f19941857af87fac5aa", "repo": "freqtrade", "path": "freqtrade/optimize/backtesting.py", "file_name": "backtesting.py", "fun_name": "load_bt_data_detail", "commit_message": "Remove surplus mark columns, and make fillna on funding rate only", "code": "def load_bt_data_detail(self) -> None:\n \n if self.timeframe_detail:\n self.detail_data = history.load_data(\n datadir=self.config['datadir'],\n pairs=self.pairlists.whitelist,\n timeframe=self.timeframe_detail,\n timerange=self.timerange,\n startup_candles=0,\n fail_without_data=True,\n data_format=self.config.get('dataformat_ohlcv', 'json'),\n candle_type=self.config.get('candle_type_def', CandleType.SPOT)\n )\n else:\n self.detail_data = {}\n if self.trading_mode == TradingMode.FUTURES:\n # Load additional futures data.\n funding_rates_dict = history.load_data(\n datadir=self.config['datadir'],\n pairs=self.pairlists.whitelist,\n timeframe=self.exchange._ft_has['mark_ohlcv_timeframe'],\n timerange=self.timerange,\n 
startup_candles=0,\n fail_without_data=True,\n data_format=self.config.get('dataformat_ohlcv', 'json'),\n candle_type=CandleType.FUNDING_RATE\n )\n\n # For simplicity, assign to CandleType.Mark (might contian index candles!)\n mark_rates_dict = history.load_data(\n datadir=self.config['datadir'],\n pairs=self.pairlists.whitelist,\n timeframe=self.exchange._ft_has['mark_ohlcv_timeframe'],\n timerange=self.timerange,\n startup_candles=0,\n fail_without_data=True,\n data_format=self.config.get('dataformat_ohlcv', 'json'),\n candle_type=CandleType.from_string(self.exchange._ft_has[\"mark_ohlcv_price\"])\n )\n # Combine data to avoid combining the data per trade.\n unavailable_pairs = []\n for pair in self.pairlists.whitelist:\n if pair not in self.exchange._leverage_tiers:\n unavailable_pairs.append(pair)\n continue\n\n if (pair in mark_rates_dict\n and len(funding_rates_dict[pair]) == 0\n and \"futures_funding_rate\" in self.config):\n mark_rates_dict[pair][\"open_fund\"] = self.config.get('futures_funding_rate')\n mark_rates_dict[pair].rename(\n columns={'open': 'open_mark',\n 'close': 'close_mark',\n 'high': 'high_mark',\n 'low': 'low_mark',\n 'volume': 'volume_mark'},\n inplace=True)\n\n self.futures_data[pair] = mark_rates_dict[pair]\n else:\n if \"futures_funding_rate\" in self.config:\n self.futures_data[pair] = mark_rates_dict[pair].merge(\n funding_rates_dict[pair], on='date',\n how=\"outer\", suffixes=[\"_mark\", \"_fund\"])['open_fund'].fillna(\n self.config.get('futures_funding_rate'))\n else:\n self.futures_data[pair] = mark_rates_dict[pair].merge(\n funding_rates_dict[pair], on='date',\n how=\"inner\", suffixes=[\"_mark\", \"_fund\"])\n\n if unavailable_pairs:\n raise OperationalException(\n f\"Pairs {', '.join(unavailable_pairs)} got no leverage tiers available. 
\"\n \"It is therefore impossible to backtest with this pair at the moment.\")\n else:\n self.futures_data = {}\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 23, "n_whitespaces": 1368, "n_words": 175, "vocab_size": 115, "complexity": 10, "nloc": 71, "token_counts": 476, "n_ast_nodes": 787, "n_identifiers": 45, "random_cut": "def load_bt_data_detail(self) -> None:\n \n if self.timeframe_detail:\n self.detail_data = history.load_data(\n datadir=self.config['datadir'],\n pairs=self.pairlists.whitelist,\n timeframe=self.timeframe_detail,\n timerange=self.timerange,\n startup_candles=0,\n fail_without_data=True,\n data_format=self.config.get('dataformat_ohlcv', 'json'),\n candle_type=self.config.get('candle_type_def', CandleType.SPOT)\n )\n else:\n self.detail_data = {}\n if self.trading_mode == TradingMode.FUTURES:\n # Load additional futures data.\n funding_rates_dict = history.load_data(\n datadir=self.config['datadir'],\n pairs=self.pairlists.whitelist,\n timeframe=self.exchange._ft_has['mark_ohlcv_timeframe'],\n timerange=self.timerange,\n startup_candles=0,\n fail_without_data=True,\n data_format=self.config.get('dataformat_ohlcv', 'json'),\n candle_type=CandleType.FUNDING_RATE\n )\n\n # For simplicity, assign to CandleType.Mark (might contian index candles!)\n mark_rates_dict = history.load_data(\n datadir=self.config['datadir'],\n pairs=self.pa" }, { "id": 112157, "commit_id": "14d2966b9e91ae16dcc39de8f41017a75cec8ff9", "repo": "nni", "path": "nni/retiarii/oneshot/pytorch/supermodule/_singlepathnas.py", "file_name": "_singlepathnas.py", "fun_name": "generate_architecture_params", "commit_message": "Valuechoice oneshot lightning (#4602)", "code": "def generate_architecture_params(self):\n self.alpha = {}\n if self.kernel_size_candidates is not None:\n # kernel size arch params\n self.t_kernel = nn.Parameter(torch.rand(len(self.kernel_size_candidates) - 1))\n self.alpha['kernel_size'] = self.t_kernel\n # kernel size mask\n self.kernel_masks = []\n for i in range(0, len(self.kernel_size_candidates) - 1):\n big_size = self.kernel_size_candidates[i]\n small_size = self.kernel_size_candidates[i + 1]\n mask = torch.zeros_like(self.weight)\n mask[:, :, :big_size[0], :big_size[1]] = 1 # if self.weight.shape = (out, in, 7, 7), big_size = (5, 5) and\n mask[:, :, :small_size[0], :small_size[1]] = 0 # small_size = (3, 3), mask will look like:\n self.kernel_masks.append(mask) # 0 0 0 0 0 0 0\n mask = torch.zeros_like(self.weight) # 0 1 1 1 1 1 0\n mask[:, :, :self.kernel_size_candidates[-1][0], :self.kernel_size_candidates[-1][1]] = 1 # 0 1 0 0 0 1 0\n self.kernel_masks.append(mask) # 0 1 0 0 0 1 0\n # 0 1 0 0 0 1 0\n if self.out_channel_candidates is not None: # 0 1 1 1 1 1 0\n # out_channel (or expansion) arch params. 
we do not consider skip-op here, so we # 0 0 0 0 0 0 0\n # only generate ``len(self.kernel_size_candidates) - 1 `` thresholds\n self.t_expansion = nn.Parameter(torch.rand(len(self.out_channel_candidates) - 1))\n self.alpha['out_channels'] = self.t_expansion\n self.channel_masks = []\n for i in range(0, len(self.out_channel_candidates) - 1):\n big_channel, small_channel = self.out_channel_candidates[i], self.out_channel_candidates[i + 1]\n mask = torch.zeros_like(self.weight)\n mask[:big_channel] = 1\n mask[:small_channel] = 0\n # if self.weight.shape = (32, in, W, H), big_channel = 16 and small_size = 8, mask will look like:\n # 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n self.channel_masks.append(mask)\n mask = torch.zeros_like(self.weight)\n mask[:self.out_channel_candidates[-1]] = 1\n self.channel_masks.append(mask)\n\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 728, "n_words": 270, "vocab_size": 95, "complexity": 5, "nloc": 29, "token_counts": 345, "n_ast_nodes": 549, "n_identifiers": 24, "random_cut": "def generate_architecture_params(self):\n self.alpha = {}\n if self.kernel_size_candidates is not None:\n # kernel size arch params\n self.t_kernel = nn.Parameter(torch.rand(len(self.kernel_size_candidates) - 1))\n self.alpha['kernel_size'] = self.t_kernel\n # kernel size mask\n self.kernel_masks = []\n for i in range(0, len(self.kernel_size_candidates) - 1):\n big_size = self.kernel_size_candidates[i]\n small_size = self.kernel_size_candidates[i + 1]\n mask = torch.zeros_like(self.weight)\n mask[:, :, :big_size[0], :big_size[1]] = 1 # if self.weight.shape = (out, in, 7, 7), big_size = (5, 5) and\n " }, { "id": 290730, "commit_id": "f952b74b74443d20c2ed200990e3040fee38aa9d", "repo": "core", "path": "homeassistant/components/alarm_control_panel/__init__.py", "file_name": "__init__.py", "fun_name": "supported_features", "commit_message": "Adjust type hints for AlarmControlPanelEntityFeature (#82186)", "code": "def supported_features(self) -> AlarmControlPanelEntityFeature | int:\n \n return self._attr_supported_features\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 25, "n_identifiers": 5, "random_cut": "def supported_features(self) -> AlarmControlPanelEntityFeature | int:\n \n return sel" }, { "id": 273091, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/preprocessing/image_preprocessing_test.py", "file_name": "image_preprocessing_test.py", "fun_name": "test_random_crop_output_shape", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_random_crop_output_shape(self, expected_height, expected_width):\n self._run_test(expected_height, expected_width)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 25, "n_identifiers": 5, "random_cut": "def test_random_crop_output_shape(self, expected_height, expected_width):\n self._run_" }, { "id": 63721, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": 
".venv/lib/python3.8/site-packages/pip/_vendor/resolvelib/structs.py", "file_name": "structs.py", "fun_name": "add", "commit_message": "upd; format", "code": "def add(self, key):\n \n if key in self._vertices:\n raise ValueError(\"vertex exists\")\n self._vertices.add(key)\n self._forwards[key] = set()\n self._backwards[key] = set()\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 63, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 6, "token_counts": 48, "n_ast_nodes": 81, "n_identifiers": 8, "random_cut": "def add(self, key):\n \n if key in self._" }, { "id": 45460, "commit_id": "69f6f9e01b6df76c3c8fa266d460324163957887", "repo": "airflow", "path": "airflow/migrations/versions/54bebd308c5f_add_trigger_table_and_task_info.py", "file_name": "54bebd308c5f_add_trigger_table_and_task_info.py", "fun_name": "downgrade", "commit_message": "Autogenerate migration reference doc (#21601)\n\n* document airflow version in each alembic migration module and use this to autogen the doc\r\n* update each migration module to have the same description used in migration ref (so it can be used in autogen)", "code": "def downgrade():\n \n with op.batch_alter_table('task_instance', schema=None) as batch_op:\n batch_op.drop_constraint('task_instance_trigger_id_fkey', type_='foreignkey')\n batch_op.drop_index('ti_trigger_id')\n batch_op.drop_column('trigger_id')\n batch_op.drop_column('trigger_timeout')\n batch_op.drop_column('next_method')\n batch_op.drop_column('next_kwargs')\n op.drop_table('trigger')\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 66, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 9, "token_counts": 65, "n_ast_nodes": 129, "n_identifiers": 10, "random_cut": "def downgrade():\n \n with op.batch_alter_table('task_instance', schema=None) as batch_op:\n batch_op.drop_constraint('task_instance_trigger_id_fkey', type_='foreign" }, { "id": 154885, "commit_id": "c89f8ba6aaa575ed44f381ad838c8e39050bc102", "repo": "modin", "path": "modin/pandas/resample.py", "file_name": "resample.py", "fun_name": "sem", "commit_message": "REFACTOR-#5038: Remove unnecessary `_method` argument from resamplers (#5039)\n\nSigned-off-by: Vasily Litvinov ", "code": "def sem(self, *args, **kwargs):\n return self._dataframe.__constructor__(\n query_compiler=self._query_compiler.resample_sem(\n self.resample_kwargs,\n *args,\n **kwargs,\n )\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 92, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 8, "token_counts": 38, "n_ast_nodes": 57, "n_identifiers": 10, "random_cut": "def sem(self, *args, **kwargs):\n return self._dataframe.__constructor__(\n query_compiler=self._query_compiler.resample_s" }, { "id": 104188, "commit_id": "21bfd0d3f5ff3fbfd691600e2c7071a167816cdf", "repo": "datasets", "path": "src/datasets/packaged_modules/text/text.py", "file_name": "text.py", "fun_name": "_generate_tables", "commit_message": "Run pyupgrade for Python 3.6+ (#3560)\n\n* Run pyupgrade for Python 3.6+\r\n\r\n* Fix lint issues\r\n\r\n* Revert changes for the datasets code\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>", "code": "def _generate_tables(self, files):\n schema = pa.schema(self.config.features.type if 
self.config.features is not None else {\"text\": pa.string()})\n for file_idx, file in enumerate(files):\n batch_idx = 0\n with open(file, encoding=self.config.encoding) as f:\n if self.config.sample_by == \"line\":\n batch_idx = 0\n while True:\n batch = f.read(self.config.chunksize)\n if not batch:\n break\n batch += f.readline() # finish current line\n batch = batch.splitlines(keepends=self.config.keep_linebreaks)\n pa_table = pa.Table.from_arrays([pa.array(batch)], schema=schema)\n # Uncomment for debugging (will print the Arrow table size and elements)\n # logger.warning(f\"pa_table: {pa_table} num rows: {pa_table.num_rows}\")\n # logger.warning('\\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))\n yield (file_idx, batch_idx), pa_table\n batch_idx += 1\n elif self.config.sample_by == \"paragraph\":\n batch_idx = 0\n batch = \"\"\n while True:\n batch += f.read(self.config.chunksize)\n if not batch:\n break\n batch += f.readline() # finish current line\n batch = batch.split(\"\\n\\n\")\n pa_table = pa.Table.from_arrays(\n [pa.array([example for example in batch[:-1] if example])], schema=schema\n )\n # Uncomment for debugging (will print the Arrow table size and elements)\n # logger.warning(f\"pa_table: {pa_table} num rows: {pa_table.num_rows}\")\n # logger.warning('\\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))\n yield (file_idx, batch_idx), pa_table\n batch_idx += 1\n batch = batch[-1]\n elif self.config.sample_by == \"document\":\n text = f.read()\n pa_table = pa.Table.from_arrays([pa.array([text])], schema=schema)\n yield file_idx, pa_table\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 25, "n_whitespaces": 1000, "n_words": 179, "vocab_size": 84, "complexity": 12, "nloc": 35, "token_counts": 299, "n_ast_nodes": 494, "n_identifiers": 31, "random_cut": "def _generate_tables(self, files):\n schema = pa.schema(self.config.features.type if self.config.features is not None else {\"text\": pa.string()})\n for file_idx, file in enumerate(files):\n batch_idx = 0\n with open(file, encoding=self.config.encoding) as f:\n if self.config.sample_by == \"line\":\n batch_idx = 0\n while True:\n batch = f.read(self.config.chunksize)\n if not batch:\n break\n batch += f.readline() # finish current line\n batch = batch.splitlines(keepends=self.config.keep_linebreaks)\n pa_table = pa.Table.from_arrays([pa.array(batch)], schema=schema)\n # Uncomment for debugging (will print the Arrow table size and elements)\n # logger.warning(f\"pa_table: {pa_table} num rows: {pa_table.num_rows}\")\n # logger.warning('\\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))\n yield (file_idx, batch_idx), pa_table\n batch_idx += 1\n elif self.config.sample_by == \"paragraph\":\n batch_idx = 0\n batch = \"\"\n while True:\n batch += f.read(self.config.chunksize)\n if not batch:\n break\n batch += f.readline() # finish current line\n batch = batch.split(\"\\n\\n\")\n pa_table = pa.Table.from_arrays(\n [pa.array([example for example in batch[:-1] if example])], schema=schema\n )\n # Uncomment for debugging (will print the Arrow table size and elements)\n # logger.warning(f\"pa_table: {pa_table} num rows" }, { "id": 337956, "commit_id": "0c6bdc2c237ac071be99ac6f93ddfbc8bbcb8441", "repo": "accelerate", "path": "tests/fsdp/test_fsdp.py", "file_name": "test_fsdp.py", "fun_name": "test_cpu_offload", "commit_message": "enhancements and fixes for FSDP 
and DeepSpeed (#532)\n\n* checkpointing enhancements and fixes for FSDP and DeepSpeed\r\n\r\n* resolving comments\r\n\r\n1. Adding deprecation args and warnings in launcher for FSDP\r\n2. Handling old configs to work with new launcher args wrt FSDP.\r\n3. Reverting changes to public methods in `checkpointing.py` and handling it in `Accelerator`\r\n4. Explicitly writing the defaults of various FSDP options in `dataclasses` for readability.\r\n\r\n* fixes\r\n\r\n1. FSDP wrapped model being added to the `_models`.\r\n2. Not passing the env variables when args are None.\r\n\r\n* resolving comments\r\n\r\n* adding FSDP for all the collective operations\r\n\r\n* adding deepspeed and fsdp tests\r\n\r\n1. Removes mrpc datafiles and directly relies on HF datasets as it was throwing `file not found` error when running from within `tests` folder. Updating `moke_dataloaders` as a result.\r\n2. adding `test_performance.py`, `test_memory.py` and `test_checkpointing.py` for multi-gpu FSDP and DeepSpeed tests\r\n\r\n* reverting `mocked_dataloader` changes\r\n\r\n* adding FSDP tests\r\n\r\n* data files revert\r\n\r\n* excluding fsdp tests from `tests_core`\r\n\r\n* try 2\r\n\r\n* adding time delay to avoid `torchrun` from crashing at times leading which causing flaky behaviour\r\n\r\n* reducing the time of tests\r\n\r\n* fixes\r\n\r\n* fix\r\n\r\n* fixes and reduce time further\r\n\r\n* reduce time further and minor fixes\r\n\r\n* adding a deepspeed basic e2e test for single gpu setup", "code": "def test_cpu_offload(self):\n from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload\n\n for flag in [True, False]:\n env = self.dist_env.copy()\n env[\"FSDP_OFFLOAD_PARAMS\"] = str(flag).lower()\n with mockenv_context(**env):\n fsdp_plugin = FullyShardedDataParallelPlugin()\n self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))\n\n\n@require_fsdp\n@require_multi_gpu\n@slow", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "@require_fsdp\n@require_multi_gpu\n@slow", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 100, "n_words": 27, "vocab_size": 25, "complexity": 2, "nloc": 8, "token_counts": 73, "n_ast_nodes": 128, "n_identifiers": 22, "random_cut": "def test_cpu_offload(self):\n from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload\n\n for flag in [True, False]:\n env = self.dist_env.copy()\n env[\"FSDP_OFFLOAD_PARAMS\"] = str(flag).lower()\n with mockenv_context(**env):\n fsdp_plugin = FullyShardedDataParallelPlugin()\n self.assertEqual(fsdp_plugin.cpu_o" }, { "id": 157784, "commit_id": "e292b514b8f4873a36c8ca0ba68b19db2ee8ba44", "repo": "d2l-zh", "path": "d2l/paddle.py", "file_name": "paddle.py", "fun_name": "sgd", "commit_message": "[Paddle] Add chapter chapter_linear-networks (#1134)\n\n* [Paddletest] Add chapter3 chapter_linear-networks\r\n\r\n* [Paddle] Add chapter chapter_linear-networks\r\n\r\n* [Paddle] Add chapter chapter_linear-networks\r\n\r\n* [Paddle] Add chapter3 linear-networks\r\n\r\n* [Paddle] Add chapter3 linear-networks\r\n\r\n* [Paddle] Add chapter3 linear-networks\r\n\r\n* [Paddle] Add chapter3 linear-networks\r\n\r\n* Convert tenor to to_tensor\r\n\r\n* [Paddle] Add chapter_preface\r\n\r\n* Fix get_dataloader_workers mac & windows\r\n\r\n* Remove redundant list/tuple unpacking\r\n\r\n* Minor style fixes\r\n\r\n* sync lib\r\n\r\n* Add stop gradient explaination\r\n\r\n* remove blank content\r\n\r\n* Update softmax-regression-scratch.md\r\n\r\n* Fix the sgd 
bugs\r\n\r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: w5688414 ", "code": "def sgd(params, lr, batch_size):\n \n with paddle.no_grad():\n for i,param in enumerate(params):\n param -= lr * params[i].grad/ batch_size\n params[i].set_value(param.numpy())\n params[i].clear_gradient()\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 64, "n_words": 18, "vocab_size": 18, "complexity": 2, "nloc": 6, "token_counts": 60, "n_ast_nodes": 98, "n_identifiers": 13, "random_cut": "def sgd(params, lr, batch_size):\n \n with paddle.no_grad():\n for i,param in enumerate(params):\n param -= lr * params[i].grad/ batch_size\n params[i].set_va" }, { "id": 131119, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/kubernetes_e2e/test_helm.py", "file_name": "test_helm.py", "fun_name": "delete_rayclusters", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def delete_rayclusters(namespace):\n cmd = f\"kubectl -n {namespace} delete rayclusters --all\"\n try:\n subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode()\n except subprocess.CalledProcessError as e:\n assert False, \"returncode: {}, stdout: {}\".format(e.returncode, e.stdout)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 47, "n_words": 25, "vocab_size": 25, "complexity": 2, "nloc": 6, "token_counts": 53, "n_ast_nodes": 89, "n_identifiers": 14, "random_cut": "def delete_rayclusters(namespace):\n cmd = f\"kubectl -n {namespace} delete rayclusters --all\"\n try:\n subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode()\n except subprocess.CalledProcessError as e:\n assert False, \"returncode: {}, stdout: {}\".format(e.returncode, e.stdout)\n\n" }, { "id": 42678, "commit_id": "70b41e46b46e65c0446a40ab91624cb2291a5039", "repo": "airflow", "path": "tests/models/test_mappedoperator.py", "file_name": "test_mappedoperator.py", "fun_name": "test_map_xcom_arg", "commit_message": "Move MappedOperator tests to mirror code location (#23884)\n\nAt some point during the development of AIP-42 we moved the code for\r\nMappedOperator out of baseoperator.py to mappedoperator.py, but we\r\ndidn't move the tests at the same time", "code": "def test_map_xcom_arg():\n \n with DAG(\"test-dag\", start_date=DEFAULT_DATE):\n task1 = BaseOperator(task_id=\"op1\")\n mapped = MockOperator.partial(task_id='task_2').expand(arg2=XComArg(task1))\n finish = MockOperator(task_id=\"finish\")\n\n mapped >> finish\n\n assert task1.downstream_list == [mapped]\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 58, "n_words": 21, "vocab_size": 17, "complexity": 1, "nloc": 7, "token_counts": 63, "n_ast_nodes": 112, "n_identifiers": 15, "random_cut": "def test_map_xcom_arg():\n \n with DAG(\"test-dag\", start_date=DEFAULT_DATE):\n task1 = BaseOperator(task_id=\"op1\")\n mapped = MockOperator.partial(task_id='task_2').expand(arg2=XComArg(task1))\n finish = MockOperator(task_id=\"finish\")\n\n mapped >" }, { "id": 79450, "commit_id": "ae0603001638e6b03556aef19bdcfa445f9f74c6", "repo": "wagtail", "path": "wagtail/admin/views/generic/models.py", "file_name": "models.py", "fun_name": "get_context_data", "commit_message": "Extract 
generic RevisionsUnscheduleView and make page's unpublish view extend from it", "code": "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"object\": self.object,\n \"revision\": self.revision,\n \"subtitle\": self.get_page_subtitle(),\n \"object_display_title\": self.get_object_display_title(),\n \"revisions_unschedule_url\": self.get_revisions_unschedule_url(),\n \"next_url\": self.get_next_url(),\n }\n )\n return context\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 163, "n_words": 24, "vocab_size": 23, "complexity": 1, "nloc": 13, "token_counts": 72, "n_ast_nodes": 123, "n_identifiers": 12, "random_cut": "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"object\": self.object,\n \"revision\": self.revision,\n \"subtitle\": self.get_page_subtitle(),\n \"object_display_title\": self.get_object_display_title(),\n \"revisions_unsched" }, { "id": 166423, "commit_id": "7c054d6a256fd0186befe03acf9e9e86d81668d6", "repo": "pandas", "path": "pandas/tests/extension/base/groupby.py", "file_name": "groupby.py", "fun_name": "test_in_numeric_groupby", "commit_message": "DEPR: groupby numeric_only default (#47025)", "code": "def test_in_numeric_groupby(self, data_for_grouping):\n df = pd.DataFrame(\n {\n \"A\": [1, 1, 2, 2, 3, 3, 1, 4],\n \"B\": data_for_grouping,\n \"C\": [1, 1, 1, 1, 1, 1, 1, 1],\n }\n )\n\n dtype = data_for_grouping.dtype\n if is_numeric_dtype(dtype) or dtype.name == \"decimal\":\n warn = None\n else:\n warn = FutureWarning\n msg = \"The default value of numeric_only\"\n with tm.assert_produces_warning(warn, match=msg):\n result = df.groupby(\"A\").sum().columns\n\n if data_for_grouping.dtype._is_numeric:\n expected = pd.Index([\"B\", \"C\"])\n else:\n expected = pd.Index([\"C\"])\n\n tm.assert_index_equal(result, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 261, "n_words": 70, "vocab_size": 49, "complexity": 4, "nloc": 21, "token_counts": 153, "n_ast_nodes": 243, "n_identifiers": 23, "random_cut": "def test_in_numeric_groupby(self, data_for_grouping):\n df = pd.DataFrame(\n {\n \"A\": [1, 1, 2, 2, 3, 3, 1, 4],\n \"B\": data_for_grouping,\n \"C\": [1, 1, 1, 1, 1, 1, 1, 1],\n }\n )\n\n dtype = data_for_grouping.dtype\n if is_numeric_dtype(dtype) or dtype.name == \"decimal\":\n warn = None\n else:\n warn = FutureWarning\n msg = \"The default value of numeric_only\"\n with tm.assert_produces_warning(warn, match=msg):\n result = df.groupby(\"A\").sum().columns\n\n if data_for_grouping.dtype._is_numeric:\n expected = pd.Index([\"B\", \"C\"]" }, { "id": 176625, "commit_id": "de1d00f20e0bc14f1cc911b3486e50225a8fa168", "repo": "networkx", "path": "networkx/generators/classic.py", "file_name": "classic.py", "fun_name": "lollipop_graph", "commit_message": "Adjust the usage of nodes_or_number decorator (#5599)\n\n* recorrect typo in decorators.py\r\n\r\n* Update tests to show troubles in current code\r\n\r\n* fix troubles with usage of nodes_or_number\r\n\r\n* fix typo\r\n\r\n* remove nodes_or_number where that makes sense\r\n\r\n* Reinclude nodes_or_numbers and add some tests for nonstandard usage\r\n\r\n* fix typowq\r\n\r\n* hopefully final tweaks (no behavior changes\r\n\r\n* Update test_classic.py\r\n\r\nCo-authored-by: Jarrod Millman ", "code": "def 
lollipop_graph(m, n, create_using=None):\n \n m, m_nodes = m\n M = len(m_nodes)\n if M < 2:\n raise NetworkXError(\"Invalid description: m should indicate at least 2 nodes\")\n\n n, n_nodes = n\n if isinstance(m, numbers.Integral) and isinstance(n, numbers.Integral):\n n_nodes = list(range(M, M + n))\n N = len(n_nodes)\n\n # the ball\n G = complete_graph(m_nodes, create_using)\n if G.is_directed():\n raise NetworkXError(\"Directed Graph not supported\")\n\n # the stick\n G.add_nodes_from(n_nodes)\n if N > 1:\n G.add_edges_from(pairwise(n_nodes))\n\n if len(G) != M + N:\n raise NetworkXError(\"Nodes must be distinct in containers m and n\")\n\n # connect ball to stick\n if M > 0 and N > 0:\n G.add_edge(m_nodes[-1], n_nodes[0])\n return G\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 193, "n_words": 100, "vocab_size": 67, "complexity": 9, "nloc": 20, "token_counts": 157, "n_ast_nodes": 257, "n_identifiers": 22, "random_cut": "def lollipop_graph(m, n, create_using=None):\n \n m, m_nodes = m\n M = len(m_nodes)\n if M < 2:\n raise NetworkXError(\"Invalid description: m should indicate a" }, { "id": 91777, "commit_id": "3ffb14a47d868956ef759a0cd837066629676774", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_details.py", "file_name": "test_organization_metric_details.py", "fun_name": "test_same_entity_multiple_metric_ids", "commit_message": "Revert \"feat(metrics): make indexer more configurable (#35604)\" (#35862)\n\nThis reverts commit 7f60db924ea37f34e0cfe6856777239e2a2ffe13.", "code": "def test_same_entity_multiple_metric_ids(self, mocked_derived_metrics):\n \n mocked_derived_metrics.return_value = MOCKED_DERIVED_METRICS_2\n org_id = self.project.organization.id\n metric_id = indexer.record(org_id, \"metric_foo_doe\")\n\n self.store_session(\n self.build_session(\n project_id=self.project.id,\n started=(time.time() // 60) * 60,\n status=\"ok\",\n release=\"foobar@2.0\",\n errors=2,\n )\n )\n self._send_buckets(\n [\n {\n \"org_id\": org_id,\n \"project_id\": self.project.id,\n \"metric_id\": metric_id,\n \"timestamp\": (time.time() // 60 - 2) * 60,\n \"tags\": {\n resolve_weak(org_id, \"release\"): indexer.record(org_id, \"fooww\"),\n },\n \"type\": \"c\",\n \"value\": 5,\n \"retention_days\": 90,\n },\n ],\n entity=\"metrics_counters\",\n )\n response = self.get_success_response(\n self.organization.slug,\n \"derived_metric.multiple_metrics\",\n )\n assert response.data == {\n \"name\": \"derived_metric.multiple_metrics\",\n \"type\": \"numeric\",\n \"operations\": [],\n \"unit\": \"percentage\",\n \"tags\": [{\"key\": \"release\"}],\n }\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 597, "n_words": 82, "vocab_size": 66, "complexity": 1, "nloc": 41, "token_counts": 193, "n_ast_nodes": 348, "n_identifiers": 27, "random_cut": "def test_same_entity_multiple_metric_ids(self, mocked_derived_metrics):\n \n mocked_derived_metrics.return_value = MOCKED_DERIVED_METRICS_2\n org_id = self.project.organization.id\n metric_id = indexer.record(org_id, \"metric_foo_doe\")\n\n self.store_session(\n self.build_session(\n project_id=self.project.id,\n started=(time.time() // 60) * 60,\n status=\"ok\",\n release=\"foobar@2.0\",\n errors=2,\n )\n )\n self._send_buckets(\n [\n {\n \"org_id\": org_id,\n \"project_id\": self.project.id,\n \"metric_id\": metric_id,\n 
\"timestamp\": (time.time() // 60 - 2) * 60,\n \"tags\": {\n resolve_weak(org_id, \"release\"): indexer.record(org_id, \"fooww\"),\n },\n \"type\": \"c\",\n \"value\": 5,\n \"retention_days\": 90,\n },\n ],\n entity=\"metrics_counters\",\n )\n response = self.get_success_response(\n self.organization.slug,\n \"derive" }, { "id": 176575, "commit_id": "4f2b1b854d5934a487b428f252ad6ff9375d74ad", "repo": "networkx", "path": "networkx/drawing/nx_pydot.py", "file_name": "nx_pydot.py", "fun_name": "read_dot", "commit_message": "improve docstring for read_doc, see issue #5604 (#5605)", "code": "def read_dot(path):\n \n import pydot\n\n data = path.read()\n\n # List of one or more \"pydot.Dot\" instances deserialized from this file.\n P_list = pydot.graph_from_dot_data(data)\n\n # Convert only the first such instance into a NetworkX graph.\n return from_pydot(P_list[0])\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 56, "n_words": 35, "vocab_size": 33, "complexity": 1, "nloc": 5, "token_counts": 31, "n_ast_nodes": 56, "n_identifiers": 8, "random_cut": "def read_dot(path):\n \n import pydot\n\n data = path.read()\n\n # List of one or more \"pydot.Dot\" instances deserialized from this file.\n P_list = pydot.graph_from_dot_d" }, { "id": 132258, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/schedulers/pb2_utils.py", "file_name": "pb2_utils.py", "fun_name": "normalize", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def normalize(data, wrt):\n \n return (data - np.min(wrt, axis=0)) / (\n np.max(wrt, axis=0) - np.min(wrt, axis=0) + 1e-8\n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 34, "n_words": 18, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 51, "n_ast_nodes": 75, "n_identifiers": 7, "random_cut": "def normalize(data, wrt):\n \n return (data - np.min(wrt, axis=0)) / (\n np.ma" }, { "id": 202757, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/db_functions/math/test_sign.py", "file_name": "test_sign.py", "fun_name": "test_transform", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_transform(self):\n with register_lookup(DecimalField, Sign):\n DecimalModel.objects.create(n1=Decimal(\"5.4\"), n2=Decimal(\"0\"))\n DecimalModel.objects.create(n1=Decimal(\"-0.1\"), n2=Decimal(\"0\"))\n obj = DecimalModel.objects.filter(n1__sign__lt=0, n2__sign=0).get()\n self.assertEqual(obj.n1, Decimal(\"-0.1\"))\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 65, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 6, "token_counts": 86, "n_ast_nodes": 146, "n_identifiers": 17, "random_cut": "def test_transform(self):\n with register_lookup(DecimalField, Sign):\n DecimalModel.objects.create(n1=Decimal(\"5.4\"), n2=Decimal(\"0\"))\n DecimalModel.objects.create(n1=Decimal(\"-0.1\"), n2=Decimal(\"0\"))\n obj = DecimalModel.objects.filter(n1__sign__lt=0" }, { "id": 183173, "commit_id": "26f138e69be49f33fe7ff72cebbb51d617a6338f", "repo": "textual", "path": "tests/test_suggestions.py", "file_name": "test_suggestions.py", "fun_name": "test_get_suggestion", "commit_message": "[css] Address 
\"did you mean\" PR feedback", "code": "def test_get_suggestion(word, possible_words, expected_result):\n assert get_suggestion(word, possible_words) == expected_result\n\n\n@pytest.mark.parametrize(\n \"word, possible_words, count, expected_result\",\n (\n [\"background\", (\"background\",), 1, [\"background\"]],\n [\"backgroundu\", (\"background\",), 1, [\"background\"]],\n [\"bkgrund\", (\"background\",), 1, [\"background\"]],\n [\"llow\", (\"background\",), 1, []],\n [\"llow\", (\"background\", \"yellow\"), 1, [\"yellow\"]],\n [\"yllow\", (\"background\", \"yellow\", \"ellow\"), 1, [\"yellow\"]],\n [\"yllow\", (\"background\", \"yellow\", \"ellow\"), 2, [\"yellow\", \"ellow\"]],\n [\"yllow\", (\"background\", \"yellow\", \"red\"), 2, [\"yellow\"]],\n ),\n)", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"word, possible_words, count, expected_result\",\n (\n [\"background\", (\"background\",), 1, [\"background\"]],\n [\"backgroundu\", (\"background\",), 1, [\"background\"]],\n [\"bkgrund\", (\"background\",), 1, [\"background\"]],\n [\"llow\", (\"background\",), 1, []],\n [\"llow\", (\"background\", \"yellow\"), 1, [\"yellow\"]],\n [\"yllow\", (\"background\", \"yellow\", \"ellow\"), 1, [\"yellow\"]],\n [\"yllow\", (\"background\", \"yellow\", \"ellow\"), 2, [\"yellow\", \"ellow\"]],\n [\"yllow\", (\"background\", \"yellow\", \"red\"), 2, [\"yellow\"]],\n ),\n)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 122, "n_words": 57, "vocab_size": 34, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 265, "n_identifiers": 8, "random_cut": "def test_get_suggestion(word, possible_words, expected_result):\n assert get_suggestion(word, possible_words) == expected_result\n\n\n@pytest.mark.parametrize(\n \"word, possible_words, count, expected_result\",\n (\n [\"background\", (\"background\",), 1, [\"background\"]],\n [\"backgroundu\", (\"background\",), 1, [\"background\"]" }, { "id": 17330, "commit_id": "2d2673c42db3abc79e52ec83b050f12ca1a90fc5", "repo": "ccxt", "path": "python/ccxt/binance.py", "file_name": "binance.py", "fun_name": "set_margin_mode", "commit_message": "1.71.93\n\n[ci skip]", "code": "def set_margin_mode(self, marginType, symbol=None, params={}):\n #\n # {\"code\": -4048 , \"msg\": \"Margin type cannot be changed if there exists position.\"}\n #\n # or\n #\n # {\"code\": 200, \"msg\": \"success\"}\n #\n marginType = marginType.upper()\n if marginType == 'CROSS':\n marginType = 'CROSSED'\n if (marginType != 'ISOLATED') and (marginType != 'CROSSED'):\n raise BadRequest(self.id + ' marginType must be either isolated or cross')\n self.load_markets()\n market = self.market(symbol)\n method = None\n if market['linear']:\n method = 'fapiPrivatePostMarginType'\n elif market['inverse']:\n method = 'dapiPrivatePostMarginType'\n else:\n raise NotSupported(self.id + ' setMarginMode() supports linear and inverse contracts only')\n request = {\n 'symbol': market['id'],\n 'marginType': marginType,\n }\n return getattr(self, method)(self.extend(request, params))\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 309, "n_words": 100, "vocab_size": 69, "complexity": 6, "nloc": 20, "token_counts": 130, "n_ast_nodes": 234, "n_identifiers": 15, "random_cut": "def set_margin_mode(self, marginType, symbol=None, params={}):\n #\n # {\"code\": -4048 , \"msg\": \"Margin type cannot be changed if there exists position.\"}\n 
#\n # or\n #\n # {\"code\": 200, \"msg\": \"success\"}\n #\n marginType = marginType.upper()\n if marginType == 'CROSS':\n marginType = 'CROSSED'\n if (marginType != 'ISOLATED') and (marginType != 'CROSSED'):\n raise BadRequest(self.id + ' marginType must be either isolated or cross')\n self.load_markets()\n market = self.market(symbol)\n method = None\n if market['linear']:\n method = 'fapiPrivatePostMarginType'\n elif market['inverse']:\n method = 'dapiPrivatePostMarginType'\n else:\n raise NotSupported(self.id + ' setMarginMode() supports linear and inverse contracts only')\n request = {\n 'symbol': market['id'],\n 'marginType': marginType,\n }\n return getat" }, { "id": 170204, "commit_id": "93bd1a8ece37657e887808b1492d3715e25e8bd3", "repo": "pandas", "path": "pandas/tests/series/indexing/test_setitem.py", "file_name": "test_setitem.py", "fun_name": "test_setitem_scalar_into_readonly_backing_data", "commit_message": "STYLE: fix some consider-using-enumerate pylint warnings (#49214)\n\n* STYLE: fix some consider-using-enumerate pylint errors\r\n\r\n* fixup! STYLE: fix some consider-using-enumerate pylint errors\r\n\r\n* fixup! fixup! STYLE: fix some consider-using-enumerate pylint errors\r\n\r\n* fixup! fixup! fixup! STYLE: fix some consider-using-enumerate pylint errors\r\n\r\n* fixup! fixup! fixup! fixup! STYLE: fix some consider-using-enumerate pylint errors", "code": "def test_setitem_scalar_into_readonly_backing_data():\n # GH#14359: test that you cannot mutate a read only buffer\n\n array = np.zeros(5)\n array.flags.writeable = False # make the array immutable\n series = Series(array)\n\n for n in series.index:\n msg = \"assignment destination is read-only\"\n with pytest.raises(ValueError, match=msg):\n series[n] = 1\n\n assert array[n] == 0\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 94, "n_words": 47, "vocab_size": 41, "complexity": 2, "nloc": 9, "token_counts": 60, "n_ast_nodes": 100, "n_identifiers": 15, "random_cut": "def test_setitem_scalar_into_readonly_backing_data():\n # GH#14359: test that you cannot mutate a read only buffer\n\n array = np.zeros(5)\n array.flags.writeable = False # make the array immutable\n series = Series(array)\n\n for n in series.index:\n msg = \"assignment destination is read-only\"\n with pytest.raises(ValueError, match=msg):\n series" }, { "id": 61324, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/urls.py", "file_name": "urls.py", "fun_name": "get_url_scheme", "commit_message": "upd; format", "code": "def get_url_scheme(url):\n # type: (str) -> Optional[str]\n if \":\" not in url:\n return None\n return url.split(\":\", 1)[0].lower()\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 32, "n_words": 17, "vocab_size": 16, "complexity": 2, "nloc": 4, "token_counts": 29, "n_ast_nodes": 50, "n_identifiers": 4, "random_cut": "def get_url_scheme(url):\n # type: (str) -> Optional[str]\n if \":\" not in url:\n return None\n" }, { "id": 15210, "commit_id": "8543cfb54ecfae0f51f4b77a8df7b38aa0626094", "repo": "ccxt", "path": "python/ccxt/async_support/coinbase.py", "file_name": "coinbase.py", "fun_name": "parse_transaction", "commit_message": "1.66.55\n\n[ci skip]", "code": "def parse_transaction(self, transaction, market=None):\n #\n # fiat 
deposit\n #\n # {\n # \"id\": \"f34c19f3-b730-5e3d-9f72\",\n # \"status\": \"completed\",\n # \"payment_method\": {\n # \"id\": \"a022b31d-f9c7-5043-98f2\",\n # \"resource\": \"payment_method\",\n # \"resource_path\": \"/v2/payment-methods/a022b31d-f9c7-5043-98f2\"\n # },\n # \"transaction\": {\n # \"id\": \"04ed4113-3732-5b0c-af86-b1d2146977d0\",\n # \"resource\": \"transaction\",\n # \"resource_path\": \"/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/transactions/04ed4113-3732-5b0c-af86\"\n # },\n # \"user_reference\": \"2VTYTH\",\n # \"created_at\": \"2017-02-09T07:01:18Z\",\n # \"updated_at\": \"2017-02-09T07:01:26Z\",\n # \"resource\": \"deposit\",\n # \"resource_path\": \"/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/deposits/f34c19f3-b730-5e3d-9f72\",\n # \"committed\": True,\n # \"payout_at\": \"2017-02-12T07:01:17Z\",\n # \"instant\": False,\n # \"fee\": {\"amount\": \"0.00\", \"currency\": \"EUR\"},\n # \"amount\": {\"amount\": \"114.02\", \"currency\": \"EUR\"},\n # \"subtotal\": {\"amount\": \"114.02\", \"currency\": \"EUR\"},\n # \"hold_until\": null,\n # \"hold_days\": 0,\n # \"hold_business_days\": 0,\n # \"next_step\": null\n # }\n #\n # fiat_withdrawal\n #\n # {\n # \"id\": \"cfcc3b4a-eeb6-5e8c-8058\",\n # \"status\": \"completed\",\n # \"payment_method\": {\n # \"id\": \"8b94cfa4-f7fd-5a12-a76a\",\n # \"resource\": \"payment_method\",\n # \"resource_path\": \"/v2/payment-methods/8b94cfa4-f7fd-5a12-a76a\"\n # },\n # \"transaction\": {\n # \"id\": \"fcc2550b-5104-5f83-a444\",\n # \"resource\": \"transaction\",\n # \"resource_path\": \"/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/transactions/fcc2550b-5104-5f83-a444\"\n # },\n # \"user_reference\": \"MEUGK\",\n # \"created_at\": \"2018-07-26T08:55:12Z\",\n # \"updated_at\": \"2018-07-26T08:58:18Z\",\n # \"resource\": \"withdrawal\",\n # \"resource_path\": \"/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/withdrawals/cfcc3b4a-eeb6-5e8c-8058\",\n # \"committed\": True,\n # \"payout_at\": \"2018-07-31T08:55:12Z\",\n # \"instant\": False,\n # \"fee\": {\"amount\": \"0.15\", \"currency\": \"EUR\"},\n # \"amount\": {\"amount\": \"13130.69\", \"currency\": \"EUR\"},\n # \"subtotal\": {\"amount\": \"13130.84\", \"currency\": \"EUR\"},\n # \"idem\": \"e549dee5-63ed-4e79-8a96\",\n # \"next_step\": null\n # }\n #\n subtotalObject = self.safe_value(transaction, 'subtotal', {})\n feeObject = self.safe_value(transaction, 'fee', {})\n id = self.safe_string(transaction, 'id')\n timestamp = self.parse8601(self.safe_value(transaction, 'created_at'))\n updated = self.parse8601(self.safe_value(transaction, 'updated_at'))\n type = self.safe_string(transaction, 'resource')\n amount = self.safe_number(subtotalObject, 'amount')\n currencyId = self.safe_string(subtotalObject, 'currency')\n currency = self.safe_currency_code(currencyId)\n feeCost = self.safe_number(feeObject, 'amount')\n feeCurrencyId = self.safe_string(feeObject, 'currency')\n feeCurrency = self.safe_currency_code(feeCurrencyId)\n fee = {\n 'cost': feeCost,\n 'currency': feeCurrency,\n }\n status = self.parse_transaction_status(self.safe_string(transaction, 'status'))\n if status is None:\n committed = self.safe_value(transaction, 'committed')\n status = 'ok' if committed else 'pending'\n return {\n 'info': transaction,\n 'id': id,\n 'txid': id,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'network': None,\n 'address': None,\n 'addressTo': None,\n 'addressFrom': None,\n 'tag': None,\n 'tagTo': None,\n 'tagFrom': None,\n 'type': type,\n 'amount': 
amount,\n 'currency': currency,\n 'status': status,\n 'updated': updated,\n 'fee': fee,\n }\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 1594, "n_words": 306, "vocab_size": 145, "complexity": 3, "nloc": 41, "token_counts": 272, "n_ast_nodes": 523, "n_identifiers": 26, "random_cut": "def parse_transaction(self, transaction, market=None):\n #\n # fiat deposit\n #\n # {\n # \"id\": \"f34c19f3-b730-5e3d-9f72\",\n # \"status\": \"completed\",\n # \"payment_method\": {\n # \"id\": \"a022b31d-f9c7-5043-98f2\",\n # \"resource\": \"payment_method\",\n # \"resource_path\": \"/v2/payment-methods/a022b31d-f9c7-5043-98f2\"\n # },\n # \"transaction\": {\n # \"id\": \"04ed4113-3732-5b0c-af86-b1d2146977d0\",\n # \"resource\": \"transaction\",\n # \"resource_path\": \"/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/transactions/04ed4113-3732-5b0c-" }, { "id": 122719, "commit_id": "ac7740513d0b47894d9170af6aaa6b9355fb2059", "repo": "jax", "path": "jaxlib/gpu_prng.py", "file_name": "gpu_prng.py", "fun_name": "_threefry2x32_lowering", "commit_message": "Raise error for unsupported shape polymorphism for custom call and fallback lowering", "code": "def _threefry2x32_lowering(prng, platform, keys, data):\n \n assert len(keys) == 2, keys\n assert len(data) == 2, data\n assert (ir.RankedTensorType(keys[0].type).element_type ==\n ir.IntegerType.get_unsigned(32)), keys[0].type\n typ = keys[0].type\n dims = ir.RankedTensorType(typ).shape\n if any(d < 0 for d in dims):\n raise NotImplementedError(\"Shape polymorphism for custom call is not implemented (threefry); b/261671778\")\n\n for x in itertools.chain(keys, data):\n assert x.type == typ, (x.type, typ)\n ndims = len(dims)\n\n opaque = prng.threefry2x32_descriptor(_prod(dims))\n layout = tuple(range(ndims - 1, -1, -1))\n return custom_call(\n f\"{platform}_threefry2x32\",\n [typ, typ],\n [keys[0], keys[1], data[0], data[1]],\n backend_config=opaque,\n operand_layouts=[layout] * 4,\n result_layouts=[layout] * 2)\n\n\ncuda_threefry2x32 = partial(_threefry2x32_lowering, _cuda_prng, \"cu\")\nrocm_threefry2x32 = partial(_threefry2x32_lowering, _hip_prng, \"hip\")\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 150, "n_words": 95, "vocab_size": 75, "complexity": 4, "nloc": 21, "token_counts": 211, "n_ast_nodes": 345, "n_identifiers": 37, "random_cut": "def _threefry2x32_lowering(prng, platform, keys, data):\n \n assert len(keys) == 2, keys\n assert len(data) == 2, data\n assert (ir.RankedTensorType(keys[0].type).element_type ==\n ir.IntegerType.get_unsigned(32)), keys[0].type\n typ = keys[0].type\n dims = ir.RankedTensorType(typ).shape\n if any(d < 0 for d in dims):\n raise NotImplementedError(\"Shape polymorphism for custom call is not implemented (threefry); b/261671778\")\n\n for x in itertools.chain(keys, data):\n assert x.type == typ, (x.type, typ)\n ndims = len(dims)\n\n opaque = prng.threefry2x32_descriptor(_prod(dims))\n layout = tuple(range(ndims - 1, -1, -1))\n return custom_call(\n f\"{platform}_threefry2x32\",\n [typ, typ],\n [keys[0], keys[1], data[0], data[1]],\n backend_config=opaque,\n op" }, { "id": 66275, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/report/vehicle_expenses/vehicle_expenses.py", "file_name": "vehicle_expenses.py", "fun_name": "get_period_dates", "commit_message": "style: format code with black", 
"code": "def get_period_dates(filters):\n\tif filters.filter_based_on == \"Fiscal Year\" and filters.fiscal_year:\n\t\tfy = frappe.db.get_value(\n\t\t\t\"Fiscal Year\", filters.fiscal_year, [\"year_start_date\", \"year_end_date\"], as_dict=True\n\t\t)\n\t\treturn fy.year_start_date, fy.year_end_date\n\telse:\n\t\treturn filters.from_date, filters.to_date\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 18, "n_words": 26, "vocab_size": 24, "complexity": 3, "nloc": 8, "token_counts": 58, "n_ast_nodes": 95, "n_identifiers": 13, "random_cut": "def get_period_dates(filters):\n\tif filters.filter_based_on == \"Fiscal Year\" and filters.fiscal_year:\n\t\tfy = frappe.db.get_value(\n\t\t" }, { "id": 60723, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/index/collector.py", "file_name": "collector.py", "fun_name": "_ensure_html_header", "commit_message": "upd; format", "code": "def _ensure_html_header(response):\n # type: (Response) -> None\n \n content_type = response.headers.get(\"Content-Type\", \"\")\n if not content_type.lower().startswith(\"text/html\"):\n raise _NotHTML(content_type, response.request.method)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 36, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 4, "token_counts": 42, "n_ast_nodes": 76, "n_identifiers": 10, "random_cut": "def _ensure_html_header(response):\n # type: (Response) -> None\n \n content_type = response.headers.get(\"Content-Type\", \"\")\n if not content_type.lower().startswith(\"text/html\"):\n raise _NotHTML(content_type, response.request.method)\n\n" }, { "id": 198470, "commit_id": "9d58006fc0a23afcba38f641c9472917c436428a", "repo": "sympy", "path": "sympy/core/expr.py", "file_name": "expr.py", "fun_name": "_parse_order", "commit_message": "Code cleanup", "code": "def _parse_order(cls, order):\n \n from sympy.polys.orderings import monomial_key\n\n startswith = getattr(order, \"startswith\", None)\n if startswith is None:\n reverse = False\n else:\n reverse = startswith('rev-')\n if reverse:\n order = order[4:]\n\n monom_key = monomial_key(order)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 121, "n_words": 31, "vocab_size": 24, "complexity": 3, "nloc": 13, "token_counts": 66, "n_ast_nodes": 97, "n_identifiers": 11, "random_cut": "def _parse_order(cls, order):\n \n from sympy.polys.orderings import monomial_key\n\n startswith = getattr(order, \"startswith\", None)\n if startswith is None:\n reverse = False\n else:\n reverse = startswith('rev-')\n if reverse:\n order = order[4:]\n\n monom_key = mono" }, { "id": 81573, "commit_id": "46be2d9e5b4423f316d6fae4a080d36716622c15", "repo": "awx", "path": "awx/main/tasks/jobs.py", "file_name": "jobs.py", "fun_name": "sync_and_copy", "commit_message": "Replace git shallow clone with shutil.copytree\n\nIntroduce build_project_dir method\n the base method will create an empty project dir for workdir\n\nShare code between job and inventory tasks with new mixin\n combine rest of pre_run_hook logic\n structure to hold lock for entire sync process\n\nforce sync to run for inventory updates due to UI issues\n\nRemove reference to removed scm_last_revision field", "code": "def 
sync_and_copy(self, project, private_data_dir, scm_branch=None):\n self.acquire_lock(project, self.instance.id)\n\n try:\n original_branch = None\n project_path = project.get_project_path(check_if_exists=False)\n if project.scm_type == 'git' and (scm_branch and scm_branch != project.scm_branch):\n if os.path.exists(project_path):\n git_repo = git.Repo(project_path)\n if git_repo.head.is_detached:\n original_branch = git_repo.head.commit\n else:\n original_branch = git_repo.active_branch\n\n return self.sync_and_copy_without_lock(project, private_data_dir, scm_branch=scm_branch)\n finally:\n # We have made the copy so we can set the tree back to its normal state\n if original_branch:\n # for git project syncs, non-default branches can be problems\n # restore to branch the repo was on before this run\n try:\n original_branch.checkout()\n except Exception:\n # this could have failed due to dirty tree, but difficult to predict all cases\n logger.exception(f'Failed to restore project repo to prior state after {self.instance.id}')\n\n self.release_lock(project)\n\n\n@task(queue=get_local_queuename)", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "@task(queue=get_local_queuename)", "n_ast_errors": 1, "ast_levels": 19, "n_whitespaces": 445, "n_words": 114, "vocab_size": 85, "complexity": 9, "nloc": 20, "token_counts": 137, "n_ast_nodes": 245, "n_identifiers": 32, "random_cut": "def sync_and_copy(self, project, private_data_dir, scm_branch=None):\n self.acquire_lock(project, self.instance.id)\n\n try:\n original_branch = None\n project_path = project.get_project_path(check_if_exists=False)\n if project.scm_type == 'git' and (scm_branch and scm_branch != project.scm_branch):\n if os.path.exists(project_path):\n git_repo = git.Repo(project_path)\n if git_repo.head.is_detached:\n original_branch = git_repo.head.commit\n else:\n original_branch = git_repo.active_branch\n\n return self.sync_and_copy_without_lock(project, private_data_dir, scm_branch=scm_branch)\n finally:\n # We have made the copy so we can set the tree back to its normal state\n if original_branch:\n # for git project syncs, non-default branches can be problems\n # restore to branch the repo w" }, { "id": 188799, "commit_id": "9a95d8b0c26bdaea17ea9264ab45e8a81b6422f0", "repo": "calibre", "path": "src/calibre/gui2/preferences/create_custom_column.py", "file_name": "create_custom_column.py", "fun_name": "current_columns", "commit_message": "More CreateNewCustomColumn stuff.\n- Improved documentation\n- Check column headings for duplicates\n- Method to return the current column headings as a dict\n- Improved exception handling", "code": "def current_columns(self):\n \n # deepcopy to prevent users from changing it. The new MappingProxyType\n # isn't enough because only the top-level dict is immutable, not the\n # items in the dict.\n return copy.deepcopy(self.custcols)\n\n", "url": "https://github.com/kovidgoyal/calibre.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 67, "n_words": 32, "vocab_size": 28, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 30, "n_identifiers": 5, "random_cut": "def current_columns(self):\n \n # deepcopy to prevent users from changing it. 
The new MappingProxyType\n # isn't enough because only the top" }, { "id": 55461, "commit_id": "5c08c3ed69793298f86f1484f149951ee2a0847f", "repo": "prefect", "path": "tests/orion/models/test_orm.py", "file_name": "test_orm.py", "fun_name": "many_task_run_states", "commit_message": "Fix some timestamp sensitive tests on windows", "code": "async def many_task_run_states(flow_run, session, db):\n \n\n # clear all other task runs\n await session.execute(sa.delete(db.TaskRun))\n await session.execute(sa.delete(db.TaskRunState))\n\n for i in range(5):\n task_run = await models.task_runs.create_task_run(\n session=session,\n task_run=schemas.actions.TaskRunCreate(\n flow_run_id=flow_run.id,\n task_key=\"test-task\",\n dynamic_key=str(i),\n ),\n )\n\n states = [\n db.TaskRunState(\n task_run_id=task_run.id,\n **schemas.states.State(\n type={\n 0: schemas.states.StateType.PENDING,\n 1: schemas.states.StateType.RUNNING,\n 2: schemas.states.StateType.COMPLETED,\n }[i],\n timestamp=pendulum.now(\"UTC\").add(minutes=i),\n ).dict(),\n )\n for i in range(3)\n ]\n\n task_run.set_state(states[-1])\n\n session.add_all(states)\n\n await session.commit()\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 23, "n_whitespaces": 398, "n_words": 56, "vocab_size": 48, "complexity": 3, "nloc": 29, "token_counts": 198, "n_ast_nodes": 309, "n_identifiers": 40, "random_cut": "async def many_task_run_states(flow_run, session, db):\n \n\n # clear all other task runs\n await session.execute(sa.delete(db.TaskRun))\n await session.execute(sa.delete(db.TaskRunState))\n\n for i in range(5):\n task_run = await models.task_runs.create_task_run(\n session=session,\n task_run=schemas.actions.TaskRunCreate(\n flow_run_id=flow_run.id,\n task_key=\"test-task\",\n dynamic_key=str(i),\n ),\n )\n\n states = [\n db.TaskRunState(\n task_run_id=task_run.id,\n **schemas.states.State(\n type={\n 0: schemas.states.StateType.PENDING,\n 1: schemas.states.StateType.RUNNING,\n " }, { "id": 296538, "commit_id": "23446fa1c0a8579ae314151651b6973af600df09", "repo": "core", "path": "homeassistant/components/filesize/sensor.py", "file_name": "sensor.py", "fun_name": "_async_update_data", "commit_message": "Fix file size last_updated (#70114)\n\nCo-authored-by: J. 
Nick Koston ", "code": "async def _async_update_data(self) -> dict[str, float | int | datetime]:\n \n try:\n statinfo = os.stat(self._path)\n except OSError as error:\n raise UpdateFailed(f\"Can not retrieve file statistics {error}\") from error\n\n size = statinfo.st_size\n last_updated = datetime.utcfromtimestamp(statinfo.st_mtime).replace(\n tzinfo=dt_util.UTC\n )\n\n _LOGGER.debug(\"size %s, last updated %s\", size, last_updated)\n data: dict[str, int | float | datetime] = {\n \"file\": round(size / 1e6, 2),\n \"bytes\": size,\n \"last_updated\": last_updated,\n }\n\n return data\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 199, "n_words": 63, "vocab_size": 53, "complexity": 2, "nloc": 17, "token_counts": 112, "n_ast_nodes": 184, "n_identifiers": 27, "random_cut": "async def _async_update_data(self) -> dict[str, float | int | datetime]:\n \n try:\n statinfo = os.stat(self._path)\n except OSError as error:\n raise UpdateFailed(f\"Can not retrieve file statistics {error}\") from error\n\n size = statinfo.st_size\n last_updated = datetime.utcfromtimestamp(statinfo.st_mtime).replace(\n tzinfo=dt_util.UTC\n )\n\n _LOGGER.debug(\"size %s, last updated %s\", size, last_updated)\n data: dict[str, int | float | datetime] = {\n \"file\": round(size / " }, { "id": 307749, "commit_id": "b29605060a74c441550708ccf4ace4b697f66ae6", "repo": "core", "path": "homeassistant/components/hdmi_cec/media_player.py", "file_name": "media_player.py", "fun_name": "update", "commit_message": "Enforce MediaPlayerState in hdmi_cec media player (#78522)", "code": "def update(self) -> None:\n \n device = self._device\n if device.power_status in [POWER_OFF, 3]:\n self._attr_state = MediaPlayerState.OFF\n elif not self.support_pause:\n if device.power_status in [POWER_ON, 4]:\n self._attr_state = MediaPlayerState.ON\n elif device.status == STATUS_PLAY:\n self._attr_state = MediaPlayerState.PLAYING\n elif device.status == STATUS_STOP:\n self._attr_state = MediaPlayerState.IDLE\n elif device.status == STATUS_STILL:\n self._attr_state = MediaPlayerState.PAUSED\n else:\n _LOGGER.warning(\"Unknown state: %s\", device.status)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 189, "n_words": 52, "vocab_size": 33, "complexity": 7, "nloc": 16, "token_counts": 109, "n_ast_nodes": 175, "n_identifiers": 21, "random_cut": "def update(self) -> None:\n \n device = self._device\n if device.power_status in [POWER_OFF, 3]:\n self._attr_state = MediaPlayerState.OFF\n elif not self.support_pause:\n if device.power_status in [POWER_ON, 4]:\n self._attr_state = MediaPlayerState." 
}, { "id": 94571, "commit_id": "b25bf3d4efa751232673b1e9d2a07ee439994348", "repo": "sentry", "path": "tests/sentry/utils/suspect_resolutions/test_commit_correlation.py", "file_name": "test_commit_correlation.py", "fun_name": "test_get_files_changed_no_shared_files", "commit_message": "ref(suspect-resolutions): refactor code around (#37775)\n\n* refactor code\r\n\r\n* fix metric correlation test", "code": "def test_get_files_changed_no_shared_files(self):\n (project, issue, release, repo) = self.setup()\n Activity.objects.create(\n project=project, group=issue, type=ActivityType.SET_RESOLVED_IN_COMMIT.value\n )\n release2 = self.create_release()\n issue2 = self.create_group()\n commit2 = Commit.objects.create(\n organization_id=project.organization_id, repository_id=repo.id, key=\"2\"\n )\n ReleaseCommit.objects.create(\n organization_id=project.organization_id, release=release2, commit=commit2, order=1\n )\n CommitFileChange.objects.create(\n organization_id=project.organization_id, commit=commit2, filename=\".gitignore\"\n )\n GroupRelease.objects.create(\n project_id=project.id, group_id=issue2.id, release_id=release2.id\n )\n\n res1 = get_files_changed_in_releases(issue.id, project.id)\n res2 = get_files_changed_in_releases(issue2.id, project.id)\n\n assert res1.files_changed == {\".random\", \".random2\"}\n assert res2.files_changed == {\".gitignore\"}\n assert res1.release_ids == [release.id]\n assert res2.release_ids == [release2.id]\n assert not is_issue_commit_correlated(issue.id, issue2.id, project.id).is_correlated\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 266, "n_words": 72, "vocab_size": 52, "complexity": 1, "nloc": 26, "token_counts": 228, "n_ast_nodes": 347, "n_identifiers": 41, "random_cut": "def test_get_files_changed_no_shared_files(self):\n (project, issue, release, repo) = self.setup()\n Activity.objects.create(\n project=project, group=issue, type=ActivityType.SET_RESOLVED_IN_COMMIT.value\n )\n release2 = self.create_release()\n issue2 = self.create_group()\n commit2 = Commit.objects.create(\n organization_id=project.organization_id, repository_id=repo.id, key=\"2\"\n )\n ReleaseCommit.objects.create(\n organization_id=project.organization_id, release=release2, commit=commit2, order=1\n )\n CommitFileChange.objects.create(\n organization_id=project.organization_id, commit=commit2, filename=\".gitignore\"\n )\n GroupRelease.objects.create(\n project_id=project.id, group_id=issue2.id, release_id=release2.id\n )\n\n res1 = get_files_changed_in_releases(issue.id, project.id)\n res2 = get_files_changed_in_releases(issue2.id, project.id)\n\n assert res1.files_changed == {\".random\", \".random2\"}\n assert res2.files_changed == {\".gitignore\"}\n assert res1.release_ids == [release.id]\n assert res2.release_ids == [release2.i" }, { "id": 107077, "commit_id": "7749b7b153219738dcf30f0acbad310a2550aa19", "repo": "matplotlib", "path": "lib/mpl_toolkits/tests/test_axisartist_axislines.py", "file_name": "test_axisartist_axislines.py", "fun_name": "test_ParasiteAxesAuxTrans", "commit_message": "Expire axes_grid1/axisartist deprecations.", "code": "def test_ParasiteAxesAuxTrans():\n # Remove this line when this test image is regenerated.\n plt.rcParams['pcolormesh.snap'] = False\n\n data = np.ones((6, 6))\n data[2, 2] = 2\n data[0, :] = 0\n data[-2, :] = 0\n data[:, 0] = 0\n data[:, -2] = 0\n x = np.arange(6)\n y = np.arange(6)\n xx, yy = np.meshgrid(x, y)\n\n funcnames = 
['pcolor', 'pcolormesh', 'contourf']\n\n fig = plt.figure()\n for i, name in enumerate(funcnames):\n\n ax1 = SubplotHost(fig, 1, 3, i+1)\n fig.add_subplot(ax1)\n\n ax2 = ParasiteAxes(ax1, IdentityTransform())\n ax1.parasites.append(ax2)\n if name.startswith('pcolor'):\n getattr(ax2, name)(xx, yy, data[:-1, :-1])\n else:\n getattr(ax2, name)(xx, yy, data)\n ax1.set_xlim((0, 5))\n ax1.set_ylim((0, 5))\n\n ax2.contour(xx, yy, data, colors='k')\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 217, "n_words": 95, "vocab_size": 70, "complexity": 3, "nloc": 25, "token_counts": 237, "n_ast_nodes": 371, "n_identifiers": 32, "random_cut": "def test_ParasiteAxesAuxTrans():\n # Remove this line when this test image is regenerated.\n plt.rcParams['pcolormesh.snap'] = False\n\n data = np.ones((6, 6))\n data[2, 2] = 2\n data[0, :] = 0\n data[-2, :] = 0\n data[:, 0] = 0\n data[:, -2] = 0\n x = np.arange(6)\n y = np.arange(6)\n xx, yy = np.meshgrid(x" }, { "id": 321278, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "qutebrowser/mainwindow/tabwidget.py", "file_name": "tabwidget.py", "fun_name": "subElementRect", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def subElementRect(self, sr, opt, widget=None):\n \n if sr == QStyle.SubElement.SE_TabBarTabText:\n layouts = self._tab_layout(opt)\n if layouts is None:\n log.misc.warning(\"Could not get layouts for tab!\")\n return QRect()\n return layouts.text\n elif sr in [QStyle.SubElement.SE_TabWidgetTabBar,\n QStyle.SubElement.SE_TabBarScrollLeftButton]:\n # Handling SE_TabBarScrollLeftButton so the left scroll button is\n # aligned properly. Otherwise, empty space will be shown after the\n # last tab even though the button width is set to 0\n #\n # Need to use super() because we also use super() to render\n # element in drawControl(); otherwise, we may get bit by\n # style differences...\n return super().subElementRect(sr, opt, widget)\n else:\n return self._style.subElementRect(sr, opt, widget)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 307, "n_words": 98, "vocab_size": 70, "complexity": 4, "nloc": 12, "token_counts": 97, "n_ast_nodes": 158, "n_identifiers": 19, "random_cut": "def subElementRect(self, sr, opt, widget=None):\n \n if sr == QStyle.SubElement.SE_TabBarTabText:\n layouts = self._tab_layout(opt)\n if layouts is None:\n log.misc.warning(\"Could not get layouts for tab!\")\n return QR" }, { "id": 110506, "commit_id": "ffcc8d314c8a47772ba541027f138ee18155d7e6", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_axes.py", "file_name": "test_axes.py", "fun_name": "test_cla_clears_chlidren_axes_and_fig", "commit_message": "MNT: when clearing an Axes via clear/cla fully detach children\n\nReset the Axes and Figure of the children to None to help break cycles.\n\nCloses #6982", "code": "def test_cla_clears_chlidren_axes_and_fig():\n fig, ax = plt.subplots()\n lines = ax.plot([], [], [], [])\n img = ax.imshow([[1]])\n for art in lines + [img]:\n assert art.axes is ax\n assert art.figure is fig\n ax.clear()\n for art in lines + [img]:\n assert art.axes is None\n assert art.figure is None\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 89, "n_words": 44, "vocab_size": 24, 
"complexity": 3, "nloc": 11, "token_counts": 90, "n_ast_nodes": 140, "n_identifiers": 13, "random_cut": "def test_cla_clears_chlidren_axes_and_fig():\n fig, ax = plt.subplots()" }, { "id": 196747, "commit_id": "2110dbe01539e03ef8634deac4c40f895da38daa", "repo": "sympy", "path": "sympy/crypto/crypto.py", "file_name": "crypto.py", "fun_name": "rsa_public_key", "commit_message": "Fix a misspelling", "code": "def rsa_public_key(*args, **kwargs):\n r\n return _rsa_key(*args, public=True, private=False, **kwargs)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 14, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 170, "token_counts": 28, "n_ast_nodes": 43, "n_identifiers": 6, "random_cut": "def rsa_public_key(*args, **kwargs):\n r\n return _rsa_key(*args, public=True, private=False, **kwargs)\n\n" }, { "id": 318171, "commit_id": "90458ee200d6d9e6fe7458fec3021d904e365c13", "repo": "core", "path": "homeassistant/components/zerproc/light.py", "file_name": "light.py", "fun_name": "async_update", "commit_message": "Use attributes in zerproc light (#75951)", "code": "async def async_update(self) -> None:\n \n try:\n if not self.available:\n await self._light.connect()\n state = await self._light.get_state()\n except pyzerproc.ZerprocException:\n if self.available:\n _LOGGER.warning(\"Unable to connect to %s\", self._light.address)\n self._attr_available = False\n return\n if not self.available:\n _LOGGER.info(\"Reconnected to %s\", self._light.address)\n self._attr_available = True\n self._attr_is_on = state.is_on\n hsv = color_util.color_RGB_to_hsv(*state.color)\n self._attr_hs_color = hsv[:2]\n self._attr_brightness = int(round((hsv[2] / 100) * 255))\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 218, "n_words": 55, "vocab_size": 38, "complexity": 5, "nloc": 18, "token_counts": 132, "n_ast_nodes": 220, "n_identifiers": 24, "random_cut": "async def async_update(self) -> None:\n \n try:\n if not self.available:\n await self._light.connect()\n state = await self._light.get_state()\n except pyzerproc.Zerproc" }, { "id": 162364, "commit_id": "e0ddbd02bd1c365b95bb88eaa6e4e0238faf35eb", "repo": "yt-dlp", "path": "yt_dlp/extractor/awaan.py", "file_name": "awaan.py", "fun_name": "_parse_video_data", "commit_message": "[cleanup] Use format_field where applicable", "code": "def _parse_video_data(self, video_data, video_id, is_live):\n title = video_data.get('title_en') or video_data['title_ar']\n img = video_data.get('img')\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': video_data.get('description_en') or video_data.get('description_ar'),\n 'thumbnail': format_field(img, template='http://admin.mangomolo.com/analytics/%s'),\n 'duration': int_or_none(video_data.get('duration')),\n 'timestamp': parse_iso8601(video_data.get('create_time'), ' '),\n 'is_live': is_live,\n 'uploader_id': video_data.get('user_id'),\n }\n\n", "url": "https://github.com/yt-dlp/yt-dlp.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 152, "n_words": 37, "vocab_size": 34, "complexity": 3, "nloc": 13, "token_counts": 109, "n_ast_nodes": 194, "n_identifiers": 12, "random_cut": "def _parse_video_data(self, video_data, video_id, is_live):\n title = video_data.get('title_en') or video_data['title_ar']\n img = video_data.get('img')\n\n return {\n 'id': video_id,\n 'title': 
title,\n 'description': video_data.get('description_en') or video_data.get('description_ar'),\n 'thumbnail': format_field(img, template='http://admin.mangomolo.com/analytics/%s'),\n 'duration': int_or_none(video_data.get('duration')),\n 'timestamp': parse_iso8601(video_data.get('create_time'), ' '),\n 'is_liv" }, { "id": 43511, "commit_id": "d7bd72f494e7debec11672eeddf2e6ba5ef75fac", "repo": "airflow", "path": "dev/breeze/src/airflow_breeze/utils/selective_checks.py", "file_name": "selective_checks.py", "fun_name": "upgrade_to_newer_dependencies", "commit_message": "Convert selective checks to Breeze Python (#24610)\n\nInstead of bash-based, complex logic script to perform PR selective\r\nchecks we now integrated the whole logic into Breeze Python code.\r\n\r\nIt is now much simplified, when it comes to algorithm. We've\r\nimplemented simple rule-based decision tree. The rules describing\r\nthe decision tree are now are now much easier\r\nto reason about and they correspond one-to-one with the rules\r\nthat are implemented in the code in rather straightforward way.\r\n\r\nThe code is much simpler and diagnostics of the selective checks\r\nhas also been vastly improved:\r\n\r\n* The rule engine displays status of applying each rule and\r\n explains (with yellow warning message what decision was made\r\n and why. Informative messages are printed showing the resulting\r\n output\r\n\r\n* List of files impacting the decision are also displayed\r\n\r\n* The names of \"ci file group\" and \"test type\" were aligned\r\n\r\n* Unit tests covering wide range of cases are added. Each test\r\n describes what is the case they demonstrate\r\n\r\n* `breeze selective-checks` command that is used in CI can also\r\n be used locally by just providing commit-ish reference of the\r\n commit to check. This way you can very easily debug problems and\r\n fix them\r\n\r\nFixes: #19971", "code": "def upgrade_to_newer_dependencies(self) -> bool:\n return len(\n self._matching_files(FileGroupForCi.SETUP_FILES, CI_FILE_GROUP_MATCHES)\n ) > 0 or self._github_event in [GithubEvents.PUSH, GithubEvents.SCHEDULE]\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 40, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 4, "token_counts": 37, "n_ast_nodes": 56, "n_identifiers": 12, "random_cut": "def upgrade_to_newer_dependencies(self) -> bool:\n return len(\n self._matching_files(FileGroupForCi.SETUP_FILES, CI_FILE_GROUP_MATCHES)\n ) > 0 or self._github_event in [GithubEvents.PUSH, GithubEvents.SCHEDULE]\n" }, { "id": 125552, "commit_id": "d79431e32cffbf3f86da5f7417697dc8edd1da3f", "repo": "ray", "path": "python/ray/train/gbdt_trainer.py", "file_name": "gbdt_trainer.py", "fun_name": "preprocess_datasets", "commit_message": "[air] remove unnecessary logs + improve repr for result (#26906)", "code": "def preprocess_datasets(self) -> None:\n super().preprocess_datasets()\n\n # XGBoost/LightGBM-Ray requires each dataset to have at least as many\n # blocks as there are workers.\n # TODO: Move this logic to the respective libraries\n for dataset_key, dataset in self.datasets.items():\n if dataset.num_blocks() < self._ray_params.num_actors:\n if dataset.size_bytes() > _WARN_REPARTITION_THRESHOLD:\n warnings.warn(\n f\"Dataset '{dataset_key}' has {dataset.num_blocks()} blocks, \"\n f\"which is less than the `num_workers` \"\n f\"{self._ray_params.num_actors}. 
\"\n f\"This dataset will be automatically repartitioned to \"\n f\"{self._ray_params.num_actors} blocks. You can disable \"\n \"this error message by partitioning the dataset \"\n \"to have blocks >= number of workers via \"\n \"`dataset.repartition(num_workers)`.\"\n )\n self.datasets[dataset_key] = dataset.repartition(\n self._ray_params.num_actors\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 429, "n_words": 98, "vocab_size": 78, "complexity": 4, "nloc": 18, "token_counts": 84, "n_ast_nodes": 179, "n_identifiers": 15, "random_cut": "def preprocess_datasets(self) -> None:\n super().preprocess_datasets()\n\n # XGBoost/LightGBM-Ray requires each dataset to have at least as many\n # blocks as there are workers.\n # TODO: Move this logic to the respective libraries\n for dataset_key, dataset in self.datasets.items():\n if dataset.num_blocks() < self._ray_params.num_actors:\n if dataset.size_bytes() > _WARN_REPARTITION_THRESHOLD:\n warnings.warn(\n f\"Dataset '{dataset_key}' has {dataset.num_blocks()} blocks, \"\n f\"which is less than the `num_workers` \"\n f\"{self._ray_para" }, { "id": 162993, "commit_id": "3dfed3fcd552dcbf4daf7f78c82a87638f896512", "repo": "pandas", "path": "pandas/tests/io/test_sql.py", "file_name": "test_sql.py", "fun_name": "test_datetime_NaT", "commit_message": "ENH: to_sql returns rowcount (#45137)", "code": "def test_datetime_NaT(self):\n df = DataFrame(\n {\"A\": date_range(\"2013-01-01 09:00:00\", periods=3), \"B\": np.arange(3.0)}\n )\n df.loc[1, \"A\"] = np.nan\n assert df.to_sql(\"test_datetime\", self.conn, index=False) == 3\n\n # with read_table -> type information from schema used\n result = sql.read_sql_table(\"test_datetime\", self.conn)\n tm.assert_frame_equal(result, df)\n\n # with read_sql -> no type information -> sqlite has no native\n result = sql.read_sql_query(\"SELECT * FROM test_datetime\", self.conn)\n if self.flavor == \"sqlite\":\n assert isinstance(result.loc[0, \"A\"], str)\n result[\"A\"] = to_datetime(result[\"A\"], errors=\"coerce\")\n tm.assert_frame_equal(result, df)\n else:\n tm.assert_frame_equal(result, df)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 204, "n_words": 73, "vocab_size": 54, "complexity": 2, "nloc": 15, "token_counts": 149, "n_ast_nodes": 244, "n_identifiers": 24, "random_cut": "def test_datetime_NaT(self):\n df = DataFrame(\n {\"A\": date_range(\"2013-01-01 09:00:00\", periods=3), " }, { "id": 149926, "commit_id": "5bf021be2e8f1479753e66573575fa7cde00a2b6", "repo": "freqtrade", "path": "tests/strategy/test_interface.py", "file_name": "test_interface.py", "fun_name": "test_auto_hyperopt_interface", "commit_message": "Enhance hyperoptable strategy to test instance parameters", "code": "def test_auto_hyperopt_interface(default_conf):\n default_conf.update({'strategy': 'HyperoptableStrategy'})\n PairLocks.timeframe = default_conf['timeframe']\n strategy = StrategyResolver.load_strategy(default_conf)\n strategy.ft_bot_start()\n with pytest.raises(OperationalException):\n next(strategy.enumerate_parameters('deadBeef'))\n\n assert strategy.buy_rsi.value == strategy.buy_params['buy_rsi']\n # PlusDI is NOT in the buy-params, so default should be used\n assert strategy.buy_plusdi.value == 0.5\n assert strategy.sell_rsi.value == strategy.sell_params['sell_rsi']\n\n assert repr(strategy.sell_rsi) == 'IntParameter(74)'\n\n # 
Parameter is disabled - so value from sell_param dict will NOT be used.\n assert strategy.sell_minusdi.value == 0.5\n all_params = strategy.detect_all_parameters()\n assert isinstance(all_params, dict)\n assert len(all_params['buy']) == 2\n assert len(all_params['sell']) == 2\n # Number of Hyperoptable parameters\n assert all_params['count'] == 6\n\n strategy.__class__.sell_rsi = IntParameter([0, 10], default=5, space='buy')\n\n with pytest.raises(OperationalException, match=r\"Inconclusive parameter.*\"):\n [x for x in strategy.detect_parameters('sell')]\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 171, "n_words": 98, "vocab_size": 70, "complexity": 2, "nloc": 20, "token_counts": 196, "n_ast_nodes": 325, "n_identifiers": 34, "random_cut": "def test_auto_hyperopt_interface(default_conf):\n default_conf.update({'strategy': 'HyperoptableStrategy'})\n PairLocks.timeframe = default_conf['timeframe']\n strategy = StrategyResolver.load_strategy(default_conf)\n strategy.ft_bot_start()\n with pytest.raises(OperationalException):\n next(strategy.enumerate_parameters('deadBeef'))\n\n assert strategy.buy_rsi.value == strategy.buy_params['buy_rsi']\n # PlusDI is NOT in the buy-params, so default should be use" }, { "id": 190366, "commit_id": "4fc3616712edb19179b17dd270ad6cf63abf99c2", "repo": "DeOldify", "path": "fastai/general_optimizer.py", "file_name": "general_optimizer.py", "fun_name": "_get_val3", "commit_message": "Upgrading to support latest Pytorch version", "code": "def _get_val3(self, state, val, param): \n v = val.view(val.size(0), -1).mean(1)\n return state.add_(1-param, v) if self.decay else state.add_(v)\n", "url": "https://github.com/jantic/DeOldify.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 30, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 3, "token_counts": 54, "n_ast_nodes": 80, "n_identifiers": 11, "random_cut": "def _get_val3(self, state, val, param): \n v = val.view(val.size(0), -1)." }, { "id": 207691, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_pk_hidden_fields", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_pk_hidden_fields(self):\n \n story1 = Story.objects.create(\n title=\"The adventures of Guido\", content=\"Once upon a time in Djangoland...\"\n )\n story2 = Story.objects.create(\n title=\"Crouching Tiger, Hidden Python\",\n content=\"The Python was sneaking into...\",\n )\n response = self.client.get(reverse(\"admin:admin_views_story_changelist\"))\n # Only one hidden field, in a separate place than the table.\n self.assertContains(response, 'id=\"id_form-0-id\"', 1)\n self.assertContains(response, 'id=\"id_form-1-id\"', 1)\n self.assertContains(\n response,\n '
    \\n'\n ''\n '\\n
    '\n % (story2.id, story1.id),\n html=True,\n )\n self.assertContains(response, '%d' % story1.id, 1)\n self.assertContains(response, '%d' % story2.id, 1)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 271, "n_words": 81, "vocab_size": 61, "complexity": 1, "nloc": 21, "token_counts": 125, "n_ast_nodes": 213, "n_identifiers": 16, "random_cut": "def test_pk_hidden_fields(self):\n \n story1 = Story.objects.create(\n title=\"The adventures of Guido\", content=\"Once upon a time in Djangoland...\"\n )\n story2 = Story.object" }, { "id": 106841, "commit_id": "5b8b7f267cfaf76a2a39a727ef31a62b3909a093", "repo": "visdom", "path": "py/visdom/__init__.py", "file_name": "__init__.py", "fun_name": "get_window_data", "commit_message": "apply black py to all python files", "code": "def get_window_data(self, win=None, env=None):\n \n\n return self._send(\n msg={\"win\": win, \"eid\": env},\n endpoint=\"win_data\",\n create=False,\n )\n", "url": "https://github.com/fossasia/visdom.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 67, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 6, "token_counts": 40, "n_ast_nodes": 65, "n_identifiers": 8, "random_cut": "def get_window_data(self, win=None, env=None):\n " }, { "id": 178646, "commit_id": "840959fbec6d897aa7e51f63e1c34e46402ced8b", "repo": "Nuitka", "path": "nuitka/Caching.py", "file_name": "Caching.py", "fun_name": "getModuleImportableFilesHash", "commit_message": "Optimization: Make experimental caching bytecode demotion work.", "code": "def getModuleImportableFilesHash(full_name):\n \n package_name = full_name.getPackageName()\n\n paths = getPackageSearchPath(None)\n\n if package_name is not None:\n paths += getPackageSearchPath(package_name)\n\n all_suffixes = getAllModuleSuffixes()\n\n result_hash = Hash()\n\n for path in paths:\n if not os.path.isdir(path):\n continue\n\n for fullname, filename in listDir(path):\n if isPackageDir(fullname) or filename.endswith(all_suffixes):\n result_hash.updateFromValues(filename, b\"\\0\")\n\n return result_hash.asHexDigest()\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 125, "n_words": 43, "vocab_size": 33, "complexity": 7, "nloc": 14, "token_counts": 96, "n_ast_nodes": 159, "n_identifiers": 20, "random_cut": "def getModuleImportableFilesHash(full_name):\n \n package_name = full_name.getPackageName()\n\n paths = getPackageSearchPath(None)\n\n if package_name is not None:\n paths += getPackageSearchPath(package_name)\n\n all_suffixes = getAllModuleSuffixes()\n\n result_hash = Hash()\n\n for path in paths:\n if not os.path.isdir(path):\n continue\n\n for fullname, filename in listDir(path):\n if isPackageDir(fullname) or filename.endswith(all_suffixes):\n result_hash.updateFromValues(filename, b\"\\0\")\n\n return result_hash.asHexDig" }, { "id": 305410, "commit_id": "d1ecd74a1a153b85b829acf45b5c6a5ea79df5c1", "repo": "core", "path": "homeassistant/components/lg_netcast/media_player.py", "file_name": "media_player.py", "fun_name": "media_pause", "commit_message": "Improve entity type hints [l] (#77655)", "code": "def media_pause(self) -> None:\n \n self._playing = False\n self._state = STATE_PAUSED\n self.send_command(34)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, 
"n_whitespaces": 39, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 24, "n_ast_nodes": 42, "n_identifiers": 6, "random_cut": "def media_pause(self) -> None:\n \n self._playing = False\n self._state = STATE_PAUSED\n self" }, { "id": 331731, "commit_id": "5f81d4de234f579bdc988e8346da14b37a3af160", "repo": "pytorch-image-models", "path": "timm/models/vision_transformer.py", "file_name": "vision_transformer.py", "fun_name": "_reset_representation", "commit_message": "Move DeiT to own file, vit getting crowded. Working towards fixing #1029, make pooling interface for transformers and mlp closer to convnets. Still working through some details...", "code": "def _reset_representation(self, representation_size):\n self.representation_size = representation_size\n if self.representation_size:\n self.pre_logits = nn.Sequential(OrderedDict([\n ('fc', nn.Linear(self.embed_dim, self.representation_size)),\n ('act', nn.Tanh())\n ]))\n else:\n self.pre_logits = nn.Identity()\n", "url": "https://github.com/huggingface/pytorch-image-models.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 104, "n_words": 21, "vocab_size": 18, "complexity": 2, "nloc": 9, "token_counts": 68, "n_ast_nodes": 109, "n_identifiers": 11, "random_cut": "def _reset_representation(self, representation_size):\n self.representation_size = representation_size\n if self.representation_size:\n self.pre_logits = nn.Sequential(OrderedDict([\n " }, { "id": 212206, "commit_id": "1b3e6acd6eebd352106cc5ecf5e12dbf90e0607c", "repo": "bokeh", "path": "tests/unit/bokeh/core/property/test_instance.py", "file_name": "test_instance.py", "fun_name": "test___repr__", "commit_message": "Add Init signatures to Bokeh models (#12035)\n\n* Add signatures to Bokeh Model initializers\r\n\r\n* use explicit type for override default\r\n\r\n* move InstanceDefault to bokeh.core.properties\r\n\r\n* enable assertions", "code": "def test___repr__(self) -> None:\n m = bcpi.InstanceDefault(_TestModel, x=10, z=[10])\n assert repr(m) == \"\"\n", "url": "https://github.com/bokeh/bokeh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 28, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 3, "token_counts": 32, "n_ast_nodes": 51, "n_identifiers": 9, "random_cut": "def test___repr__(self) -> None:\n m =" }, { "id": 40140, "commit_id": "d2bd5aa361a1fdff7e8fa3e29568fc295c40489e", "repo": "dash", "path": "dash/testing/plugin.py", "file_name": "plugin.py", "fun_name": "pytest_addoption", "commit_message": "Revert \"Remove percynofinalize.\"\n\nThis reverts commit 8d2b1d3f5eab35f88eba46f6e96de5f484857513.", "code": "def pytest_addoption(parser):\n dash = parser.getgroup(\"Dash\", \"Dash Integration Tests\")\n\n dash.addoption(\n \"--webdriver\",\n choices=(\"Chrome\", \"Firefox\"),\n default=\"Chrome\",\n help=\"Name of the selenium driver to use\",\n )\n\n dash.addoption(\n \"--remote\", action=\"store_true\", help=\"instruct pytest to use selenium grid\"\n )\n\n dash.addoption(\n \"--remote-url\",\n action=\"store\",\n default=SELENIUM_GRID_DEFAULT,\n help=\"set a different selenium grid remote url if other than default\",\n )\n\n dash.addoption(\n \"--headless\", action=\"store_true\", help=\"set this flag to run in headless mode\"\n )\n\n dash.addoption(\n \"--percy-assets\",\n action=\"store\",\n default=\"tests/assets\",\n help=\"configure how Percy will discover your app's assets\",\n )\n\n dash.addoption(\n \"--nopercyfinalize\",\n 
action=\"store_false\",\n help=\"set this flag to control percy finalize at CI level\",\n )\n\n dash.addoption(\n \"--pause\",\n action=\"store_true\",\n help=\"pause using pdb after opening the test app, so you can interact with it\",\n )\n\n\n@pytest.mark.tryfirst", "url": "https://github.com/plotly/dash.git", "language": "Python", "ast_errors": "@pytest.mark.tryfirst", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 288, "n_words": 105, "vocab_size": 80, "complexity": 1, "nloc": 36, "token_counts": 134, "n_ast_nodes": 251, "n_identifiers": 13, "random_cut": "def pytest_addoption(parser):\n dash = parser.getgroup(\"Dash\", \"Dash Integration Tests\")\n\n dash.addoption(\n \"--webdriver\",\n choices=(\"Chrome\", \"Firefox\"),\n default=\"Chrome\",\n help=\"Name of the selenium driver to use\",\n )\n\n dash.addoption(\n \"--remote\", action=\"store_true\", help=\"instruct pytest to use selenium grid\"\n )\n\n dash.addoption(\n \"--remote-url\",\n action=\"store\",\n default=SELENIUM_GRID_DEFAULT,\n help=\"set a different selenium grid remote url if other than default\",\n )\n\n dash.addoption(\n \"--headless\", action=\"store_true\", help=\"set this flag to run in headless mode\"\n )\n\n dash.addoption(\n \"--percy-assets\",\n action=\"store\",\n default=\"tests/assets\",\n help=\"configure how Percy will " }, { "id": 212597, "commit_id": "91fbc852c5ed7245e661e3075310f79246aac09a", "repo": "bokeh", "path": "bokeh/colors/color.py", "file_name": "color.py", "fun_name": "to_hex", "commit_message": "Combine alpha values by multiplying together (#12283)\n\n* Combine multiple alpha in color2rgba\r\n\r\n* Visual integration tests\r\n\r\n* Update varying_alpha_palette to combine alpha\r\n\r\n* Include alpha in RGB.to_hex()\r\n\r\n* Improvements for mypy\r\n\r\n* More improvements for mypy\r\n\r\n* Remove unused combine_alpha() from bokehjs", "code": "def to_hex(self) -> str:\n \n if self.a < 1.0:\n return \"#%02X%02X%02X%02X\" % (self.r, self.g, self.b, round(self.a*255))\n else:\n return \"#%02X%02X%02X\" % (self.r, self.g, self.b)\n", "url": "https://github.com/bokeh/bokeh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 65, "n_words": 22, "vocab_size": 18, "complexity": 2, "nloc": 15, "token_counts": 60, "n_ast_nodes": 92, "n_identifiers": 8, "random_cut": "def to_hex(self) -> str:\n \n if self.a < 1.0:\n return \"#%02X%02X%02X%02X\" % (self.r, self.g, self.b, round(self.a*255)" }, { "id": 200894, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/aggregation/tests.py", "file_name": "tests.py", "fun_name": "test_sum_distinct_aggregate", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_sum_distinct_aggregate(self):\n \n authors = Author.objects.filter(book__in=[self.b5, self.b6])\n self.assertEqual(authors.count(), 3)\n\n distinct_authors = authors.distinct()\n self.assertEqual(distinct_authors.count(), 2)\n\n # Selected author ages are 57 and 46\n age_sum = distinct_authors.aggregate(Sum(\"age\"))\n self.assertEqual(age_sum[\"age__sum\"], 103)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 82, "n_words": 26, "vocab_size": 24, "complexity": 1, "nloc": 7, "token_counts": 79, "n_ast_nodes": 132, "n_identifiers": 16, "random_cut": "def test_sum_distinct_aggregate(self):\n \n authors = Author.objects.filter(book__in=[self.b5, self.b6])\n self.assertEqual(authors.count(), 
3)\n\n " }, { "id": 190745, "commit_id": "c6105dbd7210286dde07aa0a09381bf99840acca", "repo": "microservices-demo", "path": "src/emailservice/demo_pb2_grpc.py", "file_name": "demo_pb2_grpc.py", "fun_name": "add_CurrencyServiceServicer_to_server", "commit_message": "chore(deps): manual upgrade of dependencies (#1396)\n\n* chore(deps): manual upgrade of dependencies\r\n\r\naccumulated upgrades from renovate PRs #997, #1094, #1095, #1193, #1379, #1384, #1387, #1388, #1389,\r\n\r\n* chore(deps): fix dependencies\r\n\r\ndue to requests constraint the charset-normalizer is rolled back to\r\nlates of major version 2 (2.1.1).\r\nadd importlib-metadata dependency.\r\nupdate opentelemetry-* to the version supported by 1.15.0 of the SDK.\r\nrollback to docker 3.10 for email & recommendation cause profiler package\r\ndoes not support python 3.11\r\nregenerate email and recommendation svs protobuf\r\n\r\n* chore(deps): adding changes from #1402 and #1403", "code": "def add_CurrencyServiceServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'GetSupportedCurrencies': grpc.unary_unary_rpc_method_handler(\n servicer.GetSupportedCurrencies,\n request_deserializer=demo__pb2.Empty.FromString,\n response_serializer=demo__pb2.GetSupportedCurrenciesResponse.SerializeToString,\n ),\n 'Convert': grpc.unary_unary_rpc_method_handler(\n servicer.Convert,\n request_deserializer=demo__pb2.CurrencyConversionRequest.FromString,\n response_serializer=demo__pb2.Money.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'hipstershop.CurrencyService', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\n # This class is part of an EXPERIMENTAL API.", "url": "https://github.com/GoogleCloudPlatform/microservices-demo.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 214, "n_words": 34, "vocab_size": 31, "complexity": 1, "nloc": 16, "token_counts": 86, "n_ast_nodes": 132, "n_identifiers": 20, "random_cut": "def add_CurrencyServiceServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'GetSupportedCurrencies': grpc.unary_unary_rpc_method_handler(\n servicer.GetSupportedCurrencies,\n request_deserializer=demo__pb2.Empty.FromString,\n response_serializer=demo__pb2.GetSupported" }, { "id": 114048, "commit_id": "80d671fae9c05521009615a2bfff1b760a98debd", "repo": "mindsdb", "path": "mindsdb/integrations/mysql/mysql.py", "file_name": "mysql.py", "fun_name": "_to_mysql_table", "commit_message": "fix dtypes casting", "code": "def _to_mysql_table(self, dtype_dict, predicted_cols, columns):\n subtype_map = {\n dtype.integer: 'int',\n dtype.float: 'double',\n dtype.binary: 'bool',\n dtype.date: 'Date',\n dtype.datetime: 'Datetime',\n dtype.binary: 'VARCHAR(500)',\n dtype.categorical: 'VARCHAR(500)',\n dtype.tags: 'VARCHAR(500)',\n dtype.image: 'VARCHAR(500)',\n dtype.video: 'VARCHAR(500)',\n dtype.audio: 'VARCHAR(500)',\n dtype.short_text: 'VARCHAR(500)',\n dtype.rich_text: 'VARCHAR(500)',\n dtype.quantity: 'VARCHAR(500)',\n dtype.num_array: 'VARCHAR(500)',\n dtype.cat_array: 'VARCHAR(500)',\n dtype.num_tsarray: 'VARCHAR(500)',\n dtype.cat_tsarray: 'VARCHAR(500)',\n 'default': 'VARCHAR(500)'\n }\n\n column_declaration = []\n for name in columns:\n try:\n col_subtype = dtype_dict[name]\n new_type = subtype_map.get(col_subtype, subtype_map.get('default'))\n column_declaration.append(f' `{name}` {new_type} ')\n if name in predicted_cols:\n column_declaration.append(f' `{name}_original` {new_type} ')\n except 
Exception as e:\n log.error(f'Error: can not determine mysql data type for column {name}: {e}')\n\n return column_declaration\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 450, "n_words": 91, "vocab_size": 68, "complexity": 4, "nloc": 33, "token_counts": 191, "n_ast_nodes": 342, "n_identifiers": 34, "random_cut": "def _to_mysql_table(self, dtype_dict, predicted_cols, columns):\n subtype_map = {\n dtype.integer: 'int',\n dtype.float: 'double',\n dtype.binary: 'bool',\n dtype.date: 'Date',\n dtype.datetime: 'Datetime',\n dtype.binary: 'VARCHAR(500)',\n dtype.categorical: 'VARCHAR(500)',\n dtype.tags: 'VARCHAR(500)',\n dtype.image: 'VARCHAR(500)',\n dtype.video: 'VARCHAR(500)',\n dtype.audio: 'VARCHAR(500)',\n dtype.short_text: 'VARCHAR(500)',\n dtype.rich_text: 'VARCHAR(500)',\n dtype.quantity: 'VARCHAR(500)',\n dtype.num_array: 'VARCHAR(500)',\n dtype.cat_array: 'VARCHAR(500)',\n dtype.num_tsarray: 'VARCHAR(500)',\n dtype.cat_tsarray: 'VARCHAR(500)',\n 'default': 'VARCHAR(500)'\n }\n\n column_declaration = []\n for name in columns:\n try:\n col_subtype = dtype_dict[name]\n new_type = subtype_map.get(col_subtype, subtype_map.get('default'))\n column_declaration.append(f' `{name}` {new_type} ')\n if name in predicted_cols:\n column_declaration.append(f' `{name}_" }, { "id": 27579, "commit_id": "7ea7916c65357741c3911e307acb58d547a5e91a", "repo": "saleor", "path": "saleor/webhook/observability/tests/test_obfuscation.py", "file_name": "test_obfuscation.py", "fun_name": "test_anonymize_gql_operation_response_with_fragment_spread", "commit_message": "Observability reporter (#9803)\n\n* Initial commit\r\n\r\n* Add observability celery beat task\r\n\r\n* Add observability_reporter_task and observability_send_events\r\n\r\n* Convert payload to camel case\r\n\r\n* Add fakeredis to dev dependencies\r\n\r\n* Add redis buffer tests\r\n\r\n* Refactor buffer\r\n\r\n* Update\r\n\r\n* Optimize buffer\r\n\r\n* Add tests\r\n\r\n* Add types-redis to dev dependencies\r\n\r\n* Refactor\r\n\r\n* Fix after rebase\r\n\r\n* Refactor opentracing\r\n\r\n* Add opentracing to observability tasks\r\n\r\n* Add more tests\r\n\r\n* Fix buffer fixtures\r\n\r\n* Report dropped events\r\n\r\n* Fix buffer tests\r\n\r\n* Refactor get_buffer\r\n\r\n* Refactor unit tests\r\n\r\n* Set Redis connection client_name\r\n\r\n* Refactor redis tests\r\n\r\n* Fix test_get_or_create_connection_pool\r\n\r\n* Fix JsonTruncText comparison\r\n\r\n* Add more generate_event_delivery_attempt_payload tests", "code": "def test_anonymize_gql_operation_response_with_fragment_spread(gql_operation_factory):\n query = \n result = {\"data\": \"result\"}\n sensitive_fields = {\"Product\": {\"name\"}}\n operation_result = gql_operation_factory(query, result=result)\n\n anonymize_gql_operation_response(operation_result, sensitive_fields)\n\n assert operation_result.result[\"data\"] == MASK\n\n\n@pytest.mark.parametrize(\n \"sensitive_fields\",\n [\n {\"NonExistingType\": {}},\n {\"Product\": {\"nonExistingField\"}},\n {\"Node\": {\"id\"}},\n ],\n)", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"sensitive_fields\",\n [\n {\"NonExistingType\": {}},\n {\"Product\": {\"nonExistingField\"}},\n {\"Node\": {\"id\"}},\n ],\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 79, "n_words": 33, "vocab_size": 29, "complexity": 1, "nloc": 23, "token_counts": 49, "n_ast_nodes": 152, "n_identifiers": 
11, "random_cut": "def test_anonymize_gql_operation_response_with_fragment_spread(gql_operation_factory):\n query = \n result = {\"data\": \"result\"}\n sensitive_fields = {\"Product\": {\"name\"}}\n operation_result = gql_operation_factory(query, result=result)\n\n anonymize_gql_operation_response(operation_result, sensitive_fields)\n\n assert operation_result.result[\"data\"] == MASK\n\n\n@pytest.mark.parametrize(\n \"sensitive_fields\",\n [\n {\"NonExistingType\": {}},\n {\"Product\": {\"nonExistingField\"}},\n {\"Node\": {\"id\"}},\n ],\n)" }, { "id": 81254, "commit_id": "2f82b757483cf67829a8c0ed843b51d126ec658e", "repo": "awx", "path": "awx/main/analytics/subsystem_metrics.py", "file_name": "subsystem_metrics.py", "fun_name": "observe", "commit_message": "Add subsystem metrics for task manager", "code": "def observe(self, field, value):\n self.METRICS[field].observe(value)\n self.metrics_have_changed = True\n if self.auto_pipe_execute is True:\n self.pipe_execute()\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 44, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 37, "n_ast_nodes": 58, "n_identifiers": 8, "random_cut": "def observe(self, field, value):\n self.METRICS[field].observe(value)\n self.metrics_have_changed = True\n if self.auto_pipe_execute is True:\n self.pipe_execute()\n" }, { "id": 23988, "commit_id": "a0c33908d500fe893d8e79e11399a5ab665f330b", "repo": "PaddleOCR", "path": "ppocr/postprocess/table_postprocess.py", "file_name": "table_postprocess.py", "fun_name": "decode_label", "commit_message": "add TableMaster", "code": "def decode_label(self, batch):\n \n structure_idx = batch[1]\n gt_bbox_list = batch[2]\n shape_list = batch[-1]\n ignored_tokens = self.get_ignored_tokens()\n end_idx = self.dict[self.end_str]\n\n structure_batch_list = []\n bbox_batch_list = []\n batch_size = len(structure_idx)\n for batch_idx in range(batch_size):\n structure_list = []\n bbox_list = []\n for idx in range(len(structure_idx[batch_idx])):\n char_idx = int(structure_idx[batch_idx][idx])\n if idx > 0 and char_idx == end_idx:\n break\n if char_idx in ignored_tokens:\n continue\n structure_list.append(self.character[char_idx])\n\n bbox = gt_bbox_list[batch_idx][idx]\n if bbox.sum() != 0:\n bbox = self._bbox_decode(bbox, shape_list[batch_idx])\n bbox_list.append(bbox)\n structure_batch_list.append(structure_list)\n bbox_batch_list.append(bbox_list)\n result = {\n 'bbox_batch_list': bbox_batch_list,\n 'structure_batch_list': structure_batch_list,\n }\n return result\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 417, "n_words": 83, "vocab_size": 57, "complexity": 7, "nloc": 30, "token_counts": 190, "n_ast_nodes": 304, "n_identifiers": 28, "random_cut": "def decode_label(self, batch):\n \n structure_idx = batch[1]\n gt_bbox_list = batch[2]\n shape_list = batch[-1]\n ignored_tokens = self.get_ignored_tokens()\n end_idx = self.dict[self.end_str]\n\n structure_batch_list = []\n bbox_batch_list = []\n batch_size = len(structure_idx)\n for batch_idx in range(batch_size):\n structure_list = []\n bbox_list = []\n for idx in range(len(structure_idx[batch_idx])):\n char_idx = int(structure_idx[batch_idx][idx])\n if idx > 0 and char_idx == end_idx:\n break\n if char_idx in ignored_tokens:\n continue\n structure_list.append(self.character[char_idx])\n\n bbox = 
gt_bbox_list[batch_idx][idx]\n if bbox.sum() != 0:\n bbox = self._bbox_decode(bbox, shape_list[batch_idx])\n bbox_list.append(bbox)\n structure_batch_list.append(structure_list)\n bbox_batch_list.append(bbox_list)\n result = {\n 'bbox_batch_list': bbox_batch_list,\n 'structure_batch_list': structure_batch_list,\n }\n return result\n" }, { "id": 97067, "commit_id": "ebf2d415cbbbfc4473bcc304f0032f805969a0f9", "repo": "sentry", "path": "src/sentry/search/events/datasets/metrics.py", "file_name": "metrics.py", "fun_name": "field_alias_converter", "commit_message": "feat(mep): Add the team-key-transaction alias (#32593)\n\n- This moves some of our aliases to their own files, the intention is to\r\n eventually migrate more of the class methods there this gives us a few\r\n benefits\r\n - We can unit test the resolverse now\r\n - They're easier to re-use across datasets", "code": "def field_alias_converter(self) -> Mapping[str, Callable[[str], SelectType]]:\n return {\n constants.PROJECT_ALIAS: self._resolve_project_slug_alias,\n constants.PROJECT_NAME_ALIAS: self._resolve_project_slug_alias,\n constants.TEAM_KEY_TRANSACTION_ALIAS: self._resolve_team_key_transaction_alias,\n }\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 61, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 6, "token_counts": 46, "n_ast_nodes": 65, "n_identifiers": 12, "random_cut": "def field_alias_converter(self) -> Mapping[str, Callable[[str], SelectType]]:\n return {\n constants.PROJECT_ALIAS: self._resolve_project_slug_alias,\n constants.PROJECT_NAME_ALIAS: self._resolve_project_slug_alias,\n constants.TEAM_KEY_TRANSACTION_ALIAS: self._resolve_t" }, { "id": 186352, "commit_id": "91e23ff34c2f34da291c7cadf26dcb5fbde4e439", "repo": "textual", "path": "tests/snapshot_tests/test_snapshots.py", "file_name": "test_snapshots.py", "fun_name": "test_demo", "commit_message": "more pauses for demo?", "code": "def test_demo(snap_compare):\n \n assert snap_compare(\n Path(\"../../src/textual/demo.py\"),\n press=[\"down\", \"down\", \"down\", \"_\", \"_\"],\n terminal_size=(100, 30),\n )\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 43, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 37, "n_ast_nodes": 65, "n_identifiers": 5, "random_cut": "def test_demo(snap_compare):\n \n" }, { "id": 210930, "commit_id": "f2a883edd26c4630672c32415d9c5334846a4b5c", "repo": "PaddleDetection", "path": "deploy/pphuman/pipeline.py", "file_name": "pipeline.py", "fun_name": "predict_video", "commit_message": "ppvehicle plate license", "code": "def predict_video(self, video_file):\n # mot\n # mot -> attr\n # mot -> pose -> action\n capture = cv2.VideoCapture(video_file)\n video_out_name = 'output.mp4' if self.file_name is None else self.file_name\n\n # Get Video info : resolution, fps, frame count\n width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(capture.get(cv2.CAP_PROP_FPS))\n frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n print(\"video fps: %d, frame_count: %d\" % (fps, frame_count))\n\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n out_path = os.path.join(self.output_dir, video_out_name)\n fourcc = cv2.VideoWriter_fourcc(* 'mp4v')\n writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))\n frame_id = 
0\n\n entrance, records, center_traj = None, None, None\n if self.draw_center_traj:\n center_traj = [{}]\n id_set = set()\n interval_id_set = set()\n in_id_list = list()\n out_id_list = list()\n prev_center = dict()\n records = list()\n entrance = [0, height / 2., width, height / 2.]\n video_fps = fps\n\n video_action_imgs = []\n\n if self.with_video_action:\n short_size = self.cfg[\"VIDEO_ACTION\"][\"short_size\"]\n scale = ShortSizeScale(short_size)\n\n while (1):\n if frame_id % 10 == 0:\n print('frame id: ', frame_id)\n\n ret, frame = capture.read()\n if not ret:\n break\n\n if self.modebase[\"idbased\"] or self.modebase[\"skeletonbased\"]:\n if frame_id > self.warmup_frame:\n self.pipe_timer.total_time.start()\n self.pipe_timer.module_time['mot'].start()\n res = self.mot_predictor.predict_image(\n [copy.deepcopy(frame)], visual=False)\n\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['mot'].end()\n\n # mot output format: id, class, score, xmin, ymin, xmax, ymax\n mot_res = parse_mot_res(res)\n\n # flow_statistic only support single class MOT\n boxes, scores, ids = res[0] # batch size = 1 in MOT\n mot_result = (frame_id + 1, boxes[0], scores[0],\n ids[0]) # single class\n statistic = flow_statistic(\n mot_result, self.secs_interval, self.do_entrance_counting,\n video_fps, entrance, id_set, interval_id_set, in_id_list,\n out_id_list, prev_center, records)\n records = statistic['records']\n\n # nothing detected\n if len(mot_res['boxes']) == 0:\n frame_id += 1\n if frame_id > self.warmup_frame:\n self.pipe_timer.img_num += 1\n self.pipe_timer.total_time.end()\n if self.cfg['visual']:\n _, _, fps = self.pipe_timer.get_total_time()\n im = self.visualize_video(frame, mot_res, frame_id, fps,\n entrance, records,\n center_traj) # visualize\n writer.write(im)\n if self.file_name is None: # use camera_id\n cv2.imshow('PPHuman&&PPVehicle', im)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n continue\n\n self.pipeline_res.update(mot_res, 'mot')\n crop_input, new_bboxes, ori_bboxes = crop_image_with_mot(\n frame, mot_res)\n\n if self.with_vehicleplate:\n platelicense = self.vehicleplate_detector.get_platelicense(\n crop_input)\n self.pipeline_res.update(platelicense, 'vehicleplate')\n\n if self.with_attr:\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['attr'].start()\n attr_res = self.attr_predictor.predict_image(\n crop_input, visual=False)\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['attr'].end()\n self.pipeline_res.update(attr_res, 'attr')\n\n if self.with_idbased_detaction:\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['det_action'].start()\n det_action_res = self.det_action_predictor.predict(\n crop_input, mot_res)\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['det_action'].end()\n self.pipeline_res.update(det_action_res, 'det_action')\n\n if self.cfg['visual']:\n self.det_action_visual_helper.update(det_action_res)\n\n if self.with_idbased_clsaction:\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['cls_action'].start()\n cls_action_res = self.cls_action_predictor.predict_with_mot(\n crop_input, mot_res)\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['cls_action'].end()\n self.pipeline_res.update(cls_action_res, 'cls_action')\n\n if self.cfg['visual']:\n self.cls_action_visual_helper.update(cls_action_res)\n\n if self.with_skeleton_action:\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['kpt'].start()\n kpt_pred = 
self.kpt_predictor.predict_image(\n crop_input, visual=False)\n keypoint_vector, score_vector = translate_to_ori_images(\n kpt_pred, np.array(new_bboxes))\n kpt_res = {}\n kpt_res['keypoint'] = [\n keypoint_vector.tolist(), score_vector.tolist()\n ] if len(keypoint_vector) > 0 else [[], []]\n kpt_res['bbox'] = ori_bboxes\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['kpt'].end()\n\n self.pipeline_res.update(kpt_res, 'kpt')\n\n self.kpt_buff.update(kpt_res, mot_res) # collect kpt output\n state = self.kpt_buff.get_state(\n ) # whether frame num is enough or lost tracker\n\n skeleton_action_res = {}\n if state:\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time[\n 'skeleton_action'].start()\n collected_keypoint = self.kpt_buff.get_collected_keypoint(\n ) # reoragnize kpt output with ID\n skeleton_action_input = parse_mot_keypoint(\n collected_keypoint, self.coord_size)\n skeleton_action_res = self.skeleton_action_predictor.predict_skeleton_with_mot(\n skeleton_action_input)\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['skeleton_action'].end()\n self.pipeline_res.update(skeleton_action_res,\n 'skeleton_action')\n\n if self.cfg['visual']:\n self.skeleton_action_visual_helper.update(\n skeleton_action_res)\n\n if self.with_mtmct and frame_id % 10 == 0:\n crop_input, img_qualities, rects = self.reid_predictor.crop_image_with_mot(\n frame, mot_res)\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['reid'].start()\n reid_res = self.reid_predictor.predict_batch(crop_input)\n\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['reid'].end()\n\n reid_res_dict = {\n 'features': reid_res,\n \"qualities\": img_qualities,\n \"rects\": rects\n }\n self.pipeline_res.update(reid_res_dict, 'reid')\n else:\n self.pipeline_res.clear('reid')\n\n if self.with_video_action:\n # get the params\n frame_len = self.cfg[\"VIDEO_ACTION\"][\"frame_len\"]\n sample_freq = self.cfg[\"VIDEO_ACTION\"][\"sample_freq\"]\n\n if sample_freq * frame_len > frame_count: # video is too short\n sample_freq = int(frame_count / frame_len)\n\n # filter the warmup frames\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['video_action'].start()\n\n # collect frames\n if frame_id % sample_freq == 0:\n # Scale image\n scaled_img = scale(frame)\n video_action_imgs.append(scaled_img)\n\n # the number of collected frames is enough to predict video action\n if len(video_action_imgs) == frame_len:\n classes, scores = self.video_action_predictor.predict(\n video_action_imgs)\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['video_action'].end()\n\n video_action_res = {\"class\": classes[0], \"score\": scores[0]}\n self.pipeline_res.update(video_action_res, 'video_action')\n\n print(\"video_action_res:\", video_action_res)\n\n video_action_imgs.clear() # next clip\n\n self.collector.append(frame_id, self.pipeline_res)\n\n if frame_id > self.warmup_frame:\n self.pipe_timer.img_num += 1\n self.pipe_timer.total_time.end()\n frame_id += 1\n\n if self.cfg['visual']:\n _, _, fps = self.pipe_timer.get_total_time()\n im = self.visualize_video(frame, self.pipeline_res, frame_id,\n fps, entrance, records,\n center_traj) # visualize\n writer.write(im)\n if self.file_name is None: # use camera_id\n cv2.imshow('PPHuman', im)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n writer.release()\n print('save result to {}'.format(out_path))\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 4259, "n_words": 660, "vocab_size": 349, "complexity": 51, "nloc": 191, "token_counts": 1477, "n_ast_nodes": 2413, "n_identifiers": 149, "random_cut": "def predict_video(self, video_file):\n # mot\n # mot -> attr\n # mot -> pose -> action\n capture = cv2.VideoCapture(video_file)\n video_out_name = 'output.mp4' if self.file_name is None else self.file_name\n\n # Get Video info : resolution, fps, frame count\n width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(capture.get(cv2.CAP_PROP_FPS))\n frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n print(\"video fps: %d, frame_count: %d\" % (fps, frame_count))\n\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n out_path = os.path.join(self.output_dir, video_out_name)\n fourcc = cv2.VideoWriter_fourcc(* 'mp4v')\n writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))\n frame_id = 0\n\n entrance, records, center_traj = None, None, None\n if self.draw_center_traj:\n center_traj = [{}]\n id_set = set()\n interval_id_set = set()\n in_id_list = list()\n out_id_list = list()\n prev_center = dict()\n records = list()\n entrance = [0, height / 2., width, height / 2.]\n video_fps = fps\n\n video_action_imgs = []\n\n if self.with_video_action:\n short_size = self.cfg[\"VIDEO_ACTION\"][\"short_size\"]\n scale = ShortSizeScale(short_size)\n\n while (1):\n if frame_id % 10 == 0:\n print('frame id: ', frame_id)\n\n ret, frame = capture.read()\n if not ret:\n break\n\n if self.modebase[\"idbased\"] or self.modebase[\"skeletonbased\"]:\n if frame_id > self.warmup_frame:\n self.pipe_timer.total_time.start()\n self.pipe_timer.module_time['mot'].start()\n res = self.mot_predictor.predict_image(\n [copy.deepcopy(frame)], visual=False)\n\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['mot'].end()\n\n # mot output format: id, class, score, xmin, ymin, xmax, ymax\n mot_res = parse_mot_res(res)\n\n # flow_statistic only support single class MOT\n boxes, scores, ids = res[0] # batch size = 1 in MOT\n mot_result = (frame_id + 1, boxes[0], scores[0],\n ids[0]) # single class\n statistic = flow_statistic(\n mot_result, self.secs_interval, self.do_entrance_counting,\n video_fps, entrance, id_set, interval_id_set, in_id_list,\n out_id_list, prev_center, records)\n records = statistic['records']\n\n # nothing detected\n if len(mot_res['boxes']) == 0:\n frame_id += 1\n if frame_id > self.warmup_frame:\n self.pipe_timer.img_num += 1\n self.pipe_timer.total_time.end()\n if self.cfg['visual']:\n _, _, fps = self.pipe_timer.get_total_time()\n im = self.visualize_video(frame, mot_res, frame_id, fps,\n entrance, records,\n center_traj) # visualize\n writer.write(im)\n if self.file_name is None: # use camera_id\n cv2.imshow('PPHuman&&PPVehicle', im)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n continue\n\n self.pipeline_res.update(mot_res, 'mot')\n crop_input, new_bboxes, ori_bboxes = crop_image_with_mot(\n frame, mot_res)\n\n if self.with_vehicleplate:\n platelicense = self.vehicleplate_detector.get_platelicense(\n crop_input)\n self.pipeline_res.update(platelicense, 'vehicleplate')\n\n if self.with_attr:\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['attr'].start()\n attr_res = self.attr_predictor.predict_image(\n crop_input, visual=False)\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['attr'].end()\n self.pipeline_res.update(attr_res, 'attr')\n\n if 
self.with_idbased_detaction:\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['det_action'].start()\n det_action_res = self.det_action_predictor.predict(\n crop_input, mot_res)\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['det_action'].end()\n self.pipeline_res.update(det_action_res, 'det_action')\n\n if self.cfg['visual']:\n self.det_action_visual_helper.update(det_action_res)\n\n if self.with_idbased_clsaction:\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['cls_action'].start()\n cls_action_res = self.cls_action_predictor.predict_with_mot(\n crop_input, mot_res)\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['cls_action'].end()\n self.pipeline_res.update(cls_action_res, 'cls_action')\n\n if self.cfg['visual']:\n self.cls_action_visual_helper.update(cls_action_res)\n\n if self.with_skeleton_action:\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['kpt'].start()\n kpt_pred = self.kpt_predictor.predict_image(\n crop_input, visual=False)\n keypoint_vector, score_vector = translate_to_ori_images(\n kpt_pred, np.array(new_bboxes))\n kpt_res = {}\n kpt_res['keypoint'] = [\n keypoint_vector.tolist(), score_vector.tolist()\n ] if len(keypoint_vector) > 0 else [[], []]\n kpt_res['bbox'] = ori_bboxes\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['kpt'].end()\n\n self.pipeline_res.update(kpt_res, 'kpt')\n\n self.kpt_buff.update(kpt_res, mot_res) # collect kpt output\n state = self.kpt_buff.get_state(\n ) # whether frame num is enough or lost tracker\n\n skeleton_action_res = {}\n if state:\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time[\n 'skeleton_action'].start()\n collected_keypoint = self.kpt_buff.get_collected_keypoint(\n ) # reoragnize kpt output with ID\n skeleton_action_input = parse_mot_keypoint(\n collected_keypoint, self.coord_size)\n skeleton_action_res = self.skeleton_action_predictor.predict_skeleton_with_mot(\n skeleton_action_input)\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['skeleton_action'].end()\n self.pipeline_res.update(skeleton_action_res,\n 'skeleton_action')\n\n if self.cfg['visual']:\n self.skeleton_action_visual_helper.update(\n skeleton_action_res)\n\n if self.with_mtmct and frame_id % 10 == 0:\n crop_input, img_qualities, rects = self.reid_predictor.crop_image_with_mot(\n frame, mot_res)\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['reid'].start()\n reid_res = self.reid_predictor.predict_batch(crop_input)\n\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['reid'].end()\n\n reid_res_dict = {\n 'features': reid_res,\n \"qualities\": img_qualities,\n \"rects\": rects\n }\n self.pipeline_res.update(reid_res_dict, 'reid')\n else:\n self.pipeline_res.clear('reid')\n\n if self.with_video_action:\n # get the params\n frame_len = self.cfg[\"VIDEO_ACTION\"][\"frame_len\"]\n sample_freq = self.cfg[\"VIDEO_ACTION\"][\"sample_freq\"]\n\n if sample_freq * frame_len > frame_count: # video is too short\n sample_freq = int(frame_count / frame_len)\n\n # filter the warmup frames\n if frame_id > self.warmup_frame:\n self.pipe_timer.module_time['video_action'].start()\n\n # collect frames\n if frame_id % sample_freq == 0:\n # Scale image\n scaled_img = scale(frame)\n video_action_imgs.append(scaled_img)\n\n # the number of collected frames is enough to predict video action\n if len(video_action_imgs) == frame_len:\n classes, scores = self.video_action_predictor.predict(\n video_action_imgs)\n if frame_id > 
self.warmup_frame:\n self.pipe_timer.module_time['video_action'].end()\n\n video_action_res = {\"class\": classes[0], \"score\": scores[0]}\n self.pipeline_res.update(video_action_res, 'video_action')\n\n print(\"video_action_res:\", video_action_res)\n\n video_action_imgs.clear() # next clip\n\n self.collector.append(frame_id, self.pipeline_res)\n\n if frame_id > self.warmup_frame:\n self.pipe_timer.img_num += 1\n self.pipe_timer.total_time.end()\n frame_id += 1\n\n if self.cfg['visual']:\n _, _, fps = self.pipe_timer.get_total_time()\n im = self.visualize_video(frame, self.pipeline_res, frame_id,\n fps, entrance, records,\n center_traj) # visualize\n writer.write(im)\n if self.file_name is None: # use camera_id\n" }, { "id": 115528, "commit_id": "f8e08f9509befc7b7ddfda5fccbd1b9b72c9b5f9", "repo": "mindsdb", "path": "mindsdb/interfaces/database/integrations.py", "file_name": "integrations.py", "fun_name": "_get_handler_meta", "commit_message": "better dependencies installation", "code": "def _get_handler_meta(self, module):\n handler_dir = Path(module.__path__[0])\n handler_folder_name = handler_dir.name\n handler_name = handler_folder_name\n if handler_name.endswith('_handler'):\n handler_name = handler_name[:-8]\n\n dependencies = self._read_dependencies(handler_dir)\n\n self.handler_modules[module.name] = module\n import_error = None\n if hasattr(module, 'import_error'):\n import_error = module.import_error\n handler_meta = {\n 'import': {\n 'success': import_error is None,\n 'folder': handler_folder_name,\n 'dependencies': dependencies\n },\n 'version': module.version\n }\n if import_error is not None:\n handler_meta['import']['error_message'] = str(import_error)\n\n for attr in ('connection_args_example', 'connection_args', 'description', 'name', 'type', 'title'):\n if hasattr(module, attr):\n handler_meta[attr] = getattr(module, attr)\n\n # region icon\n if hasattr(module, 'icon_path'):\n icon_path = handler_dir.joinpath(module.icon_path)\n handler_meta['icon'] = {\n 'name': icon_path.name,\n 'type': icon_path.name[icon_path.name.rfind('.') + 1:].lower()\n }\n if handler_meta['icon']['type'] == 'svg':\n with open(str(icon_path), 'rt') as f:\n handler_meta['icon']['data'] = f.read()\n else:\n with open(str(icon_path), 'rb') as f:\n handler_meta['icon']['data'] = base64.b64encode(f.read()).decode('utf-8')\n # endregion\n\n if handler_meta.get('name') in ('files', 'views', 'lightwood'):\n handler_meta['permanent'] = True\n else:\n handler_meta['permanent'] = False\n\n return handler_meta\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 567, "n_words": 130, "vocab_size": 86, "complexity": 9, "nloc": 41, "token_counts": 317, "n_ast_nodes": 558, "n_identifiers": 31, "random_cut": "def _get_handler_meta(self, module):\n handler_dir = Path(module.__path__[0])\n handler_folder_name = handler_dir.name\n handler_name = handler_folder_name\n if handler_name.endswith('_handler'):\n handler_name = handler_name[:-8]\n\n dependencies = s" }, { "id": 132130, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/examples/mnist_pytorch.py", "file_name": "mnist_pytorch.py", "fun_name": "get_data_loaders", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_data_loaders():\n mnist_transforms = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.1307,), 
(0.3081,))]\n )\n\n # We add FileLock here because multiple workers will want to\n # download data, and this may cause overwrites since\n # DataLoader is not threadsafe.\n with FileLock(os.path.expanduser(\"~/data.lock\")):\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n \"~/data\", train=True, download=True, transform=mnist_transforms\n ),\n batch_size=64,\n shuffle=True,\n )\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n \"~/data\", train=False, download=True, transform=mnist_transforms\n ),\n batch_size=64,\n shuffle=True,\n )\n return train_loader, test_loader\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 236, "n_words": 63, "vocab_size": 48, "complexity": 1, "nloc": 20, "token_counts": 130, "n_ast_nodes": 195, "n_identifiers": 23, "random_cut": "def get_data_loaders():\n mnist_transforms = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n )\n\n # We add FileLock here because multiple workers will want to\n # download data, and this may cause overwrites since\n # DataLoader is not threadsafe.\n with FileLock(os.path.expanduser(\"~/data.lock\")):\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n \"~/data\", train=True, download=True, transform=mnist_transforms\n ),\n batch_size=64,\n shuffle=True,\n " }, { "id": 280, "commit_id": "8436a4bbdd900476b4f85cad7024ef4e2e964352", "repo": "PySyft", "path": "packages/syft/src/syft/core/adp/vm_private_scalar_manager.py", "file_name": "vm_private_scalar_manager.py", "fun_name": "get", "commit_message": "Fix serialize", "code": "def get(self, index) -> int:\n while index > len(self.prime_numbers)-1:\n self.exp += 1\n self.prime_numbers = primes(10**self.exp)\n return self.prime_numbers[index]\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 52, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 5, "token_counts": 45, "n_ast_nodes": 70, "n_identifiers": 8, "random_cut": "def get(self, index) -> int:\n while index > len(self.prime_numbers)-1:\n self.exp += 1\n self.prime_numbers = primes(10**self.exp)\n return" }, { "id": 52684, "commit_id": "4382eee60dbee6cb153822a4cb839693e59091bf", "repo": "PaddleHub", "path": "modules/image/text_recognition/ch_pp-ocrv3_det/test.py", "file_name": "test.py", "fun_name": "test_detect_text4", "commit_message": "update ch_pp-ocrv3_det (#2173)\n\n* update ch_pp-ocrv3_det\r\n\r\n* update", "code": "def test_detect_text4(self):\n results = self.module.detect_text(\n images=[cv2.imread('tests/test.jpg')],\n use_gpu=False,\n visualization=True,\n )\n self.assertEqual(\n results[0]['data'],\n [[[261, 202], [376, 202], [376, 239], [261, 239]], [[283, 162], [352, 162], [352, 202], [283, 202]]])\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 102, "n_words": 27, "vocab_size": 22, "complexity": 1, "nloc": 9, "token_counts": 99, "n_ast_nodes": 137, "n_identifiers": 11, "random_cut": "def test_detect_text4(self):\n results = self.module.detect_text(\n images=[cv2.imread('tes" }, { "id": 113671, "commit_id": "d68c786ff81bad19c04619d6a999ff34aaa724e7", "repo": "nni", "path": "nni/compression/pytorch/utils/config_validation_v1.py", "file_name": "config_validation_v1.py", "fun_name": "validate_op_types", "commit_message": 
"[Compression] remove pruning v1 & refactor directory (#5228)", "code": "def validate_op_types(model, op_types, logger):\n found_types = set(['default']) | set(map(lambda x: type(x[1]).__name__, model.named_modules()))\n\n not_found_op_types = list(set(op_types) - found_types)\n if not_found_op_types:\n logger.warning('op_types %s not found in model', not_found_op_types)\n\n return True\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 46, "n_words": 28, "vocab_size": 27, "complexity": 2, "nloc": 6, "token_counts": 66, "n_ast_nodes": 107, "n_identifiers": 14, "random_cut": "def validate_op_types(model, op_types, logger):\n found_types = set(['default']) | set(map(lambda x: type(x[1]).__name__, model.named_modules()))\n\n not_found_op_types = list(set(op_types) - found_types)\n if not_found_op_types:\n logger.warning('op_types %s not found in model', not_found_op_types)\n\n return True\n" }, { "id": 179901, "commit_id": "04b6b80b3361a14eaee4a064bccc25494332e83c", "repo": "gradio", "path": "test/test_inputs.py", "file_name": "test_inputs.py", "fun_name": "test_as_component", "commit_message": "inputs", "code": "def test_as_component(self):\n bool_input = gr.inputs.Checkbox()\n self.assertEqual(bool_input.preprocess(True), True)\n self.assertEqual(bool_input.preprocess_example(True), True)\n self.assertEqual(bool_input.serialize(True, True), True)\n with tempfile.TemporaryDirectory() as tmpdirname:\n to_save = bool_input.save_flagged(tmpdirname, \"bool_input\", True, None)\n self.assertEqual(to_save, True)\n restored = bool_input.restore_flagged(tmpdirname, to_save, None)\n self.assertEqual(restored, True)\n self.assertIsInstance(bool_input.generate_sample(), bool)\n bool_input = gr.inputs.Checkbox(default=True, label=\"Check Your Input\")\n self.assertEqual(\n bool_input.get_template_context(),\n {\n \"default_value\": True,\n \"name\": \"checkbox\",\n \"label\": \"Check Your Input\",\n \"css\": {},\n },\n )\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 253, "n_words": 54, "vocab_size": 43, "complexity": 1, "nloc": 21, "token_counts": 165, "n_ast_nodes": 267, "n_identifiers": 23, "random_cut": "def test_as_component(self):\n bool_input = gr." 
}, { "id": 203069, "commit_id": "30a01441347d5a2146af2944b29778fa0834d4be", "repo": "django", "path": "tests/queries/test_qs_combinators.py", "file_name": "test_qs_combinators.py", "fun_name": "test_union_in_subquery", "commit_message": "Fixed #29338 -- Allowed using combined queryset in Subquery.\n\nThanks Eugene Kovalev for the initial patch, Simon Charette for the\r\nreview, and Chetan Khanna for help.", "code": "def test_union_in_subquery(self):\n ReservedName.objects.bulk_create([\n ReservedName(name='rn1', order=8),\n ReservedName(name='rn2', order=1),\n ReservedName(name='rn3', order=5),\n ])\n qs1 = Number.objects.filter(num__gt=7, num=OuterRef('order'))\n qs2 = Number.objects.filter(num__lt=2, num=OuterRef('order'))\n self.assertCountEqual(\n ReservedName.objects.annotate(\n number=Subquery(qs1.union(qs2).values('num')),\n ).filter(number__isnull=False).values_list('order', flat=True),\n [8, 1],\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 148, "n_words": 26, "vocab_size": 24, "complexity": 1, "nloc": 14, "token_counts": 137, "n_ast_nodes": 219, "n_identifiers": 24, "random_cut": "def test_union_in_subquery(self):\n ReservedName.objects.bulk_create([\n ReservedName(name='rn1', order=8),\n ReservedName(name='rn2', order=1),\n ReservedName(name='rn3', or" }, { "id": 294228, "commit_id": "83983bc875445d7147cb98e70f1214c6ed270da9", "repo": "core", "path": "homeassistant/components/motion_blinds/cover.py", "file_name": "cover.py", "fun_name": "set_absolute_position", "commit_message": "Motion request update till stop (#68580)\n\n* update untill stop\r\n\r\n* fixes\r\n\r\n* fix spelling", "code": "def set_absolute_position(self, **kwargs):\n \n position = kwargs[ATTR_ABSOLUTE_POSITION]\n self._blind.Set_position(100 - position)\n self.request_position_till_stop()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 38, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 51, "n_identifiers": 8, "random_cut": "def set_absolute_position(self, **kwargs):\n \n position = kwargs" }, { "id": 320967, "commit_id": "21419c9ef5a90ea36a27afaf2503a57f8f9f8536", "repo": "qutebrowser", "path": "tests/unit/javascript/test_greasemonkey.py", "file_name": "test_greasemonkey.py", "fun_name": "test_all", "commit_message": "greasemonkey: Don't implicitly load scripts\n\nNeeded for #7245 and also seems like cleaner code.", "code": "def test_all(gm_manager):\n \n _save_script(test_gm_script, 'test.user.js')\n gm_manager.load_scripts()\n\n assert (gm_manager.all_scripts()[0].name ==\n \"qutebrowser test userscript\")\n\n\n@pytest.mark.parametrize(\"url, expected_matches\", [\n # included\n ('http://trolol.com/', 1),\n # neither included nor excluded\n ('http://aaaaaaaaaa.com/', 0),\n # excluded\n ('https://badhost.xxx/', 0),\n])", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"url, expected_matches\", [\n # included\n ('http://trolol.com/', 1),\n # neither included nor excluded\n ('http://aaaaaaaaaa.com/', 0),\n # excluded\n ('https://badhost.xxx/', 0),\n])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 69, "n_words": 30, "vocab_size": 25, "complexity": 1, "nloc": 5, "token_counts": 32, "n_ast_nodes": 109, "n_identifiers": 10, "random_cut": "def test_all(gm_manager):\n \n 
_save_script(test_gm_script, 'test.user.js')\n gm_manager.load_scripts()\n\n assert (gm_manager.all_scripts()[0].name ==\n \"qutebrowser test userscript\")\n\n\n@pytest.mark.parametrize(\"url, expected" }, { "id": 69018, "commit_id": "38352b3e46fb18435c780e5775bbc886491eac96", "repo": "erpnext", "path": "erpnext/regional/india/e_invoice/utils.py", "file_name": "utils.py", "fun_name": "get_invoice_value_details", "commit_message": "test: Add test for einvoice discounts", "code": "def get_invoice_value_details(invoice):\n\tinvoice_value_details = frappe._dict(dict())\n\tinvoice_value_details.base_total = abs(sum([i.taxable_value for i in invoice.get(\"items\")]))\n\tif (\n\t\tinvoice.apply_discount_on == \"Grand Total\"\n\t\tand invoice.discount_amount\n\t\tand invoice.get(\"is_cash_or_non_trade_discount\")\n\t):\n\t\tinvoice_value_details.invoice_discount_amt = invoice.discount_amount\n\telse:\n\t\tinvoice_value_details.invoice_discount_amt = 0\n\n\tinvoice_value_details.round_off = invoice.base_rounding_adjustment\n\tinvoice_value_details.base_grand_total = abs(invoice.base_rounded_total) or abs(\n\t\tinvoice.base_grand_total\n\t)\n\tinvoice_value_details.grand_total = abs(invoice.rounded_total) or abs(invoice.grand_total)\n\n\tinvoice_value_details = update_invoice_taxes(invoice, invoice_value_details)\n\n\treturn invoice_value_details\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 33, "n_words": 51, "vocab_size": 38, "complexity": 7, "nloc": 18, "token_counts": 124, "n_ast_nodes": 203, "n_identifiers": 22, "random_cut": "def get_invoice_value_details(invoice):\n\tinvoice_value_details = frappe._dict(dict())\n\tinvoice_value_details.base_total = abs(sum([i.taxable_value for i in invoice.get(\"items\")]))\n\tif (\n\t\tinvoice.apply_discount_on == \"Grand Total\"\n\t\tand invoice.discount_amount\n\t\tand invoice.get(\"is_cash_or_non_trade_discount\")\n\t):\n\t\tinvoice_value_details.invoice_discount_amt = invoice.discount_amount\n\telse:\n\t\tinvoice_value_details.invoice_discount_a" }, { "id": 18699, "commit_id": "72c00f3d959baa1e355e8d8231e60561abe62eea", "repo": "ccxt", "path": "python/ccxt/async_support/currencycom.py", "file_name": "currencycom.py", "fun_name": "fetch_balance", "commit_message": "1.73.50\n\n[ci skip]", "code": "async def fetch_balance(self, params={}):\n await self.load_markets()\n response = await self.privateGetV2Account(params)\n #\n # {\n # \"makerCommission\": \"0.20\",\n # \"takerCommission\": \"0.20\",\n # \"buyerCommission\": \"0.20\",\n # \"sellerCommission\": \"0.20\",\n # \"canTrade\": True,\n # \"canWithdraw\": True,\n # \"canDeposit\": True,\n # \"updateTime\": \"1645266330\",\n # \"userId\": \"644722\",\n # \"balances\": [\n # {\n # \"accountId\": \"120702016179403605\",\n # \"collateralCurrency\": False,\n # \"asset\": \"CAKE\",\n # \"free\": \"1.784\",\n # \"locked\": \"0.0\",\n # \"default\": False,\n # },\n # {\n # \"accountId\": \"109698017413175316\",\n # \"collateralCurrency\": True,\n # \"asset\": \"USD\",\n # \"free\": \"7.58632\",\n # \"locked\": \"0.0\",\n # \"default\": True,\n # }\n # ]\n # }\n #\n return self.parse_balance(response)\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 667, "n_words": 94, "vocab_size": 45, "complexity": 1, "nloc": 4, "token_counts": 32, "n_ast_nodes": 85, "n_identifiers": 7, "random_cut": "async def fetch_balance(self, 
params={}):\n await self.load_markets()\n response = await self.privateGetV2Account(params)\n #\n # {\n # \"makerCommission\": \"0.20\",\n # \"takerCommission\": \"0.20\",\n # \"buyerCommission\": \"0.20\",\n # \"sellerCommission\": \"0.20\",\n # \"canTrade\": True,\n # \"canWithdraw\": True,\n # \"canDeposit\": True,\n # \"updateTime\": \"1645266330\",\n # \"userId\": \"644722\",\n # \"balances\": [\n # {\n # \"accountId\": \"120702016179403605\",\n # \"collateralCurrency\": False,\n # \"asset\": \"CAKE\",\n # \"free\": \"1.784\",\n # \"locked\": \"0.0\",\n # \"default\": False,\n # },\n # {\n # \"accountId\": \"109698017413175316\",\n # \"collateralCurrency\": True,\n # \"asset\": \"USD\",\n # \"free\": \"7.58632\",\n # \"locked\": \"0.0\",\n #" }, { "id": 51476, "commit_id": "000473594a0d7c7d27795d017abe961902251869", "repo": "PaddleHub", "path": "modules/image/semantic_segmentation/ace2p/data_feed.py", "file_name": "data_feed.py", "fun_name": "preprocess", "commit_message": "update ace2p (#2003)\n\n* update ace2p\r\n\r\n* add clean func\r\n\r\n* update ace2p", "code": "def preprocess(org_im, scale, rotation):\n image = org_im.copy()\n image_height, image_width, _ = image.shape\n\n aspect_ratio = scale[1] * 1.0 / scale[0]\n image_center, image_scale = _box2cs([0, 0, image_width - 1, image_height - 1], aspect_ratio)\n\n trans = get_affine_transform(image_center, image_scale, rotation, scale)\n image = cv2.warpAffine(\n image,\n trans, (int(scale[1]), int(scale[0])),\n flags=cv2.INTER_LINEAR,\n borderMode=cv2.BORDER_CONSTANT,\n borderValue=(0, 0, 0))\n\n img_mean = np.array([0.406, 0.456, 0.485]).reshape((1, 1, 3))\n img_std = np.array([0.225, 0.224, 0.229]).reshape((1, 1, 3))\n image = image.astype(np.float32)\n image = (image / 255.0 - img_mean) / img_std\n image = image.transpose(2, 0, 1).astype(np.float32)\n\n image_info = {\n 'image_center': image_center,\n 'image_height': image_height,\n 'image_width': image_width,\n 'image_scale': image_scale,\n 'rotation': rotation,\n 'scale': scale\n }\n\n return image, image_info\n\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 217, "n_words": 99, "vocab_size": 67, "complexity": 1, "nloc": 26, "token_counts": 259, "n_ast_nodes": 356, "n_identifiers": 33, "random_cut": "def preprocess(org_im, scale, rotation):\n image = org_im.copy()\n image_height, image_width, _ = image.shape\n\n aspect_ratio = scale[1] * 1.0 / scale[0]\n image_center, image_scale = _box2cs([0, 0, image_width - 1, image_height - 1], aspect_ratio)\n\n trans = get_affine_transform(image_center, image_scale, rotation, scale)\n image = cv2.warpAffine(\n image,\n trans, (int(scale[1]), int(scale[0])),\n flags=cv2.INTER_LINEAR,\n borderMode=cv2.BORDER_CONSTANT,\n borderValue=(0, 0, 0))\n\n img_mean = np.array([0.406, 0.456, 0.485]).reshape((1, 1, 3))\n img_std = np.array([0.225, 0.224, 0.229]).reshape((1, 1, 3))\n image = image.astype(np.float32)\n image = (image / 255.0 - img_mean) / img_std\n image = image.transpose(2, 0, 1).astype(np.float32)\n\n image_info = {\n 'image_center': image_center,\n 'image_height': image_height,\n 'image_width': image_width,\n 'image_scale': image_scale,\n 'rotation': rotation,\n 'scale': scale\n }\n\n return image, image_info\n\n" }, { "id": 138677, "commit_id": "4a30ae0ab65b6f4e966aa9bd9b50720889c8458e", "repo": "ray", "path": "python/ray/tune/trial_runner.py", "file_name": "trial_runner.py", "fun_name": "_process_trial_result", 
"commit_message": "[tune] De-clutter log outputs in trial runner (#24257)\n\nThere are currently some debug logs left logging to INFO scope. This PR demotes them to DEBUG and cleans up the messages.", "code": "def _process_trial_result(self, trial, result):\n result.update(trial_id=trial.trial_id)\n is_duplicate = RESULT_DUPLICATE in result\n force_checkpoint = result.get(SHOULD_CHECKPOINT, False)\n # TrialScheduler and SearchAlgorithm still receive a\n # notification because there may be special handling for\n # the `on_trial_complete` hook.\n if is_duplicate:\n logger.debug(\"Trial finished without logging 'done'.\")\n result = trial.last_result\n result.update(done=True)\n\n self._total_time += result.get(TIME_THIS_ITER_S, 0)\n\n flat_result = flatten_dict(result)\n self._validate_result_metrics(flat_result)\n\n if self._stopper(trial.trial_id, result) or trial.should_stop(flat_result):\n decision = TrialScheduler.STOP\n else:\n with warn_if_slow(\"scheduler.on_trial_result\"):\n decision = self._scheduler_alg.on_trial_result(self, trial, flat_result)\n if decision == TrialScheduler.STOP:\n result.update(done=True)\n else:\n # Only updating search alg if the trial is not to be stopped.\n with warn_if_slow(\"search_alg.on_trial_result\"):\n self._search_alg.on_trial_result(trial.trial_id, flat_result)\n\n # If this is not a duplicate result, the callbacks should\n # be informed about the result.\n if not is_duplicate:\n with warn_if_slow(\"callbacks.on_trial_result\"):\n self._callbacks.on_trial_result(\n iteration=self._iteration,\n trials=self._trials,\n trial=trial,\n result=result.copy(),\n )\n trial.update_last_result(result)\n # Include in next experiment checkpoint\n self.trial_executor.mark_trial_to_checkpoint(trial)\n\n # Checkpoints to disk. This should be checked even if\n # the scheduler decision is STOP or PAUSE. 
Note that\n # PAUSE only checkpoints to memory and does not update\n # the global checkpoint state.\n self._checkpoint_trial_if_needed(trial, force=force_checkpoint)\n\n if trial.is_saving:\n logger.debug(f\"Caching trial decision for trial {trial}: {decision}\")\n # Cache decision to execute on after the save is processed.\n # This prevents changing the trial's state or kicking off\n # another training step prematurely.\n self._cached_trial_decisions[trial.trial_id] = decision\n return None\n else:\n self._queue_decision(trial, decision)\n return decision\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 733, "n_words": 210, "vocab_size": 138, "complexity": 7, "nloc": 39, "token_counts": 262, "n_ast_nodes": 446, "n_identifiers": 43, "random_cut": "def _process_trial_result(self, trial, result):\n result.update(trial_id=trial.trial_id)\n is_duplicate =" }, { "id": 54372, "commit_id": "b5836927c71ed0448b674a89efeba64133b586cc", "repo": "prefect", "path": "tests/test_engine.py", "file_name": "test_engine.py", "fun_name": "test_non_prefect_types_return_completed_state", "commit_message": "Fix tests", "code": "async def test_non_prefect_types_return_completed_state(self):\n result_state = await return_value_to_state(\"foo\")\n assert result_state.is_completed()\n assert result_state.data.decode() == \"foo\"\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 33, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 28, "n_ast_nodes": 52, "n_identifiers": 7, "random_cut": "async def test_non_prefect_types_return_completed_state(self):\n result_state = await return_value_to_state(\"foo\")\n assert result_state.is_completed()\n assert result_state.data.decode() == \"foo\"\n" }, { "id": 305722, "commit_id": "73ba7a989b0cae6fba3564947d819e1eeb423f54", "repo": "core", "path": "homeassistant/components/sonos/speaker.py", "file_name": "speaker.py", "fun_name": "subscription_address", "commit_message": "Make Sonos typing more complete (#68072)", "code": "def subscription_address(self) -> str:\n \n assert len(self._subscriptions) > 0\n addr, port = self._subscriptions[0].event_listener.address\n return \":\".join([addr, str(port)])\n\n #\n # Subscription handling and event dispatchers\n #", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 60, "n_words": 23, "vocab_size": 21, "complexity": 1, "nloc": 5, "token_counts": 45, "n_ast_nodes": 77, "n_identifiers": 10, "random_cut": "def subscription_address(self) -> str:\n \n assert len(self._subscriptions) > 0\n addr, port = self._subscriptions[0].event_listener.address\n return \":\".jo" }, { "id": 182106, "commit_id": "6587ba257fc5ea07968752aeaf818b1002cdae0f", "repo": "textual", "path": "tests/css/test_tokenize.py", "file_name": "test_tokenize.py", "fun_name": "test_variable_declaration_no_semicolon", "commit_message": "Parsing variable values as individual tokens", "code": "def test_variable_declaration_no_semicolon():\n css = \"$x: 1\\n$y: 2\"\n assert list(tokenize(css, \"\")) == [\n Token(name=\"variable_name\", value=\"$x:\", code=css, path=\"\", location=(0, 0)),\n Token(name=\"whitespace\", value=\" \", code=css, path=\"\", location=(0, 3)),\n Token(name=\"number\", value=\"1\", code=css, path=\"\", location=(0, 4)),\n 
Token(name=\"variable_value_end\", value=\"\\n\", code=css, path=\"\", location=(0, 5)),\n Token(name=\"variable_name\", value=\"$y:\", code=css, path=\"\", location=(1, 0)),\n Token(name=\"whitespace\", value=\" \", code=css, path=\"\", location=(1, 3)),\n Token(name=\"number\", value=\"2\", code=css, path=\"\", location=(1, 4)),\n ]\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 114, "n_words": 57, "vocab_size": 32, "complexity": 1, "nloc": 11, "token_counts": 209, "n_ast_nodes": 326, "n_identifiers": 10, "random_cut": "def test_variable_declaration_no_semicolon():\n css = \"$x: 1\\n$y: 2\"\n assert list(tokenize(css, \"\")) == [\n Token(name=\"variable_name\", value=\"$x:\", code=css, path=\"\", location=(0, 0)),\n Token(name=\"whitespace\", value=\" \", code=css, path=\"\", location=(0, 3)),\n Token(name=\"number\", value=\"1\", code=css, path=\"\", location=(0, 4)),\n Token(name=\"variable_value_end\", value=\"\\n\", code=css, path=\"\", location=(0, 5)),\n Token(name=\"variable_name\", value=\"$y:\", code=css, path=\"\", location=(1, 0)),\n Token(name=\"whitespace\", value=\" \", code=css, path=\"\", lo" }, { "id": 108632, "commit_id": "85f30cbd485eddc93e3c9ff115ac21c0886909d5", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_ticker.py", "file_name": "test_ticker.py", "fun_name": "logit_deformatter", "commit_message": "Remove *math* parameter of various mathtext internal APIs.\n\nThe *math* parameter is passed through many layers of the call stack\nbut is ultimately only used for a single purpose: deciding whether to\nreplace the ASCII hyphen by a (longer) unicode minus. Instead of doing\nthat, just do the substitution at the parsing stage. In particular,\nthis fixes problematic unicode minus support with the \"cm\" fontset.\n\nThis patch also reverts a significant part of 52003e4, as LogFormatters\nno longer need to pass unicode minuses in mathtext -- everything gets\nconverted by mathtext. Likewise, this change also invalidates the\ntest_log_scales baseline image (old, buggy wrt. 
unicode minus); replace\nit by a test that the drawn ticks are as expected (which was the intent\nin 90c1aa3).", "code": "def logit_deformatter(string):\n r\n match = re.match(\n r\"[^\\d]*\"\n r\"(?P<comp>1-)?\"\n r\"(?P<mant>\\d*\\.?\\d*)?\"\n r\"(?:\\\\cdot)?\"\n r\"(?:10\\^\\{(?P<expo>-?\\d*)})?\"\n r\"[^\\d]*$\",\n string,\n )\n if match:\n comp = match[\"comp\"] is not None\n mantissa = float(match[\"mant\"]) if match[\"mant\"] else 1\n expo = int(match[\"expo\"]) if match[\"expo\"] is not None else 0\n value = mantissa * 10 ** expo\n if match[\"mant\"] or match[\"expo\"] is not None:\n if comp:\n return 1 - value\n return value\n match = re.match(\n r\"[^\\d]*\\\\frac\\{(?P<num>\\d+)\\}\\{(?P<deno>\\d+)\\}[^\\d]*$\", string\n )\n if match:\n num, deno = float(match[\"num\"]), float(match[\"deno\"])\n return num / deno\n raise ValueError(\"Not formatted by LogitFormatter\")\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 352, "n_words": 83, "vocab_size": 52, "complexity": 8, "nloc": 31, "token_counts": 148, "n_ast_nodes": 245, "n_identifiers": 13, "random_cut": "def logit_deformatter(string):\n r\n match = re.match(\n r\"[^\\d]*\"\n r\"(?P<comp>1-)?\"\n r\"(?P<mant>\\d*\\.?\\d*)?\"\n r\"(?:\\\\" }, { "id": 279360, "commit_id": "30bf872258415cd4a83ac1a33b031cc804981a9c", "repo": "keras", "path": "keras/optimizers/__init__.py", "file_name": "__init__.py", "fun_name": "deserialize", "commit_message": "Flip the default optimizer to experimental optimizer when deserializing optimizer.\n\nPiperOrigin-RevId: 465336057", "code": "def deserialize(config, custom_objects=None, **kwargs):\n \n # loss_scale_optimizer has a direct dependency of optimizer, import here\n # rather than top to avoid the cyclic dependency.\n from keras.mixed_precision import (\n loss_scale_optimizer,\n )\n\n use_legacy_optimizer = kwargs.pop(\"use_legacy_optimizer\", True)\n if len(config[\"config\"]) > 0:\n # If the optimizer config is not empty, then we use the value of\n # `is_legacy_optimizer` to override `use_legacy_optimizer`. 
If\n # `is_legacy_optimizer` does not exist in config, it means we are\n # using the legacy optimzier.\n use_legacy_optimizer = config[\"config\"].get(\"is_legacy_optimizer\", True)\n if (\n tf.__internal__.tf2.enabled()\n and tf.executing_eagerly()\n and not use_legacy_optimizer\n ):\n all_classes = {\n \"adadelta\": adadelta_experimental.Adadelta,\n \"adagrad\": adagrad_experimental.Adagrad,\n \"adam\": adam_experimental.Adam,\n \"adamax\": adamax_experimental.Adamax,\n \"experimentaladadelta\": adadelta_experimental.Adadelta,\n \"experimentaladagrad\": adagrad_experimental.Adagrad,\n \"experimentaladam\": adam_experimental.Adam,\n \"experimentalsgd\": sgd_experimental.SGD,\n \"nadam\": nadam_experimental.Nadam,\n \"rmsprop\": rmsprop_experimental.RMSprop,\n \"sgd\": sgd_experimental.SGD,\n \"ftrl\": ftrl_experimental.Ftrl,\n \"lossscaleoptimizer\": loss_scale_optimizer.LossScaleOptimizerV3,\n \"lossscaleoptimizerv3\": loss_scale_optimizer.LossScaleOptimizerV3,\n # LossScaleOptimizerV1 was an old version of LSO that was removed.\n # Deserializing it turns it into a LossScaleOptimizer\n \"lossscaleoptimizerv1\": loss_scale_optimizer.LossScaleOptimizer,\n }\n else:\n all_classes = {\n \"adadelta\": adadelta_v2.Adadelta,\n \"adagrad\": adagrad_v2.Adagrad,\n \"adam\": adam_v2.Adam,\n \"adamax\": adamax_v2.Adamax,\n \"experimentaladadelta\": adadelta_experimental.Adadelta,\n \"experimentaladagrad\": adagrad_experimental.Adagrad,\n \"experimentaladam\": adam_experimental.Adam,\n \"experimentalsgd\": sgd_experimental.SGD,\n \"nadam\": nadam_v2.Nadam,\n \"rmsprop\": rmsprop_v2.RMSprop,\n \"sgd\": gradient_descent_v2.SGD,\n \"ftrl\": ftrl.Ftrl,\n \"lossscaleoptimizer\": loss_scale_optimizer.LossScaleOptimizer,\n \"lossscaleoptimizerv3\": loss_scale_optimizer.LossScaleOptimizerV3,\n # LossScaleOptimizerV1 was an old version of LSO that was removed.\n # Deserializing it turns it into a LossScaleOptimizer\n \"lossscaleoptimizerv1\": loss_scale_optimizer.LossScaleOptimizer,\n }\n\n # Make deserialization case-insensitive for built-in optimizers.\n if config[\"class_name\"].lower() in all_classes:\n config[\"class_name\"] = config[\"class_name\"].lower()\n return deserialize_keras_object(\n config,\n module_objects=all_classes,\n custom_objects=custom_objects,\n printable_module_name=\"optimizer\",\n )\n\n\n@keras_export(\n \"keras.__internal__.optimizers.convert_to_legacy_optimizer\", v1=[]\n)", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\n \"keras.__internal__.optimizers.convert_to_legacy_optimizer\", v1=[]\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 761, "n_words": 218, "vocab_size": 129, "complexity": 6, "nloc": 55, "token_counts": 311, "n_ast_nodes": 547, "n_identifiers": 49, "random_cut": "def deserialize(config, custom_objects=None, **kwargs):\n \n # loss_scale_optimizer has a direct dependency of optimizer, import here\n # rather than top to avoid the cyclic dependency.\n from keras.mixed_precision import (\n loss_scale_optimizer,\n )\n\n use_legacy_optimizer = kwargs.pop(\"use_legacy_optimizer\", True)\n if len(config[\"config\"]) > 0:\n # If the optimizer config is not empty, then we use the value of\n # " }, { "id": 97203, "commit_id": "e676b34aea4b38ee33ee0dd2de5e0cc8e546ae1a", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_member_team_details.py", "file_name": "test_organization_member_team_details.py", "fun_name": "test_member_can_leave", "commit_message": "ref(tests): DRY OrganizationMember 
tests. (#32715)", "code": "def test_member_can_leave(self):\n self.login_as(self.team_member.user)\n self.get_success_response(\n self.org.slug, self.team_member.id, self.team.slug, status_code=status.HTTP_200_OK\n )\n\n assert not OrganizationMemberTeam.objects.filter(\n team=self.team, organizationmember=self.team_member\n ).exists()\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 71, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 8, "token_counts": 67, "n_ast_nodes": 102, "n_identifiers": 18, "random_cut": "def test_member_can_leave(self):\n self.login_as(self.team_member.user)\n self.get_success_response(\n self.org.slug, self.team_member.id, self.team.slug, status_code=status.HTTP_200_OK\n )\n\n assert not OrganizationMemberTeam.objects.filter(\n team=self.team, organizationmember=self.team_member\n ).exists()\n" }, { "id": 187112, "commit_id": "120c10302381600abb4044083ce0a106b31df8f0", "repo": "streamlink", "path": "tests/test_api_validate.py", "file_name": "test_api_validate.py", "fun_name": "test_url", "commit_message": "plugin.api.validate: turn module into package\n\nTurn module into package with multiple logical sub-modules:\n- Define a public interface in the package's `__init__` module\n- Split validation schemas, validators and validate logic\n - schemas: classes which register attributes used by their\n respective `validate` implementations\n - validators: functions which can internally call `validate`\n and which return something that can be validated\n - validate: singledispatch functions which implement the validation\n logic for schemas and various other types\n- Rename validation schemas for better internal references\n- Rename singledispatch methods\n\nOther clean-up work:\n- Update comments and fix grammar\n- Add type annotations\n- Use f-strings\n- Use `str` instead of the `text` alias\n- Simplify some code blocks\n- Rearrange classes and functions\n- Rephrase certain error messages\n- Add a few more tests for better code coverage", "code": "def test_url(self):\n url_ = \"https://google.se/path\"\n\n assert validate(url(), url_)\n assert validate(url(scheme=\"http\"), url_)\n assert validate(url(path=\"/path\"), url_)\n\n with self.assertRaises(ValueError) as cm:\n validate(url(), \"foo\")\n assert str(cm.exception) == \"'foo' is not a valid URL\"\n\n with self.assertRaises(ValueError) as cm:\n validate(url(foo=\"bar\"), \"https://foo\")\n assert str(cm.exception) == \"Invalid URL attribute 'foo'\"\n\n with self.assertRaises(ValueError) as cm:\n validate(url(path=endswith(\".m3u8\")), \"https://foo/bar.mpd\")\n assert str(cm.exception) == \"Unable to validate URL attribute 'path': '/bar.mpd' does not end with '.m3u8'\"\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 165, "n_words": 63, "vocab_size": 39, "complexity": 1, "nloc": 14, "token_counts": 131, "n_ast_nodes": 237, "n_identifiers": 14, "random_cut": "def test_url(self):\n url_ = \"https://google.se/path\"\n\n assert validate(url(), url_)\n assert validate(url(scheme=\"http\"), url" }, { "id": 58992, "commit_id": "6e0a171ae169f4db1cfdd5ad9e0a576ff4962386", "repo": "prefect", "path": "tests/test_filesystems.py", "file_name": "test_filesystems.py", "fun_name": "test_read_write_roundtrip_sync", "commit_message": "Added Sync API for public interface methods. 
(#6511)\n\n* Added sync api\r\n\r\n* Removed trailing Spaces\r\n\r\n* filesystem tests for sync methods\r\n\r\n* sync tests\r\n\r\n* added more tests\r\n\r\n* adding test to resolve conflict\r\n\r\n* resolving git conflict\r\n\r\n* removed redundant/unneccessary tests\r\n\r\n* removed redundant/unneccessary tests\r\n\r\nCo-authored-by: Michael Adkins \r\nCo-authored-by: Bada-S \r\nCo-authored-by: James Bada Sopkin <69161193+Bada-S@users.noreply.github.com>", "code": "def test_read_write_roundtrip_sync(self):\n fs = RemoteFileSystem(basepath=\"memory://root\")\n fs.write_path(\"test.txt\", content=b\"hello\")\n assert fs.read_path(\"test.txt\") == b\"hello\"\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 31, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 34, "n_ast_nodes": 61, "n_identifiers": 8, "random_cut": "def test_read_write_roundtrip_sync(self):\n fs = RemoteFileSystem(basepath=\"memory://root\")\n fs.write_path(\"test.txt\", content=b\"hello\")\n " }, { "id": 251165, "commit_id": "e83ec8390ad6be6a86cfcfc57bce14cb8861bf32", "repo": "mitmproxy", "path": "mitmproxy/tls.py", "file_name": "tls.py", "fun_name": "alpn_protocols", "commit_message": "`pyupgrade --py39-plus **/*.py`", "code": "def alpn_protocols(self) -> list[bytes]:\n \n if self._client_hello.extensions:\n for extension in self._client_hello.extensions.extensions:\n if extension.type == 0x10:\n return list(x.name for x in extension.body.alpn_protocols)\n return []\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 88, "n_words": 22, "vocab_size": 18, "complexity": 5, "nloc": 10, "token_counts": 54, "n_ast_nodes": 85, "n_identifiers": 11, "random_cut": "def alpn_protocols(self) -> list[bytes]:\n \n if self._client_hello.extensions:\n for extension in self._client_hello.extensions.extensions:\n if extension.type == 0x10:\n return list(x.name for x in extension.body.alpn_protocols)\n return []\n" }, { "id": 172481, "commit_id": "df670795731e7568462869b815f4eb39da1bb41a", "repo": "calibre-web", "path": "cps/kobo.py", "file_name": "kobo.py", "fun_name": "sync_shelves", "commit_message": "Fixes for kobosync with multiple users (#2230)", "code": "def sync_shelves(sync_token, sync_results, only_kobo_shelves=False):\n new_tags_last_modified = sync_token.tags_last_modified\n # transmit all archived shelfs independent of last sync (why should this matter?)\n for shelf in ub.session.query(ub.ShelfArchive).filter(ub.ShelfArchive.user_id == current_user.id):\n new_tags_last_modified = max(shelf.last_modified, new_tags_last_modified)\n sync_results.append({\n \"DeletedTag\": {\n \"Tag\": {\n \"Id\": shelf.uuid,\n \"LastModified\": convert_to_kobo_timestamp_string(shelf.last_modified)\n }\n }\n })\n ub.session.delete(shelf)\n ub.session_commit()\n\n extra_filters = []\n if only_kobo_shelves:\n for shelf in ub.session.query(ub.Shelf).filter(\n func.datetime(ub.Shelf.last_modified) > sync_token.tags_last_modified,\n ub.Shelf.user_id == current_user.id,\n not ub.Shelf.kobo_sync\n ):\n sync_results.append({\n \"DeletedTag\": {\n \"Tag\": {\n \"Id\": shelf.uuid,\n \"LastModified\": convert_to_kobo_timestamp_string(shelf.last_modified)\n }\n }\n })\n extra_filters.append(ub.Shelf.kobo_sync)\n\n if sqlalchemy_version2:\n shelflist = ub.session.execute(select(ub.Shelf).outerjoin(ub.BookShelf).filter(\n 
or_(func.datetime(ub.Shelf.last_modified) > sync_token.tags_last_modified,\n func.datetime(ub.BookShelf.date_added) > sync_token.tags_last_modified),\n ub.Shelf.user_id == current_user.id,\n *extra_filters\n ).distinct().order_by(func.datetime(ub.Shelf.last_modified).asc())).columns(ub.Shelf)\n else:\n shelflist = ub.session.query(ub.Shelf).outerjoin(ub.BookShelf).filter(\n or_(func.datetime(ub.Shelf.last_modified) > sync_token.tags_last_modified,\n func.datetime(ub.BookShelf.date_added) > sync_token.tags_last_modified),\n ub.Shelf.user_id == current_user.id,\n *extra_filters\n ).distinct().order_by(func.datetime(ub.Shelf.last_modified).asc())\n\n\n for shelf in shelflist:\n if not shelf_lib.check_shelf_view_permissions(shelf):\n continue\n\n new_tags_last_modified = max(shelf.last_modified, new_tags_last_modified)\n\n tag = create_kobo_tag(shelf)\n if not tag:\n continue\n\n if shelf.created > sync_token.tags_last_modified:\n sync_results.append({\n \"NewTag\": tag\n })\n else:\n sync_results.append({\n \"ChangedTag\": tag\n })\n sync_token.tags_last_modified = new_tags_last_modified\n ub.session_commit()\n\n\n# Creates a Kobo \"Tag\" object from a ub.Shelf object", "url": "https://github.com/janeczku/calibre-web.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 25, "n_whitespaces": 757, "n_words": 152, "vocab_size": 82, "complexity": 9, "nloc": 61, "token_counts": 462, "n_ast_nodes": 741, "n_identifiers": 44, "random_cut": "def sync_shelves(sync_token, sync_results, only_kobo_shelves=False):\n new_tags_last_modified = sync_token.tags_last_modified\n # transmit all archived shelfs independent of last sync (why should this matter?)\n for shelf in ub.session.query(ub.ShelfArchive).filter(ub.ShelfArchive.user_id == current_user.id):\n new_tags_last_modified = max(shelf.last_modified, new_tags_last_modified)\n sync_results.append({\n \"DeletedTag\": {\n \"Tag\": {\n \"Id\": shelf.uuid,\n \"LastModified\": convert_to_kobo_timestamp_string(shelf.last_modified)\n }\n }\n })\n ub.session.delete(shelf)\n ub.session_commit()\n\n extra_filters = []\n if only_kobo_shelves:\n for shelf in ub.session.query(ub.Shelf).filter(\n func.datetime(ub.Shelf.last_modified) > sync_token.tags_last_modified,\n ub.Shelf.user_id == current_user.id,\n not ub.Shelf.kobo_sync\n ):\n sync_results.append({\n \"DeletedTag\": {\n \"Tag\": {\n \"Id\": shelf.uuid,\n \"LastModified\": convert_to_kobo_timestamp_string(shelf.last_modified)\n }\n }\n })\n extra_filters.append(ub.Shelf.kobo_sync)\n\n if sqlalchemy_version2:\n shelflist = ub.sessio" }, { "id": 144773, "commit_id": "85d6946c9524d8544e69262f737018151efb1567", "repo": "ray", "path": "python/ray/data/tests/test_dataset_formats.py", "file_name": "test_dataset_formats.py", "fun_name": "test_json_roundtrip", "commit_message": "Split test_dataset.py into two (#22303)", "code": "def test_json_roundtrip(ray_start_regular_shared, fs, data_path):\n # Single block.\n df = pd.DataFrame({\"one\": [1, 2, 3], \"two\": [\"a\", \"b\", \"c\"]})\n ds = ray.data.from_pandas([df])\n ds._set_uuid(\"data\")\n ds.write_json(data_path, filesystem=fs)\n file_path = os.path.join(data_path, \"data_000000.json\")\n ds2 = ray.data.read_json([file_path], filesystem=fs)\n ds2df = ds2.to_pandas()\n assert ds2df.equals(df)\n # Test metadata ops.\n for block, meta in ds2._blocks.get_blocks_with_metadata():\n BlockAccessor.for_block(ray.get(block)).size_bytes() == meta.size_bytes\n\n if fs is None:\n os.remove(file_path)\n else:\n 
fs.delete_file(_unwrap_protocol(file_path))\n\n # Two blocks.\n df2 = pd.DataFrame({\"one\": [4, 5, 6], \"two\": [\"e\", \"f\", \"g\"]})\n ds = ray.data.from_pandas([df, df2])\n ds._set_uuid(\"data\")\n ds.write_json(data_path, filesystem=fs)\n ds2 = ray.data.read_json(data_path, parallelism=2, filesystem=fs)\n ds2df = ds2.to_pandas()\n assert pd.concat([df, df2], ignore_index=True).equals(ds2df)\n # Test metadata ops.\n for block, meta in ds2._blocks.get_blocks_with_metadata():\n BlockAccessor.for_block(ray.get(block)).size_bytes() == meta.size_bytes\n\n\n@pytest.mark.parametrize(\n \"fs,data_path,endpoint_url\",\n [\n (None, lazy_fixture(\"local_path\"), None),\n (lazy_fixture(\"local_fs\"), lazy_fixture(\"local_path\"), None),\n (lazy_fixture(\"s3_fs\"), lazy_fixture(\"s3_path\"), lazy_fixture(\"s3_server\")),\n ],\n)", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"fs,data_path,endpoint_url\",\n [\n (None, lazy_fixture(\"local_path\"), None),\n (lazy_fixture(\"local_fs\"), lazy_fixture(\"local_path\"), None),\n (lazy_fixture(\"s3_fs\"), lazy_fixture(\"s3_path\"), lazy_fixture(\"s3_server\")),\n ],\n)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 237, "n_words": 113, "vocab_size": 77, "complexity": 4, "nloc": 24, "token_counts": 296, "n_ast_nodes": 571, "n_identifiers": 42, "random_cut": "def test_json_roundtrip(ray_start_regular_shared, fs, data_path):\n # Single block.\n df = pd.DataFrame({\"one\": [1, 2, 3], \"two\": [\"a\", \"b\", \"c\"]})\n ds = ray.data.from_pandas([df])\n ds._set_uuid(\"data\")\n ds.write_json(data_path, filesystem=fs)\n file_path = os.path.join(data_path, \"data_000000.json\")\n ds2 = ray.data.read_json([file_path], filesystem=fs)\n ds2df = ds2.to_pandas()\n assert ds2df.equals(df)\n # Test metadata ops.\n for block, meta in ds2._blocks.get_blocks_with_metadata():\n BlockAccessor.for_block(ray.get(block)).size_bytes() == meta.size_bytes\n\n if fs is None:\n os.remove(file_path)\n else:\n fs.delete_file(_unwrap_protocol(file_path))\n\n # Two blocks.\n df2 = pd.DataFrame({\"one\": [4, 5, 6], \"two\": [\"e\", \"f\", \"g\"]})\n ds = ray.data.from_pandas([df, df2])\n ds._set_uuid(\"data\")\n ds.write_json(data_path, filesystem=fs)\n ds2 = ray.data.read_json(data_path, parallelism=2, filesystem=fs)\n ds2df = ds2.to_pandas()\n assert pd.concat([df, df2], ignore_index=True).equals(ds2df)\n # Test metadata ops.\n for b" }, { "id": 314959, "commit_id": "21d28dd35629a7f4fc086bf9ff4f65ee9270873b", "repo": "core", "path": "homeassistant/components/usgs_earthquakes_feed/geo_location.py", "file_name": "geo_location.py", "fun_name": "_remove_entity", "commit_message": "Migrate usgs_earthquakes_feed to async library (#68370)\n\n* use new async integration library\r\n\r\n* migrate to new async integration library\r\n\r\n* updated unit tests\r\n\r\n* updated logger\r\n\r\n* fix tests and improve test coverage\r\n\r\n* fix test\r\n\r\n* fix requirements\r\n\r\n* time control to fix tests", "code": "async def _remove_entity(self, external_id):\n \n async_dispatcher_send(self._hass, SIGNAL_DELETE_ENTITY.format(external_id))\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 36, "n_identifiers": 7, "random_cut": "async def _remove_entity(self, external_id):\n \n async_dispatcher_send(self._hass, 
SIGNAL_DELETE_ENTITY.format(ext" }, { "id": 104370, "commit_id": "52b3ba8a3173adb0a4d6411f2b4978551672e450", "repo": "datasets", "path": "metrics/mahalanobis/mahalanobis.py", "file_name": "mahalanobis.py", "fun_name": "_info", "commit_message": "Add Mahalanobis distance metric (#3794)\n\n* Add Mahalanobis class metric\r\n\r\n* reformat code\r\n\r\n* try to fix tests\r\n\r\n* reformat file with black\r\n\r\n* fix metric example\r\n\r\n* reformat with black\r\n\r\n* running isort\r\n\r\n* fix flake8\r\n\r\n* change assert to ValueError\r\n\r\n* change metric's features\r\n\r\n* Update metrics/mahalanobis/mahalanobis.py\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* PR feedback\r\n\r\n* Update metrics/mahalanobis/mahalanobis.py\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>", "code": "def _info(self):\n return datasets.MetricInfo(\n description=_DESCRIPTION,\n citation=_CITATION,\n inputs_description=_KWARGS_DESCRIPTION,\n features=datasets.Features(\n {\n \"X\": datasets.Sequence(datasets.Value(\"float\", id=\"sequence\"), id=\"X\"),\n }\n ),\n )\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 133, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 11, "token_counts": 55, "n_ast_nodes": 88, "n_identifiers": 15, "random_cut": "def _info(self):\n return datasets.MetricInfo(\n description=_DESCRIPTION,\n citation=_CITATION,\n inputs_description=_KWARGS_DESCRIPTION,\n features=datasets.Fe" }, { "id": 39432, "commit_id": "9d7d8212292b05605bb2b7c5c425c107d4266e8c", "repo": "recommenders", "path": "tests/unit/recommenders/evaluation/test_python_evaluation_time_performance.py", "file_name": "test_python_evaluation_time_performance.py", "fun_name": "test_python_map_at_k", "commit_message": "Improved time of generating synthedic data, updated pytest fixtures, removed unused lines, updated benchmark results", "code": "def test_python_map_at_k(rating_true, rating_pred):\n with Timer() as t:\n map_at_k(\n rating_true=rating_true,\n rating_pred=rating_pred,\n col_prediction=DEFAULT_PREDICTION_COL,\n k=10,\n )\n assert t.interval < 29.90376154\n\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 80, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 9, "token_counts": 41, "n_ast_nodes": 61, "n_identifiers": 10, "random_cut": "def test_python_map_at_k(rating_true, rating_pred):\n with Timer() as t:\n map_at_k(\n " }, { "id": 179813, "commit_id": "ef533b23321de5a37f81f9d5b5c60f8ea8e620a7", "repo": "gradio", "path": "demo/kinematics_blocks/run.py", "file_name": "run.py", "fun_name": "plot", "commit_message": "kinematics block", "code": "def plot(v, a):\n g = 9.81\n theta = a/180*3.14\n tmax = ((2 * v) * np.sin(theta)) / g\n timemat = tmax*np.linspace(0,1,40)[:,None]\n\n x = ((v * timemat) * np.cos(theta))\n y = ((v * timemat) * np.sin(theta)) - ((0.5 * g) * (timemat ** 2))\n \n fig = plt.figure()\n plt.scatter(x=x, y=y, marker='.')\n plt.xlim(0, 100)\n plt.ylim(0, 60)\n return fig\n\nblock = gr.Blocks()\n\nwith block:\n gr.Markdown(\"Let's do some kinematics! 
Choose the speed and angle to see the trajectory.\")\n \n with gr.Row():\n speed = gr.Slider(25, min=1, max=30,label=\"Speed\")\n angle = gr.Slider(45, min=0, max=90, label=\"Angle\")\n output = gr.Image(type=\"plot\")\n btn = gr.Button(\"Run\")\n btn.click(plot, [speed, angle], output)\n\nblock.launch()\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 163, "n_words": 97, "vocab_size": 70, "complexity": 1, "nloc": 12, "token_counts": 147, "n_ast_nodes": 383, "n_identifiers": 38, "random_cut": "def plot(v, a):\n g = 9.81\n theta = a/180*3.14\n tmax = ((2 * v) * np.sin(theta)) / g\n timemat = tmax*np.linspace(0,1,40)[:,None]\n\n x = ((v * timemat) * np.cos(theta))\n y = ((v * timemat) * np.sin(theta))" }, { "id": 291598, "commit_id": "33cd59d3c2e1f945c16b39d929349e3eeb4cfb9a", "repo": "core", "path": "homeassistant/components/twinkly/light.py", "file_name": "light.py", "fun_name": "async_update_movies", "commit_message": "Add Twinkly effects (#82861)\n\n* Add Twinkly effects\r\n\r\n* Remove spurious comment", "code": "async def async_update_movies(self) -> None:\n \n movies = await self._client.get_saved_movies()\n _LOGGER.debug(\"Movies: %s\", movies)\n if \"movies\" in movies:\n self._movies = movies[\"movies\"]\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 58, "n_words": 19, "vocab_size": 18, "complexity": 2, "nloc": 6, "token_counts": 39, "n_ast_nodes": 72, "n_identifiers": 8, "random_cut": "async def async_update_movies(self) -> None:\n \n movies = await self._client.get_saved_movies()\n _LOGGER.debug(\"Movies: %s\", movies)\n if \"movie" }, { "id": 19845, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_internal/cli/req_command.py", "file_name": "req_command.py", "fun_name": "warn_if_run_as_root", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def warn_if_run_as_root() -> None:\n \n if running_under_virtualenv():\n return\n if not hasattr(os, \"getuid\"):\n return\n # On Windows, there are no \"system managed\" Python packages. Installing as\n # Administrator via pip is the correct way of updating system environments.\n #\n # We choose sys.platform over utils.compat.WINDOWS here to enable Mypy platform\n # checks: https://mypy.readthedocs.io/en/stable/common_issues.html\n if sys.platform == \"win32\" or sys.platform == \"cygwin\":\n return\n\n if os.getuid() != 0:\n return\n\n logger.warning(\n \"Running pip as the 'root' user can result in broken permissions and \"\n \"conflicting behaviour with the system package manager. 
\"\n \"It is recommended to use a virtual environment instead: \"\n \"https://pip.pypa.io/warnings/venv\"\n )\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 191, "n_words": 99, "vocab_size": 77, "complexity": 6, "nloc": 21, "token_counts": 56, "n_ast_nodes": 114, "n_identifiers": 9, "random_cut": "def warn_if_run_as_root() -> None:\n \n if running_under_virtualenv():\n return\n if not hasattr(os, \"getuid\"):\n return\n # On Windows, there are no \"system managed\" Python packages. Installing as\n # Administrator via pip is the correct way of updating system environments.\n #\n # We choose sys.platform over utils.compat.WINDOWS here to enable Mypy platform\n # checks: https://mypy.readthedocs.io/en/stable/common_issues.html\n if sys.platform == \"win32\" or sys.platform == \"cygwin\":\n return" }, { "id": 283333, "commit_id": "b71abcfbf4d7e8ac1855522aff0378e13c8b5362", "repo": "OpenBBTerminal", "path": "terminal.py", "file_name": "terminal.py", "fun_name": "call_etf", "commit_message": "Updating some names (#1575)\n\n* quick econ fix\r\n\r\n* black\r\n\r\n* keys and feature flags\r\n\r\n* terminal name :eyes:\r\n\r\n* some more replacements\r\n\r\n* some more replacements\r\n\r\n* edit pyproject\r\n\r\n* gst -> openbb\r\n\r\n* add example portfolios back to git\r\n\r\n* Update api from gst\r\n\r\n* sorry. skipping some tests\r\n\r\n* another round of names\r\n\r\n* another round of test edits\r\n\r\n* Missed some .gst refs and update timezone\r\n\r\n* water mark stuff\r\n\r\n* Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS\r\n\r\n* fix more GST to OpenBB Terminal\r\n\r\n* Logging : merge conflicts with main\r\n\r\n* Revert wrong files\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA ", "code": "def call_etf(self, _):\n \n from openbb_terminal.etf.etf_controller import ETFController\n\n self.queue = self.load_class(ETFController, self.queue)\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 32, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 30, "n_ast_nodes": 47, "n_identifiers": 9, "random_cut": "def call_etf(self, _):\n \n from openbb_terminal.etf.etf_controller import ETFController\n\n self.queue = self.load" }, { "id": 320709, "commit_id": "f6a365172afe127a4ba770e14569f2d3cd7569b4", "repo": "qutebrowser", "path": "scripts/dev/update_3rdparty.py", "file_name": "update_3rdparty.py", "fun_name": "find_pdfjs_asset", "commit_message": "Use legacy PDF.js build for macOS/Windows releases\n\nFixes #7108", "code": "def find_pdfjs_asset(assets, legacy):\n \n for asset in assets:\n name = asset[\"name\"]\n if (\n name.startswith(\"pdfjs-\") and\n name.endswith(\"-dist.zip\") and\n name.endswith(\"-legacy-dist.zip\") == legacy\n ):\n return asset\n raise Exception(f\"No pdfjs found in {assets}\")\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 102, "n_words": 28, "vocab_size": 25, "complexity": 5, "nloc": 10, "token_counts": 53, "n_ast_nodes": 97, "n_identifiers": 8, "random_cut": "def find_pdfjs_asset(assets, legacy):\n \n for asset in assets:\n name = asset[\"name\"]\n if (\n name.startswith(\"pdfjs-\") and\n name.endswith(\"-dist.zip\") and\n name.endswith(\"-legacy-dist.zip\") 
== legacy\n ):\n return a" }, { "id": 207648, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_extended_bodyclass_template_delete_selected_confirmation", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_extended_bodyclass_template_delete_selected_confirmation(self):\n \n group = Group.objects.create(name=\"foogroup\")\n post_data = {\n \"action\": \"delete_selected\",\n \"selected_across\": \"0\",\n \"index\": \"0\",\n \"_selected_action\": group.id,\n }\n response = self.client.post(reverse(\"admin:auth_group_changelist\"), post_data)\n self.assertEqual(response.context[\"site_header\"], \"Django administration\")\n self.assertContains(response, \"bodyclass_consistency_check \")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 120, "n_words": 27, "vocab_size": 24, "complexity": 1, "nloc": 11, "token_counts": 76, "n_ast_nodes": 140, "n_identifiers": 16, "random_cut": "def test_extended_bodyclass_template_delete_selected_confirmation(self):\n \n group = Group.objects.create(name=\"foogroup\")\n post_data = {\n \"action\": \"delete_selected\",\n " }, { "id": 105172, "commit_id": "599403601739e7a73e8ebbc8653d246e07207265", "repo": "datasets", "path": "tests/test_arrow_dataset.py", "file_name": "test_arrow_dataset.py", "fun_name": "test_concatenate_with_indices_from_disk", "commit_message": "Optimize contiguous shard and select (#4466)\n\n* optimize contiguous shard and select\r\n\r\n* minor\r\n\r\n* support iterators (and therefore generators)\r\n\r\n* comments + docstrings", "code": "def test_concatenate_with_indices_from_disk(self, in_memory):\n data1, data2, data3 = {\"id\": [0, 1, 2] * 2}, {\"id\": [3, 4, 5] * 2}, {\"id\": [6, 7]}\n info1 = DatasetInfo(description=\"Dataset1\")\n info2 = DatasetInfo(description=\"Dataset2\")\n with tempfile.TemporaryDirectory() as tmp_dir:\n dset1, dset2, dset3 = (\n Dataset.from_dict(data1, info=info1),\n Dataset.from_dict(data2, info=info2),\n Dataset.from_dict(data3),\n )\n dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3)\n dset1, dset2, dset3 = (\n dset1.select([2, 1, 0], indices_cache_file_name=os.path.join(tmp_dir, \"i1.arrow\")),\n dset2.select([2, 1, 0], indices_cache_file_name=os.path.join(tmp_dir, \"i2.arrow\")),\n dset3.select([1, 0], indices_cache_file_name=os.path.join(tmp_dir, \"i3.arrow\")),\n )\n\n with concatenate_datasets([dset3, dset2, dset1]) as dset_concat:\n self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))\n self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))\n self.assertListEqual(dset_concat[\"id\"], [7, 6, 5, 4, 3, 2, 1, 0])\n # in_memory = False:\n # 3 cache files for the dset_concat._data table, and 1 for the dset_concat._indices_table\n # There is only 1 for the indices tables (i1.arrow)\n # Indeed, the others are brought to memory since an offset is applied to them.\n # in_memory = True:\n # 1 cache file for i1.arrow since both dset_concat._data and dset_concat._indices are in memory\n self.assertEqual(len(dset_concat.cache_files), 1 if in_memory else 3 + 1)\n self.assertEqual(dset_concat.info.description, \"Dataset2\\n\\nDataset1\")\n del dset1, dset2, dset3\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 532, "n_words": 173, 
"vocab_size": 110, "complexity": 2, "nloc": 23, "token_counts": 347, "n_ast_nodes": 523, "n_identifiers": 31, "random_cut": "def test_concatenate_with_indices_from_disk(self, in_memory):\n data1, data2, data3 = {\"id\": [0, 1, 2] * 2}, {\"id\": [3, 4, 5] * 2}, {\"id\": [6, 7]}\n info1 = DatasetInfo(description=\"Dataset1\")\n info2 = DatasetInfo(description=\"Dataset2\")\n with tempfile.TemporaryDirectory() as tmp_dir:\n dset1, dset2, dset3 = (\n Dataset.from_dict(data1, info=info1),\n Dataset.from_dict(data2, info=info2),\n Dataset.from_dict(data3),\n )\n dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3)\n dset1, dset2, dset3 = (\n dset1.select([2, " }, { "id": 151748, "commit_id": "afc00bc30a94abd64fee000535e66287fd91595f", "repo": "freqtrade", "path": "freqtrade/rpc/api_server/ws/channel.py", "file_name": "channel.py", "fun_name": "_calc_send_limit", "commit_message": "log warning if channel too far behind, add docstrings to message stream", "code": "def _calc_send_limit(self):\n \n\n # Only update if we have enough data\n if len(self._send_times) == self._send_times.maxlen:\n # At least 1s or twice the average of send times, with a\n # maximum of 3 seconds per message\n self._send_high_limit = min(max(self.avg_send_time * 2, 1), 3)\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 95, "n_words": 41, "vocab_size": 37, "complexity": 2, "nloc": 3, "token_counts": 39, "n_ast_nodes": 66, "n_identifiers": 9, "random_cut": "def _calc_send_limit(self):\n \n\n # Only update if we have enough data\n if len(self._send_times) == self._send_times.maxlen:\n # At least 1s or twice the average of send times, with a\n # maximum of 3 seconds per message\n self._send_hig" }, { "id": 281511, "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/portfolio/portfolio_analysis/pa_controller.py", "file_name": "pa_controller.py", "fun_name": "call_group", "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for how we handle Rich moving forward\r\n\r\n* remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : 
crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", "code": "def call_group(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"group\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Displays portfolio grouped by a given column\",\n )\n if other_args and \"-\" not in other_args[0][0]:\n other_args.insert(0, \"-g\")\n parser.add_argument(\n \"-g\",\n \"--group\",\n type=str,\n dest=\"group\",\n default=\"Ticker\",\n choices=self.portfolio.columns,\n help=\"Column to group by\",\n )\n parser.add_argument(\n \"-a\",\n \"--allocation\",\n action=\"store_true\",\n default=False,\n help=\"Add allocation column in % to dataframe\",\n dest=\"allocation\",\n )\n\n # The following arguments will be used in a later PR for customizable 'reports'\n\n # The --func flag will need to be tested that it exists for pandas groupby\n # parser.add_argument(\"-f\",\n # \"--func\",\n # type=str,\n # dest=\"function\",\n # help=\"Aggregate function to apply to groups\"\n # )\n # parser.add_argument(\"-d\",\n # \"--display\",\n # default = None,\n # help = \"Columns to display\",\n # dest=\"cols\")\n\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if ns_parser:\n if \"value\" in self.portfolio.columns:\n portfolio_view.display_group_holdings(\n portfolio=self.portfolio,\n group_column=ns_parser.group,\n allocation=ns_parser.allocation,\n )\n else:\n console.print(\n \"'value' column not in portfolio. 
\"\n \"Either add manually or load without --no_last_price flag\\n\"\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 863, "n_words": 146, "vocab_size": 106, "complexity": 5, "nloc": 39, "token_counts": 165, "n_ast_nodes": 287, "n_identifiers": 31, "random_cut": "def call_group(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"group\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n " }, { "id": 20654, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pyparsing/testing.py", "file_name": "testing.py", "fun_name": "assertRaisesParseException", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def assertRaisesParseException(self, exc_type=ParseException, msg=None):\n with self.assertRaises(exc_type, msg=msg):\n yield\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 33, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 42, "n_identifiers": 6, "random_cut": "def assertRaisesParseException(self, exc_type=ParseException, msg=None):\n with self.assertRaises(exc_type, msg=msg):\n yield\n" }, { "id": 204154, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/humanize/templatetags/humanize.py", "file_name": "humanize.py", "fun_name": "apnumber", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def apnumber(value):\n \n try:\n value = int(value)\n except (TypeError, ValueError):\n return value\n if not 0 < value < 10:\n return value\n return (\n _(\"one\"),\n _(\"two\"),\n _(\"three\"),\n _(\"four\"),\n _(\"five\"),\n _(\"six\"),\n _(\"seven\"),\n _(\"eight\"),\n _(\"nine\"),\n )[value - 1]\n\n\n# Perform the comparison in the default time zone when USE_TZ = True\n# (unless a specific time zone has been applied with the |timezone filter).\n@register.filter(expects_localtime=True)", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "@register.filter(expects_localtime=True)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 160, "n_words": 61, "vocab_size": 49, "complexity": 3, "nloc": 18, "token_counts": 86, "n_ast_nodes": 165, "n_identifiers": 9, "random_cut": "def apnumber(value):\n \n try:\n value = int(value)\n except (TypeError, ValueError):\n return value\n if not 0 < value < 10:\n return value\n return (\n _(\"one\"),\n _(\"two\"),\n _(\"three\"),\n _(\"four\"),\n _(\"five\"),\n _(\"six\"),\n _(\"seven\"),\n _(\"eight\"),\n _(\"nine\"),\n )[value - 1]\n\n\n# Perform the comparison in the default time zone " }, { "id": 183576, "commit_id": "7f27e70440c177b2a047b7f74a78ed5cd5b4b596", "repo": "textual", "path": "src/textual/_terminal_modes.py", "file_name": 
"_terminal_modes.py", "fun_name": "mode_is_supported", "commit_message": "[terminal buffering] Address PR feedback", "code": "def mode_is_supported(self) -> bool:\n \n return self.report_parameter in MODE_REPORTS_PARAMETERS_INDICATING_SUPPORT\n\n\nMODE_REPORTS_PARAMETERS_INDICATING_SUPPORT = frozenset(\n {\n ModeReportParameter.Set,\n ModeReportParameter.Reset,\n ModeReportParameter.PermanentlySet,\n ModeReportParameter.PermanentlyReset,\n }\n)\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 64, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 7, "token_counts": 14, "n_ast_nodes": 57, "n_identifiers": 11, "random_cut": "def mode_is_supported(self) -> bool:\n \n return self.report_parameter in MODE_REPORTS_PARAMETERS_INDICATING_SUPPORT\n\n\nMODE_REPOR" }, { "id": 222381, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/datetime.py", "file_name": "datetime.py", "fun_name": "_days_before_year", "commit_message": "add python 3.10.4 for windows", "code": "def _days_before_year(year):\n \"year -> number of days before January 1st of year.\"\n y = year - 1\n return y*365 + y//4 - y//100 + y//400\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 33, "n_words": 25, "vocab_size": 22, "complexity": 1, "nloc": 4, "token_counts": 18, "n_ast_nodes": 45, "n_identifiers": 3, "random_cut": "def _days_before_year(year):\n \"year -> number of days before January 1st of year.\"\n y = year - 1\n return y*365 + y//4 - y//100 + y//400\n" }, { "id": 297669, "commit_id": "6a8d9a91cb3fd5a55f13de54ea5db23125e72632", "repo": "core", "path": "tests/components/matter/test_init.py", "file_name": "test_init.py", "fun_name": "listen_ready_timeout_fixture", "commit_message": "Fix matter websocket reconnect (#84192)", "code": "def listen_ready_timeout_fixture() -> Generator[int, None, None]:\n \n with patch(\n \"homeassistant.components.matter.LISTEN_READY_TIMEOUT\", new=0\n ) as timeout:\n yield timeout\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 38, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 6, "token_counts": 28, "n_ast_nodes": 50, "n_identifiers": 6, "random_cut": "def listen_ready_timeout_fixture() -> Generator[int, None, None]:\n \n with patch(\n \"homeassistant.components.matter.LISTEN_READY_TIMEOUT\", new=0\n ) as timeout:\n yield timeout\n\n" }, { "id": 91424, "commit_id": "284e980df0018f8baee659999268bdd4c7d08255", "repo": "sentry", "path": "tests/sentry/models/test_groupmeta.py", "file_name": "test_groupmeta.py", "fun_name": "test_get_value_bulk", "commit_message": "ref: replace self.assertRaises with pytest.raises (#35685)\n\n* add flake8 plugin to detect assertRaises\r\n\r\n* ref: replace self.assertRaises with pytest.raises\r\n\r\n* non-sed fixes", "code": "def test_get_value_bulk(self):\n with pytest.raises(GroupMeta.CacheNotPopulated):\n GroupMeta.objects.get_value_bulk([self.group], \"foo\")\n\n GroupMeta.objects.create(group=self.group, key=\"foo\", value=\"bar\")\n with pytest.raises(GroupMeta.CacheNotPopulated):\n GroupMeta.objects.get_value_bulk([self.group], \"foo\")\n\n GroupMeta.objects.populate_cache([self.group])\n result = GroupMeta.objects.get_value_bulk([self.group], \"foo\")\n assert result == {self.group: \"bar\"}\n", 
"url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 86, "n_words": 23, "vocab_size": 16, "complexity": 1, "nloc": 9, "token_counts": 111, "n_ast_nodes": 186, "n_identifiers": 14, "random_cut": "def test_get_value_bulk(self):\n with pytest.raises(GroupMeta.CacheNotPopulated):\n GroupMeta.objects.get_value_bulk([self.group], \"foo\")\n\n GroupMeta.objects.create(group=self.group, key=\"foo\", value=\"bar\")\n with pytest.raises(GroupMeta.CacheNotPopulated):\n GroupMet" }, { "id": 282470, "commit_id": "f8fc7d00ffe6b22b2e2a951fb887f1312644d32f", "repo": "OpenBBTerminal", "path": "tests/conftest.py", "file_name": "conftest.py", "fun_name": "brotli_check", "commit_message": "Updated uninstall brotli wording (#1333)", "code": "def brotli_check():\n installed_packages = pkg_resources.working_set\n for item in list(installed_packages):\n if \"brotli\" in str(item).lower():\n pytest.exit(\"Uninstall brotli and brotlipy before running tests\")\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 43, "n_words": 20, "vocab_size": 19, "complexity": 3, "nloc": 5, "token_counts": 35, "n_ast_nodes": 62, "n_identifiers": 10, "random_cut": "def brotli_check():\n installed_packages = pkg_resources.working_set\n for item in list(installed_packages):\n if \"brotli\" in str(item).lower():\n pytest.exit(\"Uninstall brotli and brotlipy before running tests\")\n\n" }, { "id": 262648, "commit_id": "fdeefcc6126dfe1382696d9105992295883be0a7", "repo": "TTS", "path": "TTS/tts/utils/text/phonemizers/espeak_wrapper.py", "file_name": "espeak_wrapper.py", "fun_name": "backend", "commit_message": "Handle espeak 1.48.15 (#2203)", "code": "def backend(self, backend):\n if backend not in [\"espeak\", \"espeak-ng\"]:\n raise Exception(\"Unknown backend: %s\" % backend)\n self._ESPEAK_LIB = backend\n self._ESPEAK_VER = get_espeakng_version() if backend == \"espeak-ng\" else get_espeak_version()\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 58, "n_words": 27, "vocab_size": 23, "complexity": 3, "nloc": 5, "token_counts": 44, "n_ast_nodes": 76, "n_identifiers": 7, "random_cut": "def backend(self, backend):\n if backend not in [\"espeak\", \"espeak-ng\"]:\n raise Exception(\"Unknown backend: %s\" % backend)\n self._ESPEAK_LIB = backend\n " }, { "id": 68315, "commit_id": "a896895a9e76a68ab055ce7871bb9d181d3fac15", "repo": "erpnext", "path": "erpnext/selling/report/customer_acquisition_and_loyalty/customer_acquisition_and_loyalty.py", "file_name": "customer_acquisition_and_loyalty.py", "fun_name": "get_data_by_territory", "commit_message": "fix: bulk fix (~330) missing translations", "code": "def get_data_by_territory(filters, common_columns):\n\tcolumns = [\n\t\t{\n\t\t\t\"label\": _(\"Territory\"),\n\t\t\t\"fieldname\": \"territory\",\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"options\": \"Territory\",\n\t\t\t\"width\": 150,\n\t\t}\n\t]\n\tcolumns += common_columns\n\n\tcustomers_in = get_customer_stats(filters, tree_view=True)\n\n\tterritory_dict = {}\n\tfor t in frappe.db.sql(\n\t\t, as_dict=1\n\t):\n\t\tterritory_dict.update({t.name: {\"parent\": t.parent_territory, \"is_group\": t.is_group}})\n\n\tdepth_map = frappe._dict()\n\tfor name, info in territory_dict.items():\n\t\tdefault = depth_map.get(info[\"parent\"]) + 1 if info[\"parent\"] 
else 0\n\t\tdepth_map.setdefault(name, default)\n\n\tdata = []\n\tfor name, indent in depth_map.items():\n\t\tcondition = customers_in.get(name)\n\t\tnew = customers_in[name][\"new\"] if condition else [0, 0.0]\n\t\trepeat = customers_in[name][\"repeat\"] if condition else [0, 0.0]\n\t\ttemp = {\n\t\t\t\"territory\": name,\n\t\t\t\"parent_territory\": territory_dict[name][\"parent\"],\n\t\t\t\"indent\": indent,\n\t\t\t\"new_customers\": new[0],\n\t\t\t\"repeat_customers\": repeat[0],\n\t\t\t\"total\": new[0] + repeat[0],\n\t\t\t\"new_customer_revenue\": new[1],\n\t\t\t\"repeat_customer_revenue\": repeat[1],\n\t\t\t\"total_revenue\": new[1] + repeat[1],\n\t\t\t\"bold\": 0 if indent else 1,\n\t\t}\n\t\tdata.append(temp)\n\n\tloop_data = sorted(data, key=lambda k: k[\"indent\"], reverse=True)\n\n\tfor ld in loop_data:\n\t\tif ld[\"parent_territory\"]:\n\t\t\tparent_data = [x for x in data if x[\"territory\"] == ld[\"parent_territory\"]][0]\n\t\t\tfor key in parent_data.keys():\n\t\t\t\tif key not in [\"indent\", \"territory\", \"parent_territory\", \"bold\"]:\n\t\t\t\t\tparent_data[key] += ld[key]\n\n\treturn columns, data, None, None, None, 1\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 119, "n_words": 166, "vocab_size": 113, "complexity": 14, "nloc": 47, "token_counts": 385, "n_ast_nodes": 613, "n_identifiers": 41, "random_cut": "def get_data_by_territory(filters, common_columns):\n\tcolumns = [\n\t\t{\n\t\t\t\"label\": _(\"Territory\"),\n\t\t\t\"fieldname\": \"territory\",\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"options\": \"Territory\",\n\t\t\t\"width\": 150,\n\t\t}\n\t]\n\tcolumns += common_columns\n\n\tcustomers_in = get_customer_stats(filters, tree_view=True)\n\n\tterritory_dict = {}\n\tfor t in frappe.db.sql(\n\t\t, as_dict=1\n\t):\n\t\tterritory_dict.update({t.name: {\"parent\": t.parent_territory, \"is_group\": t.is_group}})\n\n\tdepth_map = frappe._dict()\n\tfor name, info in te" }, { "id": 10786, "commit_id": "f4f8f314481dcdec3bc8d322012a1942303d768f", "repo": "jina", "path": "tests/unit/test_helper.py", "file_name": "test_helper.py", "fun_name": "test_random_port_unique", "commit_message": "fix: random port assignment (#4139)\n\n* fix: assign only unique ports\r\n\r\n* fix: check for none ports\r\n\r\n* fix: use port as int\r\n\r\n* fix: change debug out\r\n\r\n* fix: add more debug out\r\n\r\n* fix: protect partiald port finding\r\n\r\n* fix: track partiald ports\r\n\r\n* fix: move partial ports up\r\n\r\n* fix: lock as cls var\r\n\r\n* fix: more debug stuff\r\n\r\n* fix: more log output\r\n\r\n* fix: remove get\r\n\r\n* fix: try again on docker fail\r\n\r\n* Revert \"fix: try again on docker fail\"\r\n\r\nThis reverts commit c2947ee5c824fb8133319c26be4eb3de36ae7925.\r\n\r\n* fix: add more debug\r\n\r\n* fix: try connect with socket\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: set min port env in ci\r\n\r\n* fix: set min port in jinad\r\n\r\n* fix: port helper test\r\n\r\n* fix: keep track of port in\r\n\r\n* fix: clean up\r\n\r\n* fix: remove connect check\r\n\r\n* fix: remove psutil\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: seperate jinad port range\r\n\r\n* fix: use asyncio to run jinad pea\r\n\r\n* fix: kill jinad process with fire\r\n\r\n* fix: remove codecov\r\n\r\n* fix: docker compose tests\r\n\r\n* Revert \"fix: remove codecov\"\r\n\r\nThis reverts commit 31d0d41e882699656f5b109ff7d747bf74b47971.\r\n\r\n* fix: upgrade codecov 
action\r\n\r\n* fix: clean up\r\n\r\n* fix: remove codecov\r\n\r\n* fix: readd code cov\r\n\r\n* fix: increase timeout for k8s test\r\n\r\n* fix: wrong cov tag\r\n\r\n* Revert \"fix: wrong cov tag\"\r\n\r\nThis reverts commit 00ce072dd1eb5a84b19c0d3f3eafb5ebf8c1ae53.\r\n\r\n* Revert \"fix: increase timeout for k8s test\"\r\n\r\nThis reverts commit 9b0e3134489fc90953beed0c2ff1393e0abcf26d.\r\n\r\n* fix: reset ci file\r\n\r\n* fix: readd port config\r\n\r\n* fix: use run_async helper again\r\n\r\n* fix: dont touch import\r\n\r\nCo-authored-by: Jina Dev Bot ", "code": "def test_random_port_unique(config):\n reset_ports()\n assert os.environ['JINA_RANDOM_PORT_MIN']\n generated_ports = set()\n for i in range(1000):\n port = random_port()\n assert port not in generated_ports\n assert int(os.environ['JINA_RANDOM_PORT_MIN']) <= port <= 65535\n generated_ports.add(port)\n\n\n@pytest.fixture", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 66, "n_words": 28, "vocab_size": 20, "complexity": 2, "nloc": 9, "token_counts": 58, "n_ast_nodes": 103, "n_identifiers": 15, "random_cut": "def test_random_port_unique(config):\n reset_ports()\n assert os.environ['JINA_RANDOM_PORT_MIN']\n generated_ports = set()\n for i in range(1000):\n port = random_port()\n assert port not in generated_ports\n assert int(os.environ['JINA_RANDOM_PORT_MIN']) <= port <= 65535\n generated_ports.add(port)\n\n\n@pyte" }, { "id": 154130, "commit_id": "8521bbe63f15fbfc6c86a9d5a3c99112738ce7fd", "repo": "modin", "path": "modin/test/exchange/dataframe_protocol/pandas/test_protocol.py", "file_name": "test_protocol.py", "fun_name": "test_simple_import", "commit_message": "FIX-#4652: Support categorical data in `from_dataframe` (#4737)\n\nSigned-off-by: Karthik Velayutham ", "code": "def test_simple_import():\n modin_df = pd.DataFrame(test_data[\"int_data\"])\n eval_df_protocol(modin_df)\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 11, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 34, "n_identifiers": 6, "random_cut": "def test_simple_import():\n modin_df = pd.DataFrame(test_data[\"int_data\"])\n eval_df_protocol(modin_df" }, { "id": 242931, "commit_id": "4e12ccc63e40a9b567af3b2e1ac821f5157cddc6", "repo": "Pillow", "path": "Tests/test_image_point.py", "file_name": "test_image_point.py", "fun_name": "test_sanity", "commit_message": "Support more affine expression forms in Image.point\n\nIn modes I and F, Image.point only supported affine expressions of the\nforms (lambda x:) x * a, x + a, and x * a + b. 
Expressions like 1 - x\nhad to be written x * -1 + 1.\n\nThis rewrite, though still limited to affine transformations, supports\nfar more expression forms, including 1 - x, (2 * x + 1) / 3, etc.", "code": "def test_sanity():\n im = hopper()\n\n with pytest.raises(ValueError):\n im.point(list(range(256)))\n im.point(list(range(256)) * 3)\n im.point(lambda x: x)\n im.point(lambda x: x * 1.2)\n\n im = im.convert(\"I\")\n with pytest.raises(ValueError):\n im.point(list(range(256)))\n im.point(lambda x: x * 1)\n im.point(lambda x: x + 1)\n im.point(lambda x: x * 1 + 1)\n im.point(lambda x: 0.1 + 0.2 * x)\n im.point(lambda x: -x)\n im.point(lambda x: x - 0.5)\n im.point(lambda x: 1 - x / 2)\n im.point(lambda x: (2 + x) / 3)\n im.point(lambda x: 0.5)\n with pytest.raises(TypeError):\n im.point(lambda x: x * x)\n with pytest.raises(TypeError):\n im.point(lambda x: 1 / x)\n with pytest.raises(TypeError):\n im.point(lambda x: x // 2)\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 186, "n_words": 95, "vocab_size": 30, "complexity": 1, "nloc": 25, "token_counts": 262, "n_ast_nodes": 430, "n_identifiers": 12, "random_cut": "def test_sanity():\n im = hopper()\n\n with pytest.raises(ValueError):\n im.point(list(range(256)))\n im.point(list(range(256)) * 3)\n im.point(lambda x: x)\n im.point(lambda x: x * 1.2)\n\n im = im.convert(\"I\")\n with pytest.raises(ValueE" }, { "id": 54773, "commit_id": "262f05bece5560d0e8cfc36daa6403a67239f825", "repo": "prefect", "path": "tests/test_deployments.py", "file_name": "test_deployments.py", "fun_name": "test_multiple_specs_from_yaml", "commit_message": "Update tests", "code": "async def test_multiple_specs_from_yaml(self):\n specs = deployment_specs_from_yaml(TEST_FILES_DIR / \"multiple-deployments.yaml\")\n assert len(specs) == 2\n specs_by_name = {spec.name: spec for spec in specs}\n assert set(specs_by_name.keys()) == {\n \"hello-sun-deployment\",\n \"hello-moon-deployment\",\n }\n\n sun_deploy = specs_by_name[\"hello-sun-deployment\"]\n moon_deploy = specs_by_name[\"hello-moon-deployment\"]\n assert sun_deploy.flow_location == str(TEST_FILES_DIR / \"multiple_flows.py\")\n assert sun_deploy.flow_name == \"hello-sun\"\n assert moon_deploy.flow_location == str(TEST_FILES_DIR / \"multiple_flows.py\")\n assert moon_deploy.flow_name == \"hello-moon\"\n\n sun_src = specs[sun_deploy]\n moon_src = specs[moon_deploy]\n assert sun_src[\"file\"] == str(TEST_FILES_DIR / \"multiple-deployments.yaml\")\n assert moon_src[\"file\"] == str(TEST_FILES_DIR / \"multiple-deployments.yaml\")\n assert sun_src[\"line\"] == 1\n assert moon_src[\"line\"] == 5\n\n for spec in specs:\n await spec.validate()\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 243, "n_words": 85, "vocab_size": 48, "complexity": 3, "nloc": 22, "token_counts": 156, "n_ast_nodes": 269, "n_identifiers": 19, "random_cut": "async def test_multiple_specs_from_yaml(self):\n specs = deployment_specs_from_yaml(TEST_FILES_DIR / \"multiple-deployments.yaml\")\n " }, { "id": 176256, "commit_id": "290ebce534b84f9db20ec58b98cbb170e65a0ba1", "repo": "networkx", "path": "networkx/algorithms/community/tests/test_louvain.py", "file_name": "test_louvain.py", "fun_name": "test_none_weight_param", "commit_message": "Add weights to karate club graph (#5285)\n\nAdd weights to the karate_club_graph.\r\nModifies `non_randomness` and 
`naive_greedy_modularity_communities` to\r\naccept a `weight` parameter and modifies tests that use the kcg accordingly\r\n\r\nCo-authored-by: Kevin Berry \r\nCo-authored-by: Dan Schult ", "code": "def test_none_weight_param():\n G = nx.karate_club_graph()\n nx.set_edge_attributes(\n G, {edge: i * i for i, edge in enumerate(G.edges)}, name=\"foo\"\n )\n\n part = [\n {0, 1, 2, 3, 7, 9, 11, 12, 13, 17, 19, 21},\n {16, 4, 5, 6, 10},\n {23, 25, 27, 28, 24, 31},\n {32, 33, 8, 14, 15, 18, 20, 22, 26, 29, 30},\n ]\n partition1 = louvain_communities(G, weight=None, seed=2)\n partition2 = louvain_communities(G, weight=\"foo\", seed=2)\n partition3 = louvain_communities(G, weight=\"weight\", seed=2)\n\n assert part == partition1\n assert part != partition2\n assert part != partition3\n assert partition2 != partition3\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 157, "n_words": 87, "vocab_size": 65, "complexity": 2, "nloc": 18, "token_counts": 178, "n_ast_nodes": 239, "n_identifiers": 17, "random_cut": "def test_none_weight_param():\n G = nx.karate_club_graph()\n nx.set_edge_attributes(\n G, {edge: i * i for i, edge in enumerate(G.edges)}, name=\"foo\"\n )\n\n part = [\n {0, 1, 2, 3, 7, 9, 11, 12, 13, 17, 19, 21},\n {16, 4, 5, 6, 10},\n {23, 25, 27, 28, 24, 31},\n {32, 33, 8, 14, 15, 18, 20, 22, 26, 29, 30},\n ]\n partition1 = louvain_communities(G, weight=None, seed=2)\n partition2 = louvain_communities(G, weight=\"foo\", seed=2)\n partition3 = louvain_communities(G, weight=\"weight\", seed=2" }, { "id": 19802, "commit_id": "9a3b3ce70621af6f9adaa9eeac9cf83fa149319c", "repo": "pipenv", "path": "pipenv/utils/shell.py", "file_name": "shell.py", "fun_name": "find_requirements", "commit_message": "Issue 4993 Add standard pre commit hooks and apply linting. (#4994)\n\n* Add .pre-commit-config.yaml to the project and exclude tests (for now). 
This does not include the MyPy linting that pip does but does include everything else.", "code": "def find_requirements(max_depth=3):\n \n i = 0\n for c, _, _ in walk_up(os.getcwd()):\n i += 1\n if i < max_depth:\n r = os.path.join(c, \"requirements.txt\")\n if os.path.isfile(r):\n return r\n\n raise RuntimeError(\"No requirements.txt found!\")\n\n\n# Borrowed from Pew.\n# See https://github.com/berdario/pew/blob/master/pew/_utils.py#L82\n@contextmanager", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "@contextmanager", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 98, "n_words": 38, "vocab_size": 32, "complexity": 4, "nloc": 9, "token_counts": 64, "n_ast_nodes": 113, "n_identifiers": 14, "random_cut": "def find_requirements(max_depth=3):\n \n i = 0\n for c, _, _ in walk_u" }, { "id": 202760, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/db_functions/math/test_sin.py", "file_name": "test_sin.py", "fun_name": "test_integer", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_integer(self):\n IntegerModel.objects.create(small=-20, normal=15, big=-1)\n obj = IntegerModel.objects.annotate(\n small_sin=Sin(\"small\"),\n normal_sin=Sin(\"normal\"),\n big_sin=Sin(\"big\"),\n ).first()\n self.assertIsInstance(obj.small_sin, float)\n self.assertIsInstance(obj.normal_sin, float)\n self.assertIsInstance(obj.big_sin, float)\n self.assertAlmostEqual(obj.small_sin, math.sin(obj.small))\n self.assertAlmostEqual(obj.normal_sin, math.sin(obj.normal))\n self.assertAlmostEqual(obj.big_sin, math.sin(obj.big))\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 119, "n_words": 24, "vocab_size": 22, "complexity": 1, "nloc": 13, "token_counts": 140, "n_ast_nodes": 220, "n_identifiers": 20, "random_cut": "def test_integer(self):\n IntegerModel.objects.create(small=-20, normal=15, big=-1)\n obj = IntegerModel.objects.annotate(\n small_sin=Sin(\"small\"),\n normal_sin=Sin(\"normal\"),\n big_sin=Sin(\"big\"),\n ).first()\n self.assertIsInstance(obj.small_sin, float)\n self.assertIsInstance(obj.normal_sin, float)\n self.assertIsInstance(obj.big_sin, float)\n self.assertAlmostEqual(obj.small_sin, math.sin(obj.sma" }, { "id": 29740, "commit_id": "decd505f55d02c616ce5b804c06a71e120d15c15", "repo": "saleor", "path": "saleor/core/tests/test_dataloaders.py", "file_name": "test_dataloaders.py", "fun_name": "test_plugins_manager_loader_loads_requestor_in_plugin", "commit_message": "Add plugin manager promise (#11414)", "code": "def test_plugins_manager_loader_loads_requestor_in_plugin(rf, customer_user, settings):\n settings.PLUGINS = [\"saleor.plugins.tests.sample_plugins.ActivePlugin\"]\n request = rf.request()\n request.user = customer_user\n request.app = None\n\n handler = BaseHandler()\n handler.load_middleware()\n handler.get_response(request)\n manager = get_plugin_manager_promise(request).get()\n plugin = manager.all_plugins.pop()\n\n assert isinstance(plugin.requestor, type(customer_user))\n assert plugin.requestor.id == customer_user.id\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 66, "n_words": 34, "vocab_size": 27, "complexity": 1, "nloc": 12, "token_counts": 90, "n_ast_nodes": 147, "n_identifiers": 22, "random_cut": "def test_plugins_manager_loader_loads_requestor_in_plugin(rf, customer_user, 
settings):\n settings.PLUGINS = [\"saleor.plugins.tests.sample_plugins.ActivePlugin\"]\n request = rf.request()\n request.user = customer_user\n request.app = None\n\n handler = BaseHandler()\n handler.load_middleware()\n handler.get_response(request)\n manager = get_plugin_manager_pro" }, { "id": 172633, "commit_id": "8007e450b3178f517b83b0989744c6df38867932", "repo": "calibre-web", "path": "cps/comic.py", "file_name": "comic.py", "fun_name": "_extract_Cover_from_archive", "commit_message": "Bugfix for cbr support without comicapi", "code": "def _extract_Cover_from_archive(original_file_extension, tmp_file_name, rarExecutable):\n cover_data = extension = None\n if original_file_extension.upper() == '.CBZ':\n cf = zipfile.ZipFile(tmp_file_name)\n for name in cf.namelist():\n ext = os.path.splitext(name)\n if len(ext) > 1:\n extension = ext[1].lower()\n if extension in COVER_EXTENSIONS:\n cover_data = cf.read(name)\n break\n elif original_file_extension.upper() == '.CBT':\n cf = tarfile.TarFile(tmp_file_name)\n for name in cf.getnames():\n ext = os.path.splitext(name)\n if len(ext) > 1:\n extension = ext[1].lower()\n if extension in COVER_EXTENSIONS:\n cover_data = cf.extractfile(name).read()\n break\n elif original_file_extension.upper() == '.CBR' and use_rarfile:\n try:\n rarfile.UNRAR_TOOL = rarExecutable\n cf = rarfile.RarFile(tmp_file_name)\n for name in cf.namelist():\n ext = os.path.splitext(name)\n if len(ext) > 1:\n extension = ext[1].lower()\n if extension in COVER_EXTENSIONS:\n cover_data = cf.read(name)\n break\n except Exception as ex:\n log.debug('Rarfile failed with error: %s', ex)\n return cover_data, extension\n\n", "url": "https://github.com/janeczku/calibre-web.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 509, "n_words": 115, "vocab_size": 51, "complexity": 15, "nloc": 34, "token_counts": 248, "n_ast_nodes": 408, "n_identifiers": 32, "random_cut": "def _extract_Cover_from_archive(original_file_extension, tmp_file_name, rarExecutable):\n cover_data = extension = None\n if original_file_extension.upper() == '.CBZ':\n cf = zipfile.ZipFile(tmp_file_name)\n for name in cf.namelist():\n ext = os.path.splitext(name)\n if len(ext) > 1:\n extension = ext[1].lower()\n if extension in COVER_EXTENSIONS:\n cover_data = " }, { "id": 154081, "commit_id": "02363589aa5105e091fa3d790b29cddf94cc8118", "repo": "modin", "path": "modin/config/__main__.py", "file_name": "__main__.py", "fun_name": "print_config_help", "commit_message": "REFACTOR-#4629: Add type annotations to `modin/config` (#4685)\n\nSigned-off-by: Karthik Velayutham ", "code": "def print_config_help() -> None:\n \n for objname in sorted(globals()):\n obj = globals()[objname]\n if isinstance(obj, type) and issubclass(obj, Parameter) and not obj.is_abstract:\n print(f\"{obj.get_help()}\\n\\tCurrent value: {obj.get()}\") # noqa: T201\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 58, "n_words": 26, "vocab_size": 25, "complexity": 5, "nloc": 6, "token_counts": 50, "n_ast_nodes": 108, "n_identifiers": 13, "random_cut": "def print_config_help() -> None:\n \n for objname in sorted(globals()):\n obj = globals()[objname]\n if isinstance(obj, type) and issubcla" }, { "id": 280875, "commit_id": "041a6cd5a06af3809419d043c2410d5317799d1a", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/cryptocurrency/due_diligence/dd_controller.py", "file_name": "dd_controller.py", 
"fun_name": "call_reset", "commit_message": "reset different coin from dd controller on crypto (#1118)", "code": "def call_reset(self, _):\n \n self.queue.insert(0, \"dd\")\n if self.current_coin:\n self.queue.insert(0, f\"load {self.current_coin} --source {self.source}\")\n self.queue.insert(0, \"crypto\")\n self.queue.insert(0, \"reset\")\n self.queue.insert(0, \"quit\")\n self.queue.insert(0, \"quit\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 80, "n_words": 20, "vocab_size": 14, "complexity": 2, "nloc": 8, "token_counts": 74, "n_ast_nodes": 139, "n_identifiers": 7, "random_cut": "def call_reset(self, _):\n \n " }, { "id": 260677, "commit_id": "b7d01716216042dda9663f1732d8419e62858a1e", "repo": "scikit-learn", "path": "sklearn/metrics/tests/test_pairwise_distances_reduction.py", "file_name": "test_pairwise_distances_reduction.py", "fun_name": "relative_rounding", "commit_message": "FEA Add support for float32 on `PairwiseDistancesReduction` using Tempita (#23865)\n\n\r\nCo-authored-by: Thomas J. Fan \r\nCo-authored-by: Olivier Grisel ", "code": "def relative_rounding(scalar, n_significant_digits):\n \n if scalar == 0:\n return 0.0\n magnitude = int(floor(log10(abs(scalar)))) + 1\n return round(scalar, n_significant_digits - magnitude)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 38, "n_words": 19, "vocab_size": 18, "complexity": 2, "nloc": 5, "token_counts": 43, "n_ast_nodes": 67, "n_identifiers": 9, "random_cut": "def relative_rounding(scalar, n_significant_digits):\n \n if scalar == 0:\n return 0.0\n magnitude = int(floor(log10(abs(scalar)))) + 1\n return round(scalar, n_signifi" }, { "id": 103676, "commit_id": "bfcd3249930a46c3ba5c53e48e182136809fb6e8", "repo": "kitty", "path": "kittens/ask/main.py", "file_name": "main.py", "fun_name": "draw_screen", "commit_message": "ask kitten: allow having hidden text in the message", "code": "def draw_screen(self) -> None:\n self.cmd.clear_screen()\n msg_lines: List[str] = []\n if self.message:\n for line in self.message.splitlines():\n msg_lines.extend(self.draw_long_text(line))\n y = self.screen_size.rows - len(msg_lines)\n y = max(0, (y // 2) - 2)\n self.print(end='\\r\\n'*y)\n for line in msg_lines:\n if self.replacement_text in line:\n idx = line.find(self.replacement_text)\n x = wcswidth(line[:idx])\n self.replacement_range = Range(x, x + wcswidth(self.replacement_text), y)\n self.print(line)\n y += 1\n if self.screen_size.rows > 2:\n self.print()\n y += 1\n if self.cli_opts.type == 'yesno':\n self.draw_yesno(y)\n else:\n self.draw_choice(y)\n", "url": "https://github.com/kovidgoyal/kitty.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 287, "n_words": 70, "vocab_size": 48, "complexity": 7, "nloc": 23, "token_counts": 181, "n_ast_nodes": 302, "n_identifiers": 30, "random_cut": "def draw_screen(self) -> None:\n self.cmd.clear_screen()\n msg_lines: List[str] = []\n if self.message:\n for line in self.message.splitlines():\n msg_lines.extend(self.draw_long_text(line))\n y = self.screen_size.rows - len(msg_lines)\n y = max(0, (y // 2) - 2)\n self.print(end='\\r\\n'*y)\n for line in msg_lines:\n if self.replacement_text in line:\n idx = line.find(self.replacement_text)\n x = wcswidth(line[:idx])\n self.replacement_range = Range(x" }, { "id": 16076, "commit_id": 
"7a8a00ecacc1eb719ced2f95c06a472a8b55892c", "repo": "ccxt", "path": "python/ccxt/huobi.py", "file_name": "huobi.py", "fun_name": "cancel_orders", "commit_message": "1.68.88\n\n[ci skip]", "code": "def cancel_orders(self, ids, symbol=None, params={}):\n self.load_markets()\n marketType = None\n marketType, params = self.handle_market_type_and_params('cancelOrder', None, params)\n request = {\n # spot -----------------------------------------------------------\n # 'order-ids': ids.jsoin(','), # max 50\n # 'client-order-ids': ','.join(ids), # max 50\n # contracts ------------------------------------------------------\n # 'order_id': id, # comma separated, max 10\n # 'client_order_id': clientOrderId, # comma separated, max 10\n # 'contract_code': market['id'],\n # 'symbol': market['settleId'],\n }\n method = None\n if marketType == 'spot':\n clientOrderIds = self.safe_value_2(params, 'client-order-id', 'clientOrderId')\n clientOrderIds = self.safe_value_2(params, 'client-order-ids', 'clientOrderIds', clientOrderIds)\n if clientOrderIds is None:\n if isinstance(clientOrderIds, basestring):\n request['order-ids'] = ids\n else:\n request['order-ids'] = ','.join(ids)\n else:\n if isinstance(clientOrderIds, basestring):\n request['client-order-ids'] = clientOrderIds\n else:\n request['client-order-ids'] = ','.join(clientOrderIds)\n params = self.omit(params, ['client-order-id', 'client-order-ids', 'clientOrderId', 'clientOrderIds'])\n method = 'spotPrivatePostV1OrderOrdersBatchcancel'\n else:\n if symbol is None:\n raise ArgumentsRequired(self.id + ' cancelOrders() requires a symbol for ' + marketType + ' orders')\n market = self.market(symbol)\n request['contract_code'] = market['id']\n if market['linear']:\n defaultMargin = 'cross' if market['future'] else 'isolated'\n marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)\n if marginType == 'isolated':\n method = 'contractPrivatePostLinearSwapApiV1SwapCancel'\n elif marginType == 'cross':\n method = 'contractPrivatePostLinearSwapApiV1SwapCrossCancel'\n elif market['inverse']:\n if market['future']:\n method = 'contractPrivatePostApiV1ContractCancel'\n request['symbol'] = market['settleId']\n elif market['swap']:\n method = 'contractPrivatePostSwapApiV1SwapCancel'\n else:\n raise NotSupported(self.id + ' cancelOrders() does not support ' + marketType + ' markets')\n clientOrderIds = self.safe_string_2(params, 'client_order_id', 'clientOrderId')\n clientOrderIds = self.safe_string_2(params, 'client_order_ids', 'clientOrderIds', clientOrderIds)\n if clientOrderIds is None:\n request['order_id'] = ','.join(ids)\n else:\n request['client_order_id'] = clientOrderIds\n params = self.omit(params, ['client_order_id', 'client_order_ids', 'clientOrderId', 'clientOrderIds'])\n response = getattr(self, method)(self.extend(request, params))\n #\n # spot\n #\n # {\n # \"status\": \"ok\",\n # \"data\": {\n # \"success\": [\n # \"5983466\"\n # ],\n # \"failed\": [\n # {\n # \"err-msg\": \"Incorrect order state\",\n # \"order-state\": 7,\n # \"order-id\": \"\",\n # \"err-code\": \"order-orderstate-error\",\n # \"client-order-id\": \"first\"\n # },\n # {\n # \"err-msg\": \"Incorrect order state\",\n # \"order-state\": 7,\n # \"order-id\": \"\",\n # \"err-code\": \"order-orderstate-error\",\n # \"client-order-id\": \"second\"\n # },\n # {\n # \"err-msg\": \"The record is not found.\",\n # \"order-id\": \"\",\n # \"err-code\": \"base-not-found\",\n # \"client-order-id\": \"third\"\n # }\n # ]\n # }\n # }\n #\n # contracts\n 
#\n # {\n # \"status\": \"ok\",\n # \"data\": {\n # \"errors\": [\n # {\n # \"order_id\": \"769206471845261312\",\n # \"err_code\": 1061,\n # \"err_msg\": \"This order doesnt exist.\"\n # }\n # ],\n # \"successes\": \"773120304138219520\"\n # },\n # \"ts\": 1604367997451\n # }\n #\n return response\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 2109, "n_words": 367, "vocab_size": 159, "complexity": 14, "nloc": 51, "token_counts": 379, "n_ast_nodes": 730, "n_identifiers": 27, "random_cut": "def cancel_orders(self, ids, symbol=None, params={}):\n self.load_markets()\n marketType = None\n marketType, params = self.handle_market_type_and_params('cancelOrder', None, params)\n request = {\n # spot -----------------------------------------------------------\n # 'order-ids': ids.jsoin(','), # max 50\n # 'client-order-ids': ','.join(ids), # max 50\n # contracts ------------------------------------------------------\n # 'order_id': id, # comma separated, max 10\n # 'client_order_id': clientOrderId, # comma separated, max 10\n # 'contract_code': market['id'],\n # 'symbol': market['settleId'],\n }\n method = None\n if marketType == 'spot':\n clientOrderIds = self.safe_value_2(params, 'client-order-id', 'clientOrderId')\n clientOrderIds = self.safe_value_2(params, 'client-order-ids', 'clientOrderIds', clientOrderIds)\n if clientOrderIds is None:\n if isinstance(clientOrderIds, basestring):\n request['order-ids'] = ids\n else:\n request['order-ids'] = ','.join(ids)\n else:\n if isinstance(clientOrderIds, basestring):\n request['client-order-ids'] = clientOrderIds\n else:\n request['client-order-ids'] = ','.join(clientOrderIds)\n params = self.omit(params, ['client-order-id', 'client-order-ids', 'clientOrderId', 'clientOrderIds'])\n method = 'spotPrivatePostV1OrderOrdersBatchcancel'\n else:\n if symbol is None:\n raise ArgumentsRequired(self.id + ' cancelOrders() requires a symbol for ' + marketType + ' orders')\n market = self.market(symbol)\n request['contract_code'] = market['id']\n if market['linear']:\n defaultMargin = 'cross' if market['future'] else 'isolated'\n marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)\n if marginType == 'isolated':\n method = 'contractPrivatePostLinearSwapApiV1SwapCancel'\n elif marginType == 'cross':\n method = 'contractPrivatePostLinearSwapApiV1SwapCrossCancel'\n elif market['inverse']:\n if market['future']:\n method = 'contractPrivatePostApiV1ContractCancel'\n request['symbol'] = market['settleId']\n elif market['swap']:\n method = 'contractPrivatePostSwapApiV1SwapCancel'\n else:\n raise NotSupported(self.id + ' cancelOrders() does not support ' + marketType + ' markets')\n clientOrderIds = self.safe_string_2(params, 'client_order_id', 'clientOrderId')\n clientOrderIds = self.safe_string_2(params, 'client_order_ids', 'clientOrderIds', clientOrderIds)\n if clientOrderIds is None:\n request['order_id'] = ','.join(ids)\n else:\n request['client_order_id'] = clientOrderIds\n params = self.omit(params, ['client_order_id', 'client_order_ids', 'clientOrderId', 'clientOrderIds'])\n response = getattr(self, method)(self.extend(request, params))\n #\n # spot\n #\n # {\n # \"status\": \"ok\",\n # \"data\": {\n # \"success\": [\n # \"5983466\"\n # ],\n # \"failed\": [\n # {\n # \"err-msg\": \"Incorrect order state\",\n # \"order-state\": 7,\n # \"order-id\": \"\",\n # \"err-code\": \"order-orderstate-error\",\n # \"client-order-id\": 
\"first\"\n # },\n # {\n # \"err-msg\": \"Incorrect order state\",\n # \"order-state\": 7,\n # \"order-id\": \"\",\n # \"err-code\": \"order-orderstate-error\",\n # " }, { "id": 84401, "commit_id": "51df4031ac45ae6fcc6d2ccea22eed897116c582", "repo": "zulip", "path": "zerver/tests/test_auth_backends.py", "file_name": "test_auth_backends.py", "fun_name": "test_get_external_method_dicts_correctly_sorted", "commit_message": "test_auth_backends: Extract external_auth_backends.\n\nSigned-off-by: Zixuan James Li ", "code": "def test_get_external_method_dicts_correctly_sorted(self) -> None:\n with self.settings(\n AUTHENTICATION_BACKENDS=(\n \"zproject.backends.EmailAuthBackend\",\n \"zproject.backends.GitHubAuthBackend\",\n \"zproject.backends.GoogleAuthBackend\",\n \"zproject.backends.ZulipRemoteUserBackend\",\n \"zproject.backends.SAMLAuthBackend\",\n \"zproject.backends.AzureADAuthBackend\",\n ),\n ):\n external_auth_methods = get_external_method_dicts()\n external_auth_backends: List[Type[ExternalAuthMethod]] = [\n ZulipRemoteUserBackend,\n GitHubAuthBackend,\n AzureADAuthBackend,\n GoogleAuthBackend,\n ]\n # First backends in the list should be SAML:\n self.assertIn(\"saml:\", external_auth_methods[0][\"name\"])\n self.assertEqual(\n [social_backend[\"name\"] for social_backend in external_auth_methods[1:]],\n [\n social_backend.name\n for social_backend in sorted(\n external_auth_backends,\n key=lambda x: x.sort_order,\n reverse=True,\n )\n ],\n )\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 491, "n_words": 58, "vocab_size": 51, "complexity": 3, "nloc": 30, "token_counts": 117, "n_ast_nodes": 185, "n_identifiers": 23, "random_cut": "def test_get_external_method_dicts_correctly_sorted(self) -> None:\n with self.settings(\n AUTHENTICATION_BACKENDS=(\n \"zproject.backends.EmailAuthBackend\",\n \"zproject.backends.GitHubAuthBackend\",\n \"zproject.backends.GoogleAuthBackend\",\n \"zproject.backends.ZulipRemoteUserBackend\",\n \"zproject.backends.SAMLAuthBackend\",\n \"zproject.backends.AzureADAuthBackend\",\n ),\n ):\n external_auth_methods = get_external_method_dicts()\n external_auth_backends: List[Type[ExternalAuthMethod]] = [\n ZulipRemoteUserBackend,\n GitHubAuthBackend,\n AzureADAuthBackend,\n " }, { "id": 306498, "commit_id": "458001a06e7678273ea43c33e55b833adadced9e", "repo": "core", "path": "homeassistant/components/tado/sensor.py", "file_name": "sensor.py", "fun_name": "async_added_to_hass", "commit_message": "Improve entity type hints [t] (#77883)", "code": "async def async_added_to_hass(self) -> None:\n \n\n self.async_on_remove(\n async_dispatcher_connect(\n self.hass,\n SIGNAL_TADO_UPDATE_RECEIVED.format(\n self._tado.home_id, \"weather\", \"data\"\n ),\n self._async_update_callback,\n )\n )\n self._async_update_home_data()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 146, "n_words": 17, "vocab_size": 16, "complexity": 1, "nloc": 12, "token_counts": 44, "n_ast_nodes": 74, "n_identifiers": 11, "random_cut": "async def async_added_to_hass(self) -> None:\n \n\n self.async_on_remove(\n async_dispatcher_connect(\n self.hass,\n SIGNAL_TADO_UPDATE_RECEIVED.format(\n self._tado.home_id, \"weather\", \"data\"\n ),\n self._async_update_callback,\n )\n )\n self._async_update_home_data()\n" }, { "id": 63218, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", 
"path": ".venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py", "file_name": "__init__.py", "fun_name": "obtain", "commit_message": "upd; format", "code": "def obtain(self, requirement, installer=None):\n \n if installer is not None:\n return installer(requirement)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 36, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 3, "token_counts": 23, "n_ast_nodes": 37, "n_identifiers": 4, "random_cut": "def obtain(self, requirement, installer=None):\n \n if installer is not None:\n return installer(requirement)\n" }, { "id": 145238, "commit_id": "58e5f0140d247059ca45b249446614929930c126", "repo": "ray", "path": "dashboard/modules/job/common.py", "file_name": "common.py", "fun_name": "get_all_jobs", "commit_message": "[jobs] Rename JobData -> JobInfo (#22499)\n\n`JobData` could be confused with the actual output data of a job, `JobInfo` makes it more clear that this is status information + metadata.", "code": "def get_all_jobs(self) -> Dict[str, JobInfo]:\n raw_job_ids_with_prefixes = _internal_kv_list(\n self.JOB_DATA_KEY_PREFIX, namespace=ray_constants.KV_NAMESPACE_JOB\n )\n job_ids_with_prefixes = [\n job_id.decode() for job_id in raw_job_ids_with_prefixes\n ]\n job_ids = []\n for job_id_with_prefix in job_ids_with_prefixes:\n assert job_id_with_prefix.startswith(\n self.JOB_DATA_KEY_PREFIX\n ), \"Unexpected format for internal_kv key for Job submission\"\n job_ids.append(job_id_with_prefix[len(self.JOB_DATA_KEY_PREFIX) :])\n return {job_id: self.get_info(job_id) for job_id in job_ids}\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 166, "n_words": 48, "vocab_size": 38, "complexity": 4, "nloc": 14, "token_counts": 89, "n_ast_nodes": 137, "n_identifiers": 20, "random_cut": "def get_all_jobs(self) -> Dict[str, JobInfo]:\n raw_job_ids_with_prefixes = _internal_kv_list(\n self.JOB_DATA_KEY_PREFIX, namespace=ray_constants.KV_NAMESPACE_JOB\n )\n job_ids_with_prefixe" }, { "id": 175895, "commit_id": "b349e06f3b6e80ba527347b2b0463bcc403ae8c5", "repo": "autokeras", "path": "autokeras/tasks/image_test.py", "file_name": "image_test.py", "fun_name": "test_img_seg_fit_call_auto_model_fit", "commit_message": "mvoing tests (#1664)\n\nCo-authored-by: Haifeng Jin ", "code": "def test_img_seg_fit_call_auto_model_fit(fit, tmp_path):\n auto_model = ak.tasks.image.ImageSegmenter(\n directory=tmp_path, seed=test_utils.SEED\n )\n\n auto_model.fit(\n x=test_utils.generate_data(num_instances=100, shape=(32, 32, 3)),\n y=test_utils.generate_data(num_instances=100, shape=(32, 32)),\n )\n\n assert fit.is_called\n\n\n@mock.patch(\"autokeras.AutoModel.fit\")", "url": "https://github.com/keras-team/autokeras.git", "language": "Python", "ast_errors": "@mock.patch(\"autokeras.AutoModel.fit\")", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 55, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 9, "token_counts": 76, "n_ast_nodes": 124, "n_identifiers": 20, "random_cut": "def test_img_seg_fit_call_auto_model_fit(fit, tmp_path):\n auto_model = ak.tasks.image.ImageSegmenter(\n directory=tmp_path, seed=test_utils.SEED\n )\n\n auto_model.fit(\n x=test_utils.generate_data(num_instances=100, shape=(32, 32, 3)),\n y=test_utils.generate_data(num_instances=100, shape=(32, 32)),\n )\n\n assert fit.is_called\n" }, { "id": 6163, "commit_id": 
"6ec371f2201c8b0a13b5719d59a19da94aaa09de", "repo": "ludwig", "path": "tests/integration_tests/test_visualization.py", "file_name": "test_visualization.py", "fun_name": "test_visualization_binary_threshold_vs_metric_output_saved", "commit_message": "Renamed \"training\" to \"trainer\" and \"numerical\" to \"number\" (#1743)", "code": "def test_visualization_binary_threshold_vs_metric_output_saved(csv_filename):\n \n input_features = [\n text_feature(vocab_size=10, min_len=1, encoder=\"stacked_cnn\"),\n number_feature(),\n category_feature(vocab_size=10, embedding_size=5),\n set_feature(),\n sequence_feature(vocab_size=10, max_len=10, encoder=\"embed\"),\n ]\n output_features = [category_feature(vocab_size=4, reduce_input=\"sum\")]\n\n # Generate test data\n rel_path = generate_data(input_features, output_features, csv_filename)\n input_features[0][\"encoder\"] = \"parallel_cnn\"\n exp_dir_name = run_experiment_with_visualization(input_features, output_features, dataset=rel_path)\n vis_output_pattern_pdf = os.path.join(exp_dir_name, \"*.pdf\")\n vis_output_pattern_png = os.path.join(exp_dir_name, \"*.png\")\n output_feature_name = get_output_feature_name(exp_dir_name)\n probability = os.path.join(exp_dir_name, PREDICTIONS_PARQUET_FILE_NAME)\n experiment_source_data_name = csv_filename.split(\".\")[0]\n ground_truth = experiment_source_data_name + \".csv\"\n split_file = experiment_source_data_name + \".split.csv\"\n test_cmd_pdf = [\n \"python\",\n \"-m\",\n \"ludwig.visualize\",\n \"--visualization\",\n \"binary_threshold_vs_metric\",\n \"--positive_label\",\n \"2\",\n \"--metrics\",\n \"accuracy\",\n \"--ground_truth\",\n ground_truth,\n \"--output_feature_name\",\n output_feature_name,\n \"--split_file\",\n split_file,\n \"--ground_truth_metadata\",\n exp_dir_name + \"/model/training_set_metadata.json\",\n \"--probabilities\",\n probability,\n probability,\n \"--model_names\",\n \"Model1\",\n \"Model2\",\n \"-od\",\n exp_dir_name,\n ]\n test_cmd_png = test_cmd_pdf.copy() + [\"-ff\", \"png\"]\n\n commands = [test_cmd_pdf, test_cmd_png]\n vis_patterns = [vis_output_pattern_pdf, vis_output_pattern_png]\n\n for command, viz_pattern in zip(commands, vis_patterns):\n result = subprocess.run(command)\n figure_cnt = glob.glob(viz_pattern)\n\n assert 0 == result.returncode\n assert 1 == len(figure_cnt)\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 431, "n_words": 130, "vocab_size": 99, "complexity": 2, "nloc": 54, "token_counts": 291, "n_ast_nodes": 475, "n_identifiers": 48, "random_cut": "def test_visualization_binary_threshold_vs_metric_output_saved(csv_filename):\n \n input_features = [\n text_feature(vocab_size=10, min_len=1, encoder=\"stacked_cnn\"),\n number_feature(),\n category_feature(vocab_size=10, embedding_size=5),\n set_feature(),\n sequence_feature(vocab_size=10, max_len=10, encoder=\"embed\"),\n ]\n output_features = [category_feature(vocab_size=4, reduce_input=\"sum\")]\n\n # Generate test data\n rel_path = generate_data(input_features, output_features, csv_filename)\n input_features[0][\"encoder\"] = \"parallel_cnn\"\n exp_dir_name = run_experiment_with_visualization(input_features, output_features, dataset=rel_path)\n vis_output_pattern_pdf = os.path.join(exp_dir_name, \"*.pdf\")\n vis_output_pattern_png = os.path.join(exp_dir_name, \"*.png\")\n output_feature_name = get_output_feature_name(exp_dir_name)\n probability = os.path.join(exp_dir_name, 
PREDICTIONS_PARQUET_FILE_NAME)\n experiment_source_data_name = csv_filename.split(\".\")[0]\n ground_truth = experiment_source_data_name + \".csv\"\n split_file =" }, { "id": 78285, "commit_id": "d967eccef28ce47f60d26be1c28f2d83a25f40b0", "repo": "wagtail", "path": "wagtail/contrib/settings/tests/generic/test_model.py", "file_name": "test_model.py", "fun_name": "test_get_page_url_returns_empty_string_if_attribute_value_not_a_page", "commit_message": "Add generic settings to compliment site-specific settings (#8327)", "code": "def test_get_page_url_returns_empty_string_if_attribute_value_not_a_page(self):\n settings = self._create_importantpagesgenericsetting_object()\n for value in (None, self.default_site):\n with self.subTest(attribute_value=value):\n settings.test_attribute = value\n # when called directly\n self.assertEqual(settings.get_page_url(\"test_attribute\"), \"\")\n # when called indirectly via shortcut\n self.assertEqual(settings.page_url.test_attribute, \"\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 128, "n_words": 29, "vocab_size": 23, "complexity": 2, "nloc": 7, "token_counts": 63, "n_ast_nodes": 108, "n_identifiers": 12, "random_cut": "def test_get_page_url_returns_empty_string_if_attribute_value_not_a_page(self):\n settings = self._create_importantpagesgenericsetting_object()\n for value in (None, self.default_site):\n with self.subTest(attribute_value=value):\n settings.test" }, { "id": 171315, "commit_id": "86f182829d2cfe2f4c380d7f2ecd6ea27d6e0f1d", "repo": "pandas", "path": "pandas/io/excel/_odfreader.py", "file_name": "_odfreader.py", "fun_name": "_get_cell_value", "commit_message": "REF change to_datetime -> Timestamp for in odf reader (#49712)\n\nchange to_datetime -> Timestamp for single value\r\n\r\nCo-authored-by: MarcoGorelli <>", "code": "def _get_cell_value(self, cell) -> Scalar | NaTType:\n from odf.namespaces import OFFICENS\n\n if str(cell) == \"#N/A\":\n return np.nan\n\n cell_type = cell.attributes.get((OFFICENS, \"value-type\"))\n if cell_type == \"boolean\":\n if str(cell) == \"TRUE\":\n return True\n return False\n if cell_type is None:\n return self.empty_value\n elif cell_type == \"float\":\n # GH5394\n cell_value = float(cell.attributes.get((OFFICENS, \"value\")))\n val = int(cell_value)\n if val == cell_value:\n return val\n return cell_value\n elif cell_type == \"percentage\":\n cell_value = cell.attributes.get((OFFICENS, \"value\"))\n return float(cell_value)\n elif cell_type == \"string\":\n return self._get_cell_string_value(cell)\n elif cell_type == \"currency\":\n cell_value = cell.attributes.get((OFFICENS, \"value\"))\n return float(cell_value)\n elif cell_type == \"date\":\n cell_value = cell.attributes.get((OFFICENS, \"date-value\"))\n return pd.Timestamp(cell_value)\n elif cell_type == \"time\":\n stamp = pd.Timestamp(str(cell))\n # cast needed here because Scalar doesn't include datetime.time\n return cast(Scalar, stamp.time())\n else:\n self.close()\n raise ValueError(f\"Unrecognized type {cell_type}\")\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 465, "n_words": 121, "vocab_size": 65, "complexity": 12, "nloc": 34, "token_counts": 232, "n_ast_nodes": 398, "n_identifiers": 27, "random_cut": "def _get_cell_value(self, cell) -> Scalar | NaTType:\n from odf.namespaces import OFFICENS\n\n if str(cell) == \"#N/A\":\n return np.nan\n\n 
cell_type = cell.attributes.get((OFFICENS, \"value-type\"))\n if cell_type == \"boolean\":\n if str(cell) == \"TRUE\":\n return True\n return False\n if cell_type is N" }, { "id": 4331, "commit_id": "0f475ce6ff95c4fc1e6793edd8d88861c1e8f60a", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-github/unit_tests/test_stream.py", "file_name": "test_stream.py", "fun_name": "test_stream_organizations_read", "commit_message": "🐛 Source Github: test coverage more than 90% (#10967)\n\nSigned-off-by: Sergey Chvalyuk ", "code": "def test_stream_organizations_read():\n organization_args = {\"organizations\": [\"org1\", \"org2\"]}\n stream = Organizations(**organization_args)\n responses.add(\"GET\", \"https://api.github.com/orgs/org1\", json={\"id\": 1})\n responses.add(\"GET\", \"https://api.github.com/orgs/org2\", json={\"id\": 2})\n records = read_full_refresh(stream)\n assert records == [{\"id\": 1}, {\"id\": 2}]\n\n\n@responses.activate", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "@responses.activate", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 45, "n_words": 29, "vocab_size": 24, "complexity": 1, "nloc": 7, "token_counts": 76, "n_ast_nodes": 144, "n_identifiers": 10, "random_cut": "def test_stream_organizations_read():\n organization_args = {\"organizations\": [\"org1\", \"org2\"]}\n stream = Organizations(**organization_args)\n responses.add(\"GET\", \"https://api.github.com/orgs/org1\", json={\"id\": 1})\n responses.add(\"GET\", \"https://api.github.com/or" }, { "id": 246319, "commit_id": "c3db7a0b59d48b8872bc24096f9a2467ef35f703", "repo": "synapse", "path": "tests/federation/transport/test_server.py", "file_name": "test_server.py", "fun_name": "test_open_public_room_list_over_federation", "commit_message": "Tests: replace mocked Authenticator with the real thing (#11913)\n\nIf we prepopulate the test homeserver with a key for a remote homeserver, we\r\ncan make federation requests to it without having to stub out the\r\nauthenticator. This has two advantages:\r\n\r\n * means that what we are testing is closer to reality (ie, we now have\r\n complete tests for the incoming-request-authorisation flow)\r\n\r\n * some tests require that other objects be signed by the remote server (eg,\r\n the event in `/send_join`), and doing that would require a whole separate\r\n set of mocking out. 
It's much simpler just to use real keys.", "code": "def test_open_public_room_list_over_federation(self):\n \n channel = self.make_signed_federation_request(\n \"GET\",\n \"/_matrix/federation/v1/publicRooms\",\n )\n self.assertEquals(200, channel.code)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 60, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 6, "token_counts": 27, "n_ast_nodes": 48, "n_identifiers": 6, "random_cut": "def test_open_public_room_list_over_federation(self):\n \n channel = self.make_signed_federation_request(\n \"GET\",\n \"/" }, { "id": 215739, "commit_id": "3bb43882e727b1d36abe2e501759c9c5e9048ecf", "repo": "salt", "path": "tests/pytests/unit/utils/win_dacl/test_get_name.py", "file_name": "test_get_name.py", "fun_name": "test_get_name_normal_name", "commit_message": "Add tests, migrate some tests to pytest", "code": "def test_get_name_normal_name():\n \n result = salt.utils.win_dacl.get_name(\"Administrators\")\n expected = \"Administrators\"\n assert result == expected\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 24, "n_words": 12, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 24, "n_ast_nodes": 46, "n_identifiers": 7, "random_cut": "def test_get_name_normal_name():\n \n result = salt.utils.win_dacl.get_name(\"Administrators\")\n expected = \"Administrators\"\n assert result == expected\n\n" }, { "id": 109648, "commit_id": "907f78dbf959c0609ab484c59e840eea3eafee31", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_axes.py", "file_name": "test_axes.py", "fun_name": "test_mixed_errorbar_polar_caps", "commit_message": "Curved polar errorbars\n\n - uses _interpolation_steps\n - prefers transform MarkerStyle in init over _transform property\n - adjusted what's new\n - added more tests for overlapping, asymmetric and long errorbars\n - combine all tests to a single figure\n - remove overlappnig since it does not work same on all platforms\n - rework test figure, add overlapping, might work by avoiding grid\n - update what's new with image and link to example", "code": "def test_mixed_errorbar_polar_caps():\n \n fig = plt.figure()\n ax = plt.subplot(111, projection='polar')\n\n # symmetric errorbars\n th_sym = [1, 2, 3]\n r_sym = [0.9]*3\n ax.errorbar(th_sym, r_sym, xerr=0.35, yerr=0.2, fmt=\"o\")\n\n # long errorbars\n th_long = [np.pi/2 + .1, np.pi + .1]\n r_long = [1.8, 2.2]\n ax.errorbar(th_long, r_long, xerr=0.8 * np.pi, yerr=0.15, fmt=\"o\")\n\n # asymmetric errorbars\n th_asym = [4*np.pi/3 + .1, 5*np.pi/3 + .1, 2*np.pi-0.1]\n r_asym = [1.1]*3\n xerr = [[.3, .3, .2], [.2, .3, .3]]\n yerr = [[.35, .5, .5], [.5, .35, .5]]\n ax.errorbar(th_asym, r_asym, xerr=xerr, yerr=yerr, fmt=\"o\")\n\n # overlapping errorbar\n th_over = [2.1]\n r_over = [3.1]\n ax.errorbar(th_over, r_over, xerr=10, yerr=.2, fmt=\"o\")\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 160, "n_words": 97, "vocab_size": 72, "complexity": 1, "nloc": 17, "token_counts": 273, "n_ast_nodes": 348, "n_identifiers": 21, "random_cut": "def test_mixed_errorbar_polar_caps():\n \n fig = plt.figure()\n ax = plt.subplot(111, projection='polar')\n\n # symmetric errorbars\n th_sym = [1, 2, 3]\n r_sym = [0.9]*3\n ax.errorbar(th_sym, r_sym, xerr=0.35, yerr=0.2, fmt=\"o\")\n\n # long 
errorbars\n th_long = [np.pi/2 + .1, np.pi + .1]\n r_long = [1.8, 2.2]\n ax.errorbar(th_long, r_long, xerr=0.8 * np.pi, yerr=0.15, fmt=\"o\")\n\n # asymmetric errorbars\n th_asym = [4*np.pi/3 + .1, 5*np.pi/3 + .1, 2*np.pi-0.1]\n r" }, { "id": 317749, "commit_id": "7cf2d1759dde088105f77ca61dba8e58e3474b83", "repo": "core", "path": "tests/components/risco/util.py", "file_name": "util.py", "fun_name": "two_zone_alarm", "commit_message": "Upgrade pyrisco to 0.5.0 (#75648)\n\n* Upgrade to pyrisco 0.4.0\r\n\r\n* Parametrized error tests in config flow\r\n\r\n* Inline error parameters\r\n\r\n* Switch to RiscoCloud", "code": "def two_zone_alarm():\n \n zone_mocks = {0: _zone_mock(), 1: _zone_mock()}\n alarm_mock = MagicMock()\n with patch.object(\n zone_mocks[0], \"id\", new_callable=PropertyMock(return_value=0)\n ), patch.object(\n zone_mocks[0], \"name\", new_callable=PropertyMock(return_value=\"Zone 0\")\n ), patch.object(\n zone_mocks[1], \"id\", new_callable=PropertyMock(return_value=1)\n ), patch.object(\n zone_mocks[1], \"name\", new_callable=PropertyMock(return_value=\"Zone 1\")\n ), patch.object(\n alarm_mock,\n \"zones\",\n new_callable=PropertyMock(return_value=zone_mocks),\n ), patch(\n \"homeassistant.components.risco.RiscoCloud.get_state\",\n return_value=alarm_mock,\n ):\n yield alarm_mock\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 145, "n_words": 45, "vocab_size": 30, "complexity": 1, "nloc": 20, "token_counts": 141, "n_ast_nodes": 230, "n_identifiers": 10, "random_cut": "def two_zone_alarm():\n \n zone_mocks = {0: _zone_mock(), 1: _zone_mock()}\n alarm_mock = MagicMock()\n with patch.object(\n zone_mocks[0], \"id\", new_callable=PropertyMock(return_value=0)\n ), patch.object(\n zone_mocks[0], \"name\", new_callable=PropertyMock(return_value=\"Zone 0\")\n ), patch.object(\n zone_mocks[1], \"id\", new_callable=PropertyMock(return_value=1)\n ), patch.object(\n zone_mocks[1], \"name\", new_callable=PropertyMock(return_value=\"Zone 1\")\n ), patch.object(\n alarm_mock,\n \"zones\",\n new_callable=PropertyMock(return_value=zone_mocks),\n ), patch(\n \"homeassistant.components.risco.RiscoCloud.get_state\",\n return_v" }, { "id": 176450, "commit_id": "cc1db275efc709cb964ce88abbfa877798d58c10", "repo": "networkx", "path": "networkx/readwrite/nx_shp.py", "file_name": "nx_shp.py", "fun_name": "read_shp", "commit_message": "Minor improvements from general code readthrough (#5414)\n\n* Add deprecated directive to reversed docstring.\r\n\r\n* Add missing dep directives to shpfiles.\r\n\r\n* Remove defn of INF sentinel.\r\n\r\n* typo.\r\n\r\n* str -> comment in forloop.\r\n\r\n* STY: appropriate casing for var name.", "code": "def read_shp(path, simplify=True, geom_attrs=True, strict=True):\n \n msg = (\n \"read_shp is deprecated and will be removed in 3.0.\"\n \"See https://networkx.org/documentation/latest/auto_examples/index.html#geospatial.\"\n )\n warnings.warn(msg, DeprecationWarning, stacklevel=2)\n try:\n from osgeo import ogr\n except ImportError as err:\n raise ImportError(\"read_shp requires OGR: http://www.gdal.org/\") from err\n\n if not isinstance(path, str):\n return\n\n net = nx.DiGraph()\n shp = ogr.Open(path)\n if shp is None:\n raise RuntimeError(f\"Unable to open {path}\")\n for lyr in shp:\n fields = [x.GetName() for x in lyr.schema]\n for f in lyr:\n g = f.geometry()\n if g is None:\n if strict:\n raise nx.NetworkXError(\"Bad data: feature missing geometry\")\n 
else:\n continue\n flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields]\n attributes = dict(zip(fields, flddata))\n attributes[\"ShpName\"] = lyr.GetName()\n # Note: Using layer level geometry type\n if g.GetGeometryType() == ogr.wkbPoint:\n net.add_node((g.GetPoint_2D(0)), **attributes)\n elif g.GetGeometryType() in (ogr.wkbLineString, ogr.wkbMultiLineString):\n for edge in edges_from_line(g, attributes, simplify, geom_attrs):\n e1, e2, attr = edge\n net.add_edge(e1, e2)\n net[e1][e2].update(attr)\n else:\n if strict:\n raise nx.NetworkXError(\n f\"GeometryType {g.GetGeometryType()} not supported\"\n )\n\n return net\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 573, "n_words": 150, "vocab_size": 109, "complexity": 14, "nloc": 41, "token_counts": 280, "n_ast_nodes": 463, "n_identifiers": 50, "random_cut": "def read_shp(path, simplify=True, geom_attrs=True, strict=True):\n \n msg = (\n \"read_shp is deprecated and will be removed in 3.0.\"\n \"See https://networkx.org/documentation/latest/auto_examples/index.html#geospatial.\"\n )\n warnings.warn(msg, DeprecationWarning, stacklevel=2)\n try:\n from osgeo import ogr\n except ImportError as err:\n raise ImportError(\"read_shp requires OGR: http://www.gdal.org/\") from e" }, { "id": 40630, "commit_id": "9cfdd4c5ed60562f3429386d25758476c5c57998", "repo": "seaborn", "path": "seaborn/tests/_core/test_plot.py", "file_name": "test_plot.py", "fun_name": "test_col_wrapping", "commit_message": "Implement prototype Plot.pair behavior", "code": "def test_col_wrapping(self):\n\n cols = list(\"abcd\")\n wrap = 3\n p = Plot().facet(col=cols, wrap=wrap).plot()\n\n gridspec = p._figure.axes[0].get_gridspec()\n assert len(p._figure.axes) == 4\n assert gridspec.ncols == 3\n assert gridspec.nrows == 2\n\n # TODO test axis labels and titles\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 89, "n_words": 34, "vocab_size": 26, "complexity": 1, "nloc": 8, "token_counts": 71, "n_ast_nodes": 116, "n_identifiers": 17, "random_cut": "def test_col_wrapping(self):\n\n cols = list(\"abcd\")\n wrap = 3\n p = Plot().facet(col=cols, wrap=wrap).plot()\n\n gridspec = p._figure.axes[0].get_gridspec()\n assert len(p._figure.axes) == 4\n assert gridspec.ncols == 3\n assert gridspec.nrows == 2\n\n # TODO test axis labels and titles\n" }, { "id": 44472, "commit_id": "ab762a5a8ae147ae33500ee3c7e7a73d25d03ad7", "repo": "airflow", "path": "tests/providers/ssh/operators/test_ssh.py", "file_name": "test_ssh.py", "fun_name": "test_command_errored", "commit_message": "Refactor SSH tests to not use SSH server in operator tests (#21326)\n\nThis required a slight refactor to the SSHOperator (moving\r\n`exec_ssh_client_command` \"down\" in to the Hook) but the SSH _Operator_\r\ntests now just use stubbing, and the only place that connects to a real\r\nSSH server is the one test of `test_exec_ssh_client_command` in SSHHook.\r\n\r\nThis is both better structured, and hopefully produces less (or ideally\r\nno) random failures in our tests", "code": "def test_command_errored(self):\n # Test that run_ssh_client_command works on invalid commands\n command = \"not_a_real_command\"\n task = SSHOperator(\n task_id=\"test\",\n ssh_hook=self.hook,\n command=command,\n )\n self.exec_ssh_client_command.return_value = (1, b'', b'Error here')\n with pytest.raises(AirflowException, match=f\"error 
running cmd: {command}, error: Error here\"):\n task.execute(None)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 121, "n_words": 36, "vocab_size": 34, "complexity": 1, "nloc": 10, "token_counts": 61, "n_ast_nodes": 104, "n_identifiers": 15, "random_cut": "def test_command_errored(self):\n # Test that run_ssh_client_command works on invalid commands\n command = \"not_a_real_command\"\n task = SSHOperator(\n task_id=\"test\",\n ssh_hook=self.hook,\n command=command,\n )\n self.exec_ssh_client_comman" }, { "id": 145168, "commit_id": "baa14d695aafd0abb817026ddd4b4182f68f6b61", "repo": "ray", "path": "release/nightly_tests/dataset/pipelined_training.py", "file_name": "pipelined_training.py", "fun_name": "create_dataset", "commit_message": "Round robin during spread scheduling (#21303)\n\n- Separate spread scheduling and default hydra scheduling (i.e. SpreadScheduling != HybridScheduling(threshold=0)): they are already separated in the API layer and they have the different end goals so it makes sense to separate their implementations and evolve them independently.\r\n- Simple round robin for spread scheduling: this is just a starting implementation, can be optimized later.\r\n- Prefer not to spill back tasks that are waiting for args since the pull is already in progress.", "code": "def create_dataset(files, num_workers=4, epochs=50, num_windows=1):\n if num_windows > 1:\n num_rows = ray.data.read_parquet(\n files\n ).count() # This should only read Parquet metadata.\n file_splits = np.array_split(files, num_windows)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 60, "n_words": 25, "vocab_size": 24, "complexity": 2, "nloc": 22, "token_counts": 129, "n_ast_nodes": 71, "n_identifiers": 13, "random_cut": "def create_dataset(files, num_workers=4, epochs=50, num_windows=1):\n if num_windows > 1:\n num_rows = ray.data.read_parquet(\n files\n ).count() # This should only read Parquet metadata.\n file_splits = np.array_split(files, num_windows)\n" }, { "id": 151313, "commit_id": "041258a5499534fae6de843a7b9d10dde02c7659", "repo": "freqtrade", "path": "tests/freqai/test_freqai_datakitchen.py", "file_name": "test_freqai_datakitchen.py", "fun_name": "test_use_DBSCAN_to_remove_outliers", "commit_message": "Fix arm test failure", "code": "def test_use_DBSCAN_to_remove_outliers(mocker, freqai_conf, caplog):\n freqai = make_data_dictionary(mocker, freqai_conf)\n # freqai_conf['freqai']['feature_parameters'].update({\"outlier_protection_percentage\": 1})\n freqai.dk.use_DBSCAN_to_remove_outliers(predict=False)\n assert log_has_re(r\"DBSCAN found eps of 2\\.3\\d\\.\", caplog)\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 30, "n_words": 19, "vocab_size": 19, "complexity": 1, "nloc": 4, "token_counts": 35, "n_ast_nodes": 54, "n_identifiers": 10, "random_cut": "def test_use_DBSCAN_to_remove_outliers(mocker, freqai_conf, caplog):\n freqai = make_data_dictionary(mocker, freqai_conf)\n # freqai_conf['freqai']['feature_parameters'].update({\"outlier_protection_percentage\": 1})\n freqai.dk.use_DBSCAN_to_remove_outliers(predict=False)\n assert log_has_re(r\"DBSCAN found eps of 2\\." 
}, { "id": 131150, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/perf_integration_tests/test_perf_integration.py", "file_name": "test_perf_integration.py", "fun_name": "warmup", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def warmup():\n x = np.zeros(10 ** 6, dtype=np.uint8)\n for _ in range(5):\n for _ in range(5):\n ray.put(x)\n for _ in range(5):\n ray.get([dummy_task.remote(0) for _ in range(1000)])\n\n\n@pytest.mark.benchmark\n@pytest.mark.parametrize(\"num_tasks\", num_tasks_submitted, ids=num_tasks_ids)", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.benchmark\n@pytest.mark.parametrize(\"num_tasks\", num_tasks_submitted, ids=num_tasks_ids)", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 69, "n_words": 30, "vocab_size": 19, "complexity": 5, "nloc": 7, "token_counts": 70, "n_ast_nodes": 142, "n_identifiers": 20, "random_cut": "def warmup():\n x = np.zeros(10 ** 6, dtype=np.uint8)\n for _ in range(5):\n for _ in range(5):\n ray.put(x)\n for _ in range(5):\n ray.ge" }, { "id": 290796, "commit_id": "39ac2c11017f84276cb23d15843dcccae5b104f4", "repo": "core", "path": "homeassistant/components/zha/climate.py", "file_name": "climate.py", "fun_name": "supported_features", "commit_message": "Adjust type hints for ClimateEntityFeature (#82206)", "code": "def supported_features(self) -> ClimateEntityFeature:\n \n features = self._supported_flags\n if HVACMode.HEAT_COOL in self.hvac_modes:\n features |= ClimateEntityFeature.TARGET_TEMPERATURE_RANGE\n if self._fan is not None:\n self._supported_flags |= ClimateEntityFeature.FAN_MODE\n return features\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 81, "n_words": 24, "vocab_size": 19, "complexity": 3, "nloc": 8, "token_counts": 44, "n_ast_nodes": 72, "n_identifiers": 11, "random_cut": "def supported_features(self) -> ClimateEntityFeature:\n \n features = self._supported_flags\n if HVACMode.HEAT_COOL in self.hvac_modes:\n " }, { "id": 215879, "commit_id": "53b3ebc92648c2081c58865713b50a2859ae8310", "repo": "salt", "path": "salt/modules/win_certutil.py", "file_name": "win_certutil.py", "fun_name": "__virtual__", "commit_message": "Fix win_certutil module to handle paths with spaces", "code": "def __virtual__():\n \n if salt.utils.platform.is_windows():\n return __virtualname__\n return False, \"Module win_certutil: module only works on Windows systems.\"\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 32, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 4, "token_counts": 22, "n_ast_nodes": 41, "n_identifiers": 6, "random_cut": "def __virtual__():\n \n if salt.utils.platform.is_windows():\n return __virtualname__" }, { "id": 100121, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_user_index.py", "file_name": "test_user_index.py", "fun_name": "test_email_query", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_email_query(self):\n response = self.get_success_response(qs_params={\"query\": \"email:bar@example.com\"})\n assert len(response.data) == 1\n assert response.data[0][\"id\"] == str(self.superuser.id)\n\n response = 
self.get_success_response(qs_params={\"query\": \"email:foobar\"})\n assert len(response.data) == 0\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 56, "n_words": 22, "vocab_size": 14, "complexity": 1, "nloc": 6, "token_counts": 70, "n_ast_nodes": 119, "n_identifiers": 10, "random_cut": "def test_email_query(self):\n response = self.get_success_response(qs_params={\"query\": \"email:bar@example.com\"})\n assert len(response.data) == 1\n assert response.data[0][\"id\"] == str(self.superuser.id)\n\n response = s" }, { "id": 22768, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "rotate_string.py", "file_name": "rotate_string.py", "fun_name": "circular_rotate", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def circular_rotate(s):\n s = list(s)\n idx = 0\n mid = len(s) // 2\n for i in reversed(range(mid, len(s))):\n s[idx], s[i] = s[i], s[idx]\n idx += 1\n return s\n\n\ns = \"aditya\"\nprint(\"\".join(circular_rotate(s)))\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 58, "n_words": 32, "vocab_size": 25, "complexity": 2, "nloc": 8, "token_counts": 61, "n_ast_nodes": 125, "n_identifiers": 11, "random_cut": "def circular_rotate(s):\n s = list(s)\n idx = 0\n mid = len(s) // 2\n for i in reversed(range(mid, len(s))):\n s[idx], s[i] = s[i], s[" }, { "id": 282025, "commit_id": "683a8bdd83c1b931df111a5b2b8b19350930b73a", "repo": "OpenBBTerminal", "path": "tests/gamestonk_terminal/economy/test_economy_controller.py", "file_name": "test_economy_controller.py", "fun_name": "test_switch", "commit_message": "Tests : Economy + Conftest (#1260)\n\n* Updating tests : economy\r\n\r\n* Updating tests : removing breaklines\r\n\r\n* Updating tests : economy\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : economy", "code": "def test_switch(an_input, expected_queue):\n controller = economy_controller.EconomyController(queue=None)\n queue = controller.switch(an_input=an_input)\n\n assert queue == expected_queue\n\n\n@pytest.mark.vcr(record_mode=\"none\")", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr(record_mode=\"none\")", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 21, "n_words": 14, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 31, "n_ast_nodes": 69, "n_identifiers": 12, "random_cut": "def test_switch(an_input, expected_queue):\n " }, { "id": 45079, "commit_id": "46a337c8cda6fcc515fffe9a4e4cc324edaefa0a", "repo": "airflow", "path": "airflow/decorators/base.py", "file_name": "base.py", "fun_name": "get_serialized_fields", "commit_message": "Implement mapped value unpacking (#21641)", "code": "def get_serialized_fields(cls):\n # The magic super() doesn't work here, so we use the explicit form.\n # Not using super(..., cls) to work around pyupgrade bug.\n sup = super(DecoratedMappedOperator, DecoratedMappedOperator)\n return sup.get_serialized_fields() | {\"mapped_op_kwargs\"}\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 60, "n_words": 33, "vocab_size": 31, "complexity": 1, "nloc": 3, "token_counts": 23, "n_ast_nodes": 41, "n_identifiers": 5, "random_cut": "def get_serialized_fields(cls):\n # The magic super() doesn't work here, so we use the 
explicit form.\n # Not using super(..., cls) to work around pyupgrade bug.\n sup = super(DecoratedMappedOperator, DecoratedMappedOperator)\n return sup" }, { "id": 156279, "commit_id": "bfc76afdcdc43c575a10ffefda94aaba424fe347", "repo": "dask", "path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "fun_name": "test_optimize_and_not", "commit_message": "Warn users that `engine='auto'` will change in future (#8907)\n\n* Warn user that `engine='auto'` will change in future\r\n\r\nAdds a warning to `read_parquet` and `to_parquet` that the meaning of\r\n`engine='auto'` will change in the future (switching to using `pyarrow`\r\nif it is installed, and falling back to `fastparquet`). This warning is\r\nonly raised for users that have both libraries installed and have\r\n`engine='auto'`. Users without both libraries installed or that already\r\nspecify an engine will not see this warning.\r\n\r\n* Fixup failing test\r\n\r\n* Only warn if backend-specific options are passed\r\n\r\nWe now warn if:\r\n\r\n- `engine='auto'`\r\n- Both `pyarrow` and `fastparquet` are installed\r\n- Backend specific options are provided", "code": "def test_optimize_and_not(tmpdir, engine):\n path = os.path.join(tmpdir, \"path.parquet\")\n df = pd.DataFrame(\n {\"a\": [3, 4, 2], \"b\": [1, 2, 4], \"c\": [5, 4, 2], \"d\": [1, 2, 3]},\n index=[\"a\", \"b\", \"c\"],\n )\n df.to_parquet(path, engine=engine)\n\n df2 = dd.read_parquet(path, engine=engine)\n df2a = df2[\"a\"].groupby(df2[\"c\"]).first().to_delayed()\n df2b = df2[\"b\"].groupby(df2[\"c\"]).first().to_delayed()\n df2c = df2[[\"a\", \"b\"]].rolling(2).max().to_delayed()\n df2d = df2.rolling(2).max().to_delayed()\n (result,) = dask.compute(df2a + df2b + df2c + df2d)\n\n expected = [\n dask.compute(df2a)[0][0],\n dask.compute(df2b)[0][0],\n dask.compute(df2c)[0][0],\n dask.compute(df2d)[0][0],\n ]\n for a, b in zip(result, expected):\n assert_eq(a, b)\n\n\n@write_read_engines()", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "@write_read_engines()", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 161, "n_words": 75, "vocab_size": 58, "complexity": 2, "nloc": 21, "token_counts": 274, "n_ast_nodes": 437, "n_identifiers": 32, "random_cut": "def test_optimize_and_not(tmpdir, engine):\n path = os.path.join(tmpd" }, { "id": 118276, "commit_id": "402b28c300dbca149db75439c358f811ea1d36ac", "repo": "mindsdb", "path": "mindsdb/interfaces/model/model_controller.py", "file_name": "model_controller.py", "fun_name": "delete_model_version", "commit_message": "keep del company_id", "code": "def delete_model_version(self, models):\n if len(models) == 0:\n raise Exception(f\"Version to delete is not found\")\n\n for model in models:\n model_record = get_model_record(\n name=model['NAME'],\n project_name=model['PROJECT'],\n version=model['VERSION']\n )\n if model_record.active:\n raise Exception(f\"Can't remove active version: f{model['PROJECT']}.{model['NAME']}.{model['VERSION']}\")\n\n is_cloud = self.config.get('cloud', False)\n if is_cloud:\n model_record.deleted_at = dt.datetime.now()\n else:\n db.session.delete(model_record)\n modelStorage = ModelStorage(model_record.id)\n modelStorage.delete()\n\n db.session.commit()\n\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 258, "n_words": 49, "vocab_size": 43, "complexity": 5, "nloc": 19, "token_counts": 118, "n_ast_nodes": 224, "n_identifiers": 26, "random_cut": "def 
delete_model_version(self, models):\n if len(models) == 0:\n raise Exception(f\"Version to dele" }, { "id": 129927, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "dashboard/modules/job/tests/test_job_manager.py", "file_name": "test_job_manager.py", "fun_name": "test_unknown_job", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "async def test_unknown_job(self, job_manager):\n with pytest.raises(RuntimeError, match=\"Job 'unknown' does not exist.\"):", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 17, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 4, "token_counts": 31, "n_ast_nodes": 34, "n_identifiers": 7, "random_cut": "async def test_unknown_job(self, job_manager):\n with pytest.raises(RuntimeError, match=\"Job 'unknown' does not exist.\"):" }, { "id": 8288, "commit_id": "0f509327aed9f9ec687543e1b851f66061603375", "repo": "ludwig", "path": "tests/ludwig/utils/test_dataframe_utils.py", "file_name": "test_dataframe_utils.py", "fun_name": "test_to_numpy_dataset_with_pandas_backend_mismatch", "commit_message": "Ensure no ghost ray instances are running in tests (#2607)\n\n* Add test fixtures to prevent double initialization\r\n\r\n* contain dataframe utils test in ray test fixture\r\n\r\n* add fixture for training determinism test\r\n\r\n* fix typo in docstring", "code": "def test_to_numpy_dataset_with_pandas_backend_mismatch(ray_cluster_2cpu):\n pd_df = pd.DataFrame([[1, 2, 3]], columns=[\"col1\", \"col2\", \"col3\"])\n ray_backend = create_backend(\"ray\")\n\n with pytest.raises(AttributeError):\n to_numpy_dataset(pd_df, backend=ray_backend)\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 32, "n_words": 17, "vocab_size": 16, "complexity": 1, "nloc": 5, "token_counts": 53, "n_ast_nodes": 89, "n_identifiers": 13, "random_cut": "def test_to_numpy_dataset_with_pandas_backend_mismatch(ray_cluster_2cpu):\n pd_df = pd.DataFrame([[" }, { "id": 164211, "commit_id": "9a98aca4b57af277b7747e402029bd57088cba2c", "repo": "pandas", "path": "pandas/core/arrays/interval.py", "file_name": "interval.py", "fun_name": "_validate_listlike", "commit_message": "REF: implement LossySetitemError (#45672)", "code": "def _validate_listlike(self, value):\n # list-like of intervals\n try:\n array = IntervalArray(value)\n self._check_closed_matches(array, name=\"value\")\n value_left, value_right = array.left, array.right\n except TypeError as err:\n # wrong type: not interval or NA\n msg = f\"'value' should be an interval type, got {type(value)} instead.\"\n raise TypeError(msg) from err\n\n try:\n self.left._validate_fill_value(value_left)\n except (LossySetitemError, TypeError) as err:\n msg = (\n \"'value' should be a compatible interval type, \"\n f\"got {type(value)} instead.\"\n )\n raise TypeError(msg) from err\n\n return value_left, value_right\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 254, "n_words": 73, "vocab_size": 51, "complexity": 3, "nloc": 17, "token_counts": 89, "n_ast_nodes": 166, "n_identifiers": 17, "random_cut": "def _validate_listlike(self, value):\n # list-like of intervals\n try:\n array = Inter" }, { "id": 25940, "commit_id": 
"f365f77eb90579cb0dcccc24735f82e039b71f62", "repo": "saleor", "path": "saleor/payment/tests/test_utils.py", "file_name": "test_utils.py", "fun_name": "test_create_refund_data_order_lines", "commit_message": "Refactor voucher and shipping amount for payment lines data", "code": "def test_create_refund_data_order_lines(order_with_lines, refund_shipping_costs):\n # given\n order_lines = order_with_lines.lines.all()\n order_refund_lines = [\n OrderLineInfo(line=(line := order_lines[0]), quantity=2, variant=line.variant),\n OrderLineInfo(line=(line := order_lines[1]), quantity=1, variant=line.variant),\n ]\n fulfillment_refund_lines = []\n\n # when\n refund_data = create_refund_data(\n order_with_lines,\n order_refund_lines,\n fulfillment_refund_lines,\n refund_shipping_costs,\n )\n\n # then\n assert refund_data.lines == {\n line.variant_id: line.quantity - refund_line.quantity\n for line, refund_line in zip(order_lines, order_refund_lines)\n }\n assert refund_data.shipping == refund_shipping_costs\n\n\n@pytest.mark.parametrize(\"refund_shipping_costs\", [True, False])", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"refund_shipping_costs\", [True, False])", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 149, "n_words": 59, "vocab_size": 49, "complexity": 2, "nloc": 18, "token_counts": 120, "n_ast_nodes": 201, "n_identifiers": 21, "random_cut": "def test_create_refund_data_order_lines(order_with_lines, refund_shipping_costs):\n # given\n order_lines = order_with_lines.lines.all()\n order_refund_lines = [\n OrderLineInfo(line=(line := order_lines[0]), quantity=2, variant=" }, { "id": 113967, "commit_id": "4f36633784a0b0e8a043bbd71d5a1f2ae8f584ed", "repo": "mindsdb", "path": "mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", "file_name": "mysql_proxy.py", "fun_name": "answer_create_predictor", "commit_message": "cast column names on create predictor", "code": "def answer_create_predictor(self, statement):\n struct = {\n 'predictor_name': statement.name.parts[-1],\n 'integration_name': statement.integration_name.parts[-1],\n 'select': statement.query_str,\n 'predict': [x.parts[-1] for x in statement.targets]\n }\n if len(struct['predict']) > 1:\n raise Exception(\"Only one field can be in 'PREDICT'\")\n if statement.using is not None:\n struct['using'] = statement.using\n if statement.datasource_name is not None:\n struct['datasource_name'] = statement.datasource_name.parts[-1]\n if statement.order_by is not None:\n struct['order_by'] = [x.field.parts[-1] for x in statement.order_by]\n if len(struct['order_by']) > 1:\n raise Exception(\"Only one field can be in 'OPRDER BY'\")\n if statement.group_by is not None:\n struct['group_by'] = [x.parts[-1] for x in statement.group_by]\n if statement.window is not None:\n struct['window'] = statement.window\n if statement.horizon is not None:\n struct['horizon'] = statement.horizon\n\n model_interface = self.session.model_interface\n data_store = self.session.data_store\n\n predictor_name = struct['predictor_name']\n integration_name = struct['integration_name']\n\n if integration_name.lower().startswith('datasource.'):\n ds_name = integration_name[integration_name.find('.') + 1:]\n ds = data_store.get_datasource_obj(ds_name, raw=True)\n ds_data = data_store.get_datasource(ds_name)\n else:\n if self.session.datasource_interface.get_db_integration(integration_name) is None:\n raise Exception(f\"Unknown integration: {integration_name}\")\n\n ds_name = struct.get('datasource_name')\n if 
ds_name is None:\n ds_name = data_store.get_vacant_name(predictor_name)\n\n ds = data_store.save_datasource(ds_name, integration_name, {'query': struct['select']})\n ds_data = data_store.get_datasource(ds_name)\n\n timeseries_settings = {}\n for w in ['order_by', 'group_by', 'window', 'horizon']:\n if w in struct:\n timeseries_settings[w] = struct.get(w)\n\n kwargs = struct.get('using', {})\n if len(timeseries_settings) > 0:\n if 'timeseries_settings' not in kwargs:\n kwargs['timeseries_settings'] = timeseries_settings\n else:\n if isinstance(kwargs.get('timeseries_settings'), str):\n kwargs['timeseries_settings'] = json.loads(kwargs['timeseries_settings'])\n kwargs['timeseries_settings'].update(timeseries_settings)\n\n ds_column_names = [x['name'] for x in ds_data['columns']]\n try:\n predict = self._check_predict_columns(struct['predict'], ds_column_names)\n except Exception:\n data_store.delete_datasource(ds_name)\n raise\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 774, "n_words": 211, "vocab_size": 113, "complexity": 28, "nloc": 71, "token_counts": 659, "n_ast_nodes": 837, "n_identifiers": 49, "random_cut": "def answer_create_predictor(self, statement):\n struct = {\n 'predictor_name': statement.name.parts[-1],\n 'integration_name': statement.integration_name.parts[-1],\n 'select': statement.query_str,\n 'predict': [x.parts[-1] for x in statement.targets]\n }\n if len(struct['predict']) > 1:\n raise Exception(\"Only one field can be in 'PREDICT'\")\n if statement.using is not None:\n struct['using'] = statement.using\n if statement.datasource_name is not None:\n struct['datasource_name'] = statement.datasource_name.parts[-1]\n if statement.order_by is not None:\n struct['order_by'] = [x.field.parts[-1] for x in statement.order_by]\n if len(struct['order_by']) > 1:\n raise Exception(\"Only one field can be in 'OPRDER BY'\")\n if statemen" }, { "id": 201790, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/backends/sqlite/tests.py", "file_name": "tests.py", "fun_name": "test_distinct_aggregation_multiple_args_no_distinct", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_distinct_aggregation_multiple_args_no_distinct(self):\n # Aggregate functions accept multiple arguments when DISTINCT isn't\n # used, e.g. 
GROUP_CONCAT().", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 4, "n_whitespaces": 28, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 34, "n_ast_nodes": 11, "n_identifiers": 2, "random_cut": "def test_distinct_aggregation_multiple_args_no_distinct(self):\n # Aggregate" }, { "id": 2049, "commit_id": "5724f9c7db0a840c6c209d1d02d03c24ddf59d35", "repo": "PySyft", "path": "packages/syft/src/syft/core/node/common/node_service/tff/tff_service.py", "file_name": "tff_service.py", "fun_name": "aux_recursive_od2d", "commit_message": "fixed security tests", "code": "def aux_recursive_od2d(dit):\n new_dict = {}\n for key in dit:\n if type(dit[key]) == collections.OrderedDict:\n new_elem = aux_recursive_od2d(dit[key])\n new_dict[key] = new_elem\n else:\n new_dict[key] = dit[key]\n return new_dict\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 80, "n_words": 25, "vocab_size": 19, "complexity": 3, "nloc": 9, "token_counts": 55, "n_ast_nodes": 86, "n_identifiers": 8, "random_cut": "def aux_recursive_od2d(dit):\n new_dict = {}\n for key in dit:\n if type(dit[key]) == collections.OrderedDict:\n new_elem = aux_recursive_od2d(dit[key])\n new_dict[key] = new_elem\n " }, { "id": 124070, "commit_id": "ac831fded416381ad3c7fe2ba135eaa4aaab5879", "repo": "ray", "path": "python/ray/tune/examples/cifar10_pytorch.py", "file_name": "cifar10_pytorch.py", "fun_name": "train_cifar", "commit_message": "[air] update documentation to use `session.report` (#26051)\n\nUpdate documentation to use `session.report`.\r\n\r\nNext steps:\r\n1. Update our internal caller to use `session.report`. Most importantly, CheckpointManager and DataParallelTrainer.\r\n2. Update `get_trial_resources` to use PGF notions to incorporate the requirement of ResourceChangingScheduler. @Yard1 \r\n3. After 2 is done, change all `tune.get_trial_resources` to `session.get_trial_resources`\r\n4. [internal implementation] remove special checkpoint handling logic from huggingface trainer. 
Optimize the flow for checkpoint conversion with `session.report`.\r\n\r\nCo-authored-by: Antoni Baum ", "code": "def train_cifar(config):\n net = Net(config[\"l1\"], config[\"l2\"])\n\n device = \"cpu\"\n if torch.cuda.is_available():\n device = \"cuda:0\"\n if torch.cuda.device_count() > 1:\n net = nn.DataParallel(net)\n net.to(device)\n\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(net.parameters(), lr=config[\"lr\"], momentum=0.9)\n\n # Load existing checkpoint through `session.get_checkpoint()` API.\n if session.get_checkpoint():\n loaded_checkpoint = session.get_checkpoint()\n with loaded_checkpoint.as_directory() as loaded_checkpoint_dir:\n model_state, optimizer_state = torch.load(os.path.join(loaded_checkpoint_dir, \"checkpoint.pt\"))\n net.load_state_dict(model_state)\n optimizer.load_state_dict(optimizer_state)\n\n data_dir = os.path.abspath(\"./data\")\n trainset, testset = load_data(data_dir)\n\n test_abs = int(len(trainset) * 0.8)\n train_subset, val_subset = random_split(\n trainset, [test_abs, len(trainset) - test_abs])\n\n trainloader = torch.utils.data.DataLoader(\n train_subset,\n batch_size=int(config[\"batch_size\"]),\n shuffle=True,\n num_workers=8)\n valloader = torch.utils.data.DataLoader(\n val_subset,\n batch_size=int(config[\"batch_size\"]),\n shuffle=True,\n num_workers=8)\n\n for epoch in range(10): # loop over the dataset multiple times\n running_loss = 0.0\n epoch_steps = 0\n for i, data in enumerate(trainloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n inputs, labels = inputs.to(device), labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n epoch_steps += 1\n if i % 2000 == 1999: # print every 2000 mini-batches\n print(\"[%d, %5d] loss: %.3f\" % (epoch + 1, i + 1,\n running_loss / epoch_steps))\n running_loss = 0.0\n\n # Validation loss\n val_loss = 0.0\n val_steps = 0\n total = 0\n correct = 0\n for i, data in enumerate(valloader, 0):\n with torch.no_grad():\n inputs, labels = data\n inputs, labels = inputs.to(device), labels.to(device)\n\n outputs = net(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n loss = criterion(outputs, labels)\n val_loss += loss.cpu().numpy()\n val_steps += 1\n\n # Here we save a checkpoint. 
It is automatically registered with\n # Ray Tune and will potentially be accessed through in ``session.get_checkpoint()``\n # in future iterations.\n # Note to save a file like checkpoint, you still need to put it under a directory\n # to construct an AIR checkpoint.\n os.makedirs(\"my_model\", exist_ok=True) # ok to overwrite the previous one.\n path = os.path.join(\"my_model\", \"checkpoint.pt\")\n torch.save(\n (net.state_dict(), optimizer.state_dict()), path)\n checkpoint = Checkpoint.from_directory(\"my_model\")\n session.report({\"loss\": (val_loss / val_steps), \"accuracy\": correct / total}, checkpoint=checkpoint)\n print(\"Finished Training\")\n# __train_end__\n\n\n# __test_acc_begin__", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1035, "n_words": 331, "vocab_size": 201, "complexity": 8, "nloc": 70, "token_counts": 572, "n_ast_nodes": 935, "n_identifiers": 86, "random_cut": "def train_cifar(config):\n net = Net(config[\"l1\"], config[\"l2\"])\n\n device = \"cpu\"\n if torch.cuda.is_available():\n device = \"cuda:0\"\n if torch.cuda.device_count() > 1:\n net = nn.DataParallel(net)\n net.to(device)\n\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(net.parameters(), lr=config[\"lr\"], momentum=0.9)\n\n # Load existing checkpoint through `session.get_checkpoint()` API.\n if session.get_checkpoint():\n loaded_checkpoint = session.get_checkpoint()\n with loaded_checkpoint.as_directory() as loaded_checkpoint_dir:\n model_state, optimizer_state = torch.load(os.path.join(loaded_checkpoint_dir, \"checkpoint.pt\"))\n net.load_state_dict(model_state)\n optimizer.load_state_dict(optimizer_state)\n\n data_dir = os.path.abspath(\"./data\")\n trainset, testset = load_data(data_dir)\n\n test_abs = int(len(trainset) * 0.8)\n train_subset, val_subset = random_split(\n trainset, [test_abs, len(trainset) - test_abs])\n\n trainloader = torch.utils.data.DataLoader(\n train_subset,\n batch_size=int(config[\"batch_size\"]),\n shuffle=True,\n num_workers=8)\n valloader = torch.utils.data.DataLoader(\n val_subset,\n batch_size=int(config[\"batch_size\"]),\n shuffle=True,\n num_workers=8)\n\n for epoch in range(10): # loop over the dataset multiple times\n running_loss = 0.0\n epoch_steps = 0\n for i, data in enumerate(trainloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n inputs, labels = inputs.to(device), labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n " }, { "id": 32004, "commit_id": "49cd736a288a315d741e5c337790effa4c9fa689", "repo": "transformers", "path": "tests/pipelines/test_pipelines_common.py", "file_name": "test_pipelines_common.py", "fun_name": "test_register_pipeline", "commit_message": "feat: add pipeline registry abstraction (#17905)\n\n* feat: add pipeline registry abstraction\r\n\r\n- added `PipelineRegistry` abstraction\r\n- updates `add_new_pipeline.mdx` (english docs) to reflect the api addition\r\n- migrate `check_task` and `get_supported_tasks` from\r\n transformers/pipelines/__init__.py to\r\n transformers/pipelines/base.py#PipelineRegistry.{check_task,get_supported_tasks}\r\n\r\nSigned-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com>\r\n\r\n* fix: update with upstream/main\r\n\r\nchore: Apply suggestions from sgugger's code review\r\n\r\nSigned-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com>\r\n\r\nCo-authored-by: Sylvain Gugger 
<35901082+sgugger@users.noreply.github.com>\r\n\r\n* chore: PR updates\r\n\r\n- revert src/transformers/dependency_versions_table.py from upstream/main\r\n- updates pipeline registry to use global variables\r\n\r\nSigned-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com>\r\n\r\n* tests: add tests for pipeline registry\r\n\r\nSigned-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com>\r\n\r\n* tests: add test for output warning.\r\n\r\nSigned-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com>\r\n\r\n* chore: fmt and cleanup unused imports\r\n\r\nSigned-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com>\r\n\r\n* fix: change imports to top of the file and address comments\r\n\r\nSigned-off-by: Aaron Pham <29749331+aarnphm@users.noreply.github.com>\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def test_register_pipeline(self):\n custom_text_classification = {\n \"impl\": CustomPipeline,\n \"tf\": (),\n \"pt\": (AutoModelForSequenceClassification,),\n \"default\": {\"model\": {\"pt\": \"hf-internal-testing/tiny-random-distilbert\"}},\n \"type\": \"text\",\n }\n PIPELINE_REGISTRY.register_pipeline(\"custom-text-classification\", custom_text_classification)\n assert \"custom-text-classification\" in PIPELINE_REGISTRY.get_supported_tasks()\n\n task_def, _ = PIPELINE_REGISTRY.check_task(\"custom-text-classification\")\n self.assertEqual(task_def, custom_text_classification)\n self.assertEqual(task_def[\"type\"], \"text\")\n self.assertEqual(task_def[\"impl\"], CustomPipeline)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 144, "n_words": 34, "vocab_size": 32, "complexity": 1, "nloc": 14, "token_counts": 97, "n_ast_nodes": 174, "n_identifiers": 12, "random_cut": "def test_register_pipeline(self):\n custom_text_classification = {\n \"impl\": CustomPipeline,\n \"tf\": (),\n \"pt\": (AutoModelForSequenceClassification,),\n \"default\": {\"model\": {\"pt\": \"hf-internal-testing/tiny-random-d" }, { "id": 4089, "commit_id": "16dfae0df7290392e5f0754731ae53ae7b7f939d", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-chargebee/source_chargebee/source.py", "file_name": "source.py", "fun_name": "streams", "commit_message": ":tada: Source Chargebee: add credit note model (#10795)\n\n* feat(chargebee) add credit note model\r\n\r\n* fix(airbyte): update version Dockerfile\r\n\r\n* fix(airbyte): update version Dockerfile v2", "code": "def streams(self, config) -> List[Stream]:\n # Configure the Chargebee Python SDK\n chargebee.configure(api_key=config[\"site_api_key\"], site=config[\"site\"])\n\n kwargs = {\"start_date\": config[\"start_date\"]}\n product_catalog_version = config[\"product_catalog\"]\n\n # Below streams are suitable for both `Product Catalog 1.0` and `Product Catalog 2.0`.\n common_streams = [\n Coupon(**kwargs),\n CreditNote(**kwargs),\n Customer(**kwargs),\n Event(**kwargs),\n Invoice(**kwargs),\n Order(**kwargs),\n Subscription(**kwargs),\n Transaction(**kwargs),\n ]\n\n if product_catalog_version == \"1.0\":\n # Below streams are suitable only for `Product Catalog 1.0`.\n product_catalog_v1_streams = [\n Addon(**kwargs),\n Plan(**kwargs),\n ]\n return common_streams + product_catalog_v1_streams\n\n # Below streams are suitable only for `Product Catalog 2.0`.\n product_catalog_v2_streams = [\n Item(**kwargs),\n ItemPrice(**kwargs),\n AttachedItem(**kwargs),\n ]\n return 
common_streams + product_catalog_v2_streams\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 369, "n_words": 91, "vocab_size": 55, "complexity": 2, "nloc": 26, "token_counts": 149, "n_ast_nodes": 242, "n_identifiers": 27, "random_cut": "def streams(self, config) -> List[Stream]:\n # Configure the Chargebee Python SDK\n chargebee.configure(api_key=config[\"site_api_key\"], site=config[\"site\"])\n\n kwargs = {\"start_date\": config[\"start_date\"]}\n product_catalog_version = config[\"product_catalog\"]\n\n # Below streams are suitable for both `Product Catalog 1.0` and `Product Catalog 2.0`.\n common_streams = [\n Coupon(**kwargs),\n CreditNote(**kwargs),\n Customer(**kwargs),\n Event(**kwargs),\n Invoice(**kwargs),\n Order(**kwargs),\n Subscription(**kwargs),\n Transaction(**kwargs),\n ]\n\n if product_catalog_version == \"1.0\":\n # Below streams are suitable only for `Product Catalog 1.0`.\n product_catalog_v1_s" }, { "id": 28352, "commit_id": "8201efcde2d7aacccf3512c544cceea6780a0598", "repo": "saleor", "path": "saleor/plugins/webhook/tests/subscription_webhooks/test_create_deliveries_for_subscriptions_payments.py", "file_name": "test_create_deliveries_for_subscriptions_payments.py", "fun_name": "test_payment_confirm", "commit_message": "GraphQL subscription support for synchronous webhook events (#9763)\n\n* WIP add sync webhooks subscription payload handling\r\n\r\n* add tests, fix minor things\r\n\r\n* update schema\r\n\r\n* remove unneeded code\r\n\r\n* add fix for circular field resolve\r\n\r\n* fix-filter-shipping-methods-payload\r\n\r\n* added_in added to desription\r\n\r\n* add missing types\r\n\r\n* revert refactor, precommit issues\r\n\r\n* fixes after review\r\n\r\n* cosmetix fixes post-review\r\n\r\n* subscription types description fixes\r\n\r\n* remove unneeded description from PaymentBase\r\n\r\n* add validation for creating webhook with two top level fields, add tests for shippingListMethodsForCheckout\r\n\r\n* add docstring, refactor prevent_sync_event_circular_wuery wrapper\r\n\r\n* fix docstring of revent_sync_event_circular_query\r\n\r\n* fix linters", "code": "def test_payment_confirm(payment, subscription_payment_confirm_webhook):\n # given\n webhooks = [subscription_payment_confirm_webhook]\n event_type = WebhookEventSyncType.PAYMENT_CONFIRM\n\n # when\n deliveries = create_deliveries_for_subscriptions(event_type, payment, webhooks)\n\n # then\n expected_payload = generate_payment_payload(payment)\n assert json.loads(deliveries[0].payload.payload) == expected_payload\n assert len(deliveries) == len(webhooks)\n assert deliveries[0].webhook == webhooks[0]\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 64, "n_words": 35, "vocab_size": 25, "complexity": 1, "nloc": 8, "token_counts": 71, "n_ast_nodes": 111, "n_identifiers": 16, "random_cut": "def test_payment_confirm(payment, subscription_payment_confirm_webhook):\n # given\n webhooks = [subscription_payment_confirm_webhoo" }, { "id": 215070, "commit_id": "fae21e4698d9bb45a407345e7dff5ce3b69f799d", "repo": "salt", "path": "tests/pytests/unit/state/test_state_compiler.py", "file_name": "test_state_compiler.py", "fun_name": "test_render_requisite_require_disabled", "commit_message": "Fix test cases with PermissionError on /var/cache/salt\n\nWhen running the test cases without root permission, some test cases fail:\n\n```\n$ 
python3 -m pytest -ra tests/pytests/unit/state/test_state_compiler.py tests/pytests/unit/test_minion.py\n[...]\nFAILED tests/pytests/unit/state/test_state_compiler.py::test_render_requisite_require_disabled - PermissionError: [Errno 13] Permission denied: '/var/cache/salt'\nFAILED tests/pytests/unit/state/test_state_compiler.py::test_render_requisite_require_in_disabled - PermissionError: [Errno 13] Permission denied: '/var/cache/salt'\nFAILED tests/pytests/unit/test_minion.py::test_minion_module_refresh - PermissionError: [Errno 13] Permission denied: '/var/cache/salt'\nFAILED tests/pytests/unit/test_minion.py::test_minion_module_refresh_beacons_refresh - PermissionError: [Errno 13] Permission denied: '/var/cache/salt'\n```\n\nFix these test cases by using a temporary directory as cache directory.\n\nSigned-off-by: Benjamin Drung ", "code": "def test_render_requisite_require_disabled(tmp_path):\n \n with patch(\"salt.state.State._gather_pillar\") as state_patch:\n high_data = {\n \"step_one\": OrderedDict(\n [\n (\n \"test\",\n [\n OrderedDict(\n [(\"require\", [OrderedDict([(\"test\", \"step_two\")])])]\n ),\n \"succeed_with_changes\",\n {\"order\": 10000},\n ],\n ),\n (\"__sls__\", \"test.disable_require\"),\n (\"__env__\", \"base\"),\n ]\n ),\n \"step_two\": {\n \"test\": [\"succeed_with_changes\", {\"order\": 10001}],\n \"__env__\": \"base\",\n \"__sls__\": \"test.disable_require\",\n },\n }\n\n minion_opts = salt.config.DEFAULT_MINION_OPTS.copy()\n minion_opts[\"cachedir\"] = str(tmp_path)\n minion_opts[\"disabled_requisites\"] = [\"require\"]\n state_obj = salt.state.State(minion_opts)\n ret = state_obj.call_high(high_data)\n run_num = ret[\"test_|-step_one_|-step_one_|-succeed_with_changes\"][\n \"__run_num__\"\n ]\n assert run_num == 0\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 25, "n_whitespaces": 557, "n_words": 67, "vocab_size": 53, "complexity": 1, "nloc": 34, "token_counts": 160, "n_ast_nodes": 288, "n_identifiers": 18, "random_cut": "def test_render_requisite_require_disabled(tmp_path):\n \n with patch(\"salt.state.State._gather_pillar\") as state_patch:\n high_data = {\n \"step_one\": OrderedDict(\n [\n (\n \"test\",\n [\n OrderedDict(\n [(\"require\", [OrderedDict([(\"test\", \"step_two\")])])]\n ),\n \"succeed_w" }, { "id": 204451, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/checks/security/sessions.py", "file_name": "sessions.py", "fun_name": "_session_middleware", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _session_middleware():\n return \"django.contrib.sessions.middleware.SessionMiddleware\" in settings.MIDDLEWARE\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 8, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "random_cut": "def _session_middleware():\n return \"d" }, { "id": 115854, "commit_id": "74977c69effc89a56080357449d5d337988daab7", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/lightwood_handler/tests/test_lightwood_handler.py", "file_name": "test_lightwood_handler.py", "fun_name": "test_13_train_ts_predictor_no_gby_hor1", "commit_message": "TS tests pass", "code": "def test_13_train_ts_predictor_no_gby_hor1(self):\n query = f\n if self.test_model_2 not in self.handler.get_tables().data_frame.values:\n 
self.handler.native_query(query)\n else:\n self.handler.native_query(f\"DROP PREDICTOR {self.test_model_2}\")\n self.handler.native_query(query)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 69, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 14, "token_counts": 54, "n_ast_nodes": 114, "n_identifiers": 11, "random_cut": "def test_13_train_ts_predictor_no_gby_hor1(self):\n query = f\n if self.test_" }, { "id": 166205, "commit_id": "90140f055892a46f473bd26affab88a7f171e394", "repo": "pandas", "path": "pandas/core/exchange/column.py", "file_name": "column.py", "fun_name": "_get_offsets_buffer", "commit_message": "ENH: Implement DataFrame interchange protocol (#46141)", "code": "def _get_offsets_buffer(self) -> Tuple[PandasBuffer, Any]:\n \n if self.dtype[0] == DtypeKind.STRING:\n # For each string, we need to manually determine the next offset\n values = self._col.to_numpy()\n ptr = 0\n offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64)\n for i, v in enumerate(values):\n # For missing values (in this case, `np.nan` values)\n # we don't increment the pointer\n if isinstance(v, str):\n b = v.encode(encoding=\"utf-8\")\n ptr += len(b)\n\n offsets[i + 1] = ptr\n\n # Convert the offsets to a Pandas \"buffer\" using\n # the NumPy array as the backing store\n buffer = PandasBuffer(offsets)\n\n # Assemble the buffer dtype info\n dtype = (\n DtypeKind.INT,\n 64,\n ArrowCTypes.INT64,\n Endianness.NATIVE,\n ) # note: currently only support native endianness\n else:\n raise NoBufferPresent(\n \"This column has a fixed-length dtype so \"\n \"it does not have an offsets buffer\"\n )\n\n return buffer, dtype\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 490, "n_words": 130, "vocab_size": 97, "complexity": 4, "nloc": 29, "token_counts": 139, "n_ast_nodes": 228, "n_identifiers": 33, "random_cut": "def _get_offsets_buffer(self) -> Tuple[PandasBuffer, Any]:\n \n if self.dtype[0] == DtypeKind.STRING:\n # For each string, we need to manually determine the next offset\n values = self._col.to_numpy()\n ptr = 0\n offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64)\n for i, v in enumerate(values):\n # For missing values (in this case, `np.nan` values)\n # we don't increment the pointer\n if isinstance(v, str):\n b = v.encode(encoding=\"utf-8\")\n ptr += len(b)\n\n offsets[i + 1] = ptr\n\n # Convert the offsets to a Pandas \"buffer\" using\n # the NumPy array as the backing store\n buffer = PandasBuffer(offsets)\n\n # Assemble the buffer dtype info\n dtype = (\n DtypeKind.INT,\n 64,\n ArrowCTypes.INT64,\n Endianness.NATIVE,\n ) # note: currently only support native endianness\n else:\n raise NoBufferPresent(\n \"This column has a fixed-length dtype so \"\n \"it does not have an offsets buffer\"\n )\n\n return buffer, dtype\n" }, { "id": 137777, "commit_id": "8e680c483ce326cefc62e44f68ab1a6948b1c3d2", "repo": "ray", "path": "rllib/evaluate.py", "file_name": "evaluate.py", "fun_name": "append_step", "commit_message": "[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). 
(#28369)", "code": "def append_step(self, obs, action, next_obs, reward, terminated, truncated, info):\n \n if self._outfile:\n if self._save_info:\n self._current_rollout.append(\n [obs, action, next_obs, reward, terminated, truncated, info]\n )\n else:\n self._current_rollout.append(\n [obs, action, next_obs, reward, terminated, truncated]\n )\n self._total_steps += 1\n\n\n@eval_app.command()", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@eval_app.command()", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 175, "n_words": 35, "vocab_size": 22, "complexity": 3, "nloc": 11, "token_counts": 79, "n_ast_nodes": 120, "n_identifiers": 16, "random_cut": "def append_step(self, obs, action, next_obs, reward, terminated, truncated, info):\n \n if self._outfile:\n if self._save_info:\n self._current_rollout.append(\n [obs, action, next_obs, reward, terminated, truncated, info]\n )\n else:\n self._cur" }, { "id": 203991, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/gdal/libgdal.py", "file_name": "libgdal.py", "fun_name": "err_handler", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def err_handler(error_class, error_number, message):\n logger.error(\"GDAL_ERROR %d: %s\", error_number, message)\n\n\nerr_handler = CPLErrorHandler(err_handler)\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 13, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 19, "n_ast_nodes": 39, "n_identifiers": 7, "random_cut": "def err_handler(error_class, error_number, message):\n logger.error(\"GDAL_ERROR %d: %s\", error_number, messa" }, { "id": 133783, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/agents/pg/pg.py", "file_name": "pg.py", "fun_name": "get_default_policy_class", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_default_policy_class(self, config) -> Type[Policy]:\n return PGTorchPolicy if config.get(\"framework\") == \"torch\" else PGTFPolicy\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 19, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 2, "token_counts": 25, "n_ast_nodes": 41, "n_identifiers": 8, "random_cut": "def get_default_policy_class(self, config) -> Type[Policy]:\n return PGTorchPolicy if config.get(\"framework\") == \"torch\" else PGTFPolicy\n" }, { "id": 11806, "commit_id": "9bdefb0a693c4556e0902ef913f4426e3ef5ff65", "repo": "jina", "path": "jina/orchestrate/flow/base.py", "file_name": "base.py", "fun_name": "_get_address_table", "commit_message": "fix: properly disable graphql endpoint by default (#4471)", "code": "def _get_address_table(self, address_table):\n address_table.add_row('🔗', 'Protocol: ', f'{self.protocol}')\n address_table.add_row(\n '🏠',\n 'Local access: ',\n f'[underline]{self.host}:{self.port}[/underline]',\n )\n address_table.add_row(\n '🔒',\n 'Private network: ',\n f'[underline]{self.address_private}:{self.port}[/underline]',\n )\n\n if self.address_public:\n address_table.add_row(\n '🌐',\n 'Public address: ',\n f'[underline]{self.address_public}:{self.port}[/underline]',\n )\n\n if self.protocol == GatewayProtocolType.HTTP:\n address_table.add_row(\n '💬',\n 'Swagger UI: ',\n 
f'[underline]http://localhost:{self.port}/docs[/underline]',\n )\n\n address_table.add_row(\n '📚',\n 'Redoc: ',\n f'[underline]http://localhost:{self.port}/redoc[/underline]',\n )\n if self.args.expose_graphql_endpoint:\n address_table.add_row(\n '💬',\n 'GraphQL UI: ',\n f'[underline][cyan]http://localhost:{self.port}/graphql[/underline][/cyan]',\n )\n\n return address_table\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 478, "n_words": 58, "vocab_size": 38, "complexity": 4, "nloc": 36, "token_counts": 113, "n_ast_nodes": 263, "n_identifiers": 13, "random_cut": "def _get_address_table(self, address_table):\n address_table.add_row('🔗', 'Protocol: ', f'{self.protocol}')\n address_table.add_row(\n '🏠',\n 'Local access: ',\n f'[underline]{self.host}:{self.port}[/underline]',\n )\n address_table.add_row(\n '🔒',\n 'Private network: ',\n f'[underline]{self.address_private}:{self.port}[/underline]',\n )\n\n if self.address_public:\n address_table.add_row(\n '🌐',\n 'Public address: ',\n f'[underline]{self.address_public}:{self.port}[/underline]',\n )\n\n if self.protocol == GatewayProtocolType.HTTP:\n address_table.add_row(\n '💬',\n 'Swagger UI: ',\n f'[underline]http://localhost:{self.port}/docs[/underline]',\n )\n\n address_table.add_row(\n '📚',\n 'Redoc: ',\n f'[underline]http://localhost:{self.port}/redoc[/underline]',\n )\n if self.args.expose_graphql_e" }, { "id": 191954, "commit_id": "3e79d149a16e9fc9b9b6747609615cf8f8607346", "repo": "vision", "path": "test/test_prototype_builtin_datasets.py", "file_name": "test_prototype_builtin_datasets.py", "fun_name": "test_sample", "commit_message": "refactor prototype datasets tests (#5136)\n\n* refactor prototype datasets tests\r\n\r\n* skip tests with insufficient third party dependencies", "code": "def test_sample(self, dataset_mock, config):\n dataset, _ = dataset_mock.load(config)\n\n try:\n sample = next(iter(dataset))\n except Exception as error:\n raise AssertionError(\"Drawing a sample raised the error above.\") from error\n\n if not isinstance(sample, dict):\n raise AssertionError(f\"Samples should be dictionaries, but got {type(sample)} instead.\")\n\n if not sample:\n raise AssertionError(\"Sample dictionary is empty.\")\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 125, "n_words": 47, "vocab_size": 40, "complexity": 4, "nloc": 10, "token_counts": 66, "n_ast_nodes": 119, "n_identifiers": 16, "random_cut": "def test_sample(self, dataset_mock, config):\n dataset, _ = dataset_mock.load(config)\n\n try:\n sample = next(iter(dataset))\n except Exception as error:\n raise AssertionError(\"Drawing a sample raised the error above.\") from error\n\n if not isinstance(sample, dict):\n " }, { "id": 292279, "commit_id": "3bf2be1765f7a33fbce06cbabeb2e2115f2f07c7", "repo": "core", "path": "tests/components/http/test_init.py", "file_name": "test_init.py", "fun_name": "_setup_broken_ssl_pem_files", "commit_message": "Startup with an emergency self signed cert if the ssl certificate cannot be loaded (#66707)", "code": "def _setup_broken_ssl_pem_files(tmpdir):\n test_dir = tmpdir.mkdir(\"test_broken_ssl\")\n cert_path = pathlib.Path(test_dir) / \"cert.pem\"\n cert_path.write_text(\"garbage\")\n key_path = pathlib.Path(test_dir) / \"key.pem\"\n key_path.write_text(\"garbage\")\n return cert_path, key_path\n\n", "url": "https://github.com/home-assistant/core.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 37, "n_words": 20, "vocab_size": 15, "complexity": 1, "nloc": 7, "token_counts": 49, "n_ast_nodes": 90, "n_identifiers": 9, "random_cut": "def _setup_broken_ssl_pem_files(tmpdir):\n test_dir = tmpdir.mkdir(\"test_broken_ssl\")\n cert_path = pathlib.Path(test_dir) / \"cert.pem\"\n cert_path.write_text(\"garbage\")\n key_path = pathlib.Path(test_dir) / \"key.pem\"\n key_path.write_text(\"garbage\")\n return cert_path, key_path\n\n" }, { "id": 59097, "commit_id": "852644aa77ce2a6377e070ed0182859b0fdd0b98", "repo": "prefect", "path": "src/prefect/client/orion.py", "file_name": "orion.py", "fun_name": "api_healthcheck", "commit_message": "Improve client stability (#7090)", "code": "async def api_healthcheck(self) -> Optional[Exception]:\n \n try:\n await self._client.get(\"/health\")\n return None\n except Exception as exc:\n return exc\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 70, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 12, "token_counts": 31, "n_ast_nodes": 57, "n_identifiers": 7, "random_cut": "async def api_healthcheck(self) -> Optional[Exception]:\n \n try:\n await self._client.get(\"/health\")\n return None\n except Exception as exc:\n return exc\n" }, { "id": 7477, "commit_id": "15961c07d8a0d7fc25c5b7395b11f191eab840bd", "repo": "ludwig", "path": "ludwig/backend/ray.py", "file_name": "ray.py", "fun_name": "get_resources_per_worker", "commit_message": "Respect the resource requests in RayPredictor (#2359)\n\n* fix ordering to respect the passed down resources\r\n\r\n* clean up", "code": "def get_resources_per_worker(self) -> Tuple[int, int]:\n trainer_kwargs = self.get_trainer_kwargs()\n resources_per_worker = trainer_kwargs.get(\"resources_per_worker\", {})\n num_gpus = resources_per_worker.get(\"GPU\", 0)\n num_cpus = resources_per_worker.get(\"CPU\", (1 if num_gpus == 0 else 0))\n return num_cpus, num_gpus\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 63, "n_words": 29, "vocab_size": 24, "complexity": 2, "nloc": 6, "token_counts": 62, "n_ast_nodes": 100, "n_identifiers": 10, "random_cut": "def get_resources_per_worker(self) -> Tuple[int, int]:\n trainer_kwargs = self.get_trainer_kwargs()\n resources_per_worker = trainer_kwargs.get(\"resources_per_worker\", {})\n num_gpus = resources_per_worker.get(\"GPU\", 0)\n num_cpus = resources_per_worker.get(\"CPU\", (1 if num_g" }, { "id": 282658, "commit_id": "fae93c67adc9015c1466712f9c8ffa35a8b70872", "repo": "OpenBBTerminal", "path": "bots/stocks/technical_analysis/aroon.py", "file_name": "aroon.py", "fun_name": "aroon_command", "commit_message": "Refactor Bot (#1326)\n\n* First commit\r\n\r\n* Renamed to bot\r\n\r\n* change from discordbot to bots imports\r\n\r\n* refractor cmds\r\n\r\n+ autocomplete helpers\r\n+ updated dps plots/candle\r\n+ quote cmd\r\n\r\n* isort fix\r\n\r\n* Update helpers.py\r\n\r\n* Added first refactor\r\n\r\n* Added commands and refactor\r\n\r\n* Added description handler\r\n\r\n* Through grains\r\n\r\n* ta + image border helper\r\n\r\n* Added indices\r\n\r\n* merged\r\n\r\n* Finished econ\r\n\r\n* options refactor\r\n\r\n* dark_pool_shorts, candle, quote done\r\n\r\n* pagination/dropdown logic to helpers + fixes\r\n\r\n* dd/disc/insider done + add image logic to helpers\r\n\r\n* linting\r\n\r\n* 
removed plt style\r\n\r\n* Almost done with Gov\r\n\r\n* fixing conflicts\r\n\r\n* Finished gov (excpect ones mentioned)\r\n\r\n* screener done\r\n\r\n* Test bug refactor\r\n\r\n* Store changes to switch\r\n\r\n* Finished refactor\r\n\r\n* Fixed error\r\n\r\n* Delete ::40\r\n\r\n* Fixed issue\r\n\r\n* Fixed black\r\n\r\n* finished\r\n\r\n* fixes\r\n\r\nCo-authored-by: teh_coderer \r\nCo-authored-by: didierlopes.eth ", "code": "def aroon_command(ticker=\"\", length=\"25\", scalar=\"100\", start=\"\", end=\"\"):\n \n\n # Debug\n if cfg.DEBUG:\n logger.debug(\n \"ta-aroon %s %s %s %s %s\",\n ticker,\n length,\n scalar,\n start,\n end,\n )\n\n # Check for argument\n if ticker == \"\":\n raise Exception(\"Stock ticker is required\")\n\n if start == \"\":\n start = datetime.now() - timedelta(days=365)\n else:\n start = datetime.strptime(start, cfg.DATE_FORMAT)\n\n if end == \"\":\n end = datetime.now()\n else:\n end = datetime.strptime(end, cfg.DATE_FORMAT)\n\n if not length.lstrip(\"-\").isnumeric():\n raise Exception(\"Number has to be an integer\")\n length = int(length)\n if not scalar.lstrip(\"-\").isnumeric():\n raise Exception(\"Number has to be an integer\")\n scalar = float(scalar)\n\n ticker = ticker.upper()\n df_stock = helpers.load(ticker, start)\n if df_stock.empty:\n raise Exception(\"Stock ticker is invalid\")\n\n # Retrieve Data\n df_stock = df_stock.loc[(df_stock.index >= start) & (df_stock.index < end)]\n\n df_ta = trend_indicators_model.aroon(\n df_stock[\"High\"], df_stock[\"Low\"], length, scalar\n )\n\n # Output Data\n aadown = df_ta.columns[0].replace(\"_\", \" \")\n aaup = df_ta.columns[1].replace(\"_\", \" \")\n aaosc = df_ta.columns[2].replace(\"_\", \" \")\n\n fig = make_subplots(\n rows=3,\n cols=1,\n shared_xaxes=True,\n vertical_spacing=0.07,\n row_width=[0.2, 0.2, 0.2],\n )\n fig.add_trace(\n go.Scatter(\n name=ticker,\n x=df_stock.index,\n y=df_stock[\"Adj Close\"].values,\n line=dict(color=\"#fdc708\", width=2),\n opacity=1,\n showlegend=False,\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n name=f\"Aroon DOWN ({aadown})\",\n x=df_ta.index,\n y=df_ta.iloc[:, 0].values,\n opacity=1,\n ),\n row=2,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n name=f\"Aroon UP ({aaup})\",\n x=df_ta.index,\n y=df_ta.iloc[:, 1].values,\n opacity=1,\n ),\n row=2,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n name=f\"Aroon OSC ({aaosc})\",\n x=df_ta.index,\n y=df_ta.iloc[:, 2].values,\n opacity=1,\n ),\n row=3,\n col=1,\n )\n fig.add_hline(\n y=50,\n fillcolor=\"grey\",\n opacity=1,\n layer=\"below\",\n line_width=3,\n line=dict(color=\"grey\", dash=\"dash\"),\n row=2,\n col=1,\n )\n fig.update_layout(\n margin=dict(l=0, r=20, t=30, b=20),\n template=cfg.PLT_TA_STYLE_TEMPLATE,\n colorway=cfg.PLT_TA_COLORWAY,\n title=f\"Aroon on {ticker}\",\n title_x=0.5,\n yaxis_title=\"Stock Price ($)\",\n yaxis=dict(\n fixedrange=False,\n ),\n xaxis=dict(\n rangeslider=dict(visible=False),\n type=\"date\",\n ),\n dragmode=\"pan\",\n legend=dict(yanchor=\"top\", y=0.99, xanchor=\"left\", x=0.01),\n )\n config = dict({\"scrollZoom\": True})\n imagefile = \"ta_aroon.png\"\n\n # Check if interactive settings are enabled\n plt_link = \"\"\n if cfg.INTERACTIVE:\n html_ran = helpers.uuid_get()\n fig.write_html(f\"in/aroon_{html_ran}.html\", config=config)\n plt_link = f\"[Interactive]({cfg.INTERACTIVE_URL}/aroon_{html_ran}.html)\"\n\n fig.update_layout(\n width=800,\n height=500,\n )\n imagefile = helpers.image_border(imagefile, fig=fig)\n\n return {\n \"title\": f\"Stocks: Aroon-Indicator 
{ticker}\",\n \"description\": plt_link,\n \"imagefile\": imagefile,\n }\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 1149, "n_words": 288, "vocab_size": 178, "complexity": 9, "nloc": 130, "token_counts": 757, "n_ast_nodes": 1200, "n_identifiers": 97, "random_cut": "def aroon_command(ticker=\"\", length=\"25\", scalar=\"100\", start=\"\", end=\"\"):\n \n\n # Debug\n if cfg.DEBUG:\n logger.debug(\n \"ta-aroon %s %s %s %s %s\",\n ticker,\n length,\n scalar,\n start,\n end,\n )\n\n # Check for argument\n if ticker == \"\":\n raise Exception(\"Stock ticker is required\")\n\n if start == \"\":\n start = datetime.now() - timedelta(days=365)\n else:\n start = datetime.strptime(start, cfg.DATE_FORMAT)\n\n if end == \"\":\n end = datetime.now()\n else:\n end = datetime.strptime(end, cfg.DATE_FORMAT)\n\n if not length.lstrip(\"-\").isnumeric():\n raise Excep" }, { "id": 145616, "commit_id": "0bab8dbfe0b5a58a06dd226950bdd70b0b186655", "repo": "ray", "path": "python/ray/serve/config.py", "file_name": "config.py", "fun_name": "to_proto_bytes", "commit_message": "[Serve] Add test for controller managing Java Replica (#22628)", "code": "def to_proto_bytes(self):\n data = self.dict()\n if data.get(\"user_config\"):\n data[\"user_config\"] = pickle.dumps(data[\"user_config\"])\n if data.get(\"autoscaling_config\"):\n data[\"autoscaling_config\"] = AutoscalingConfigProto(\n **data[\"autoscaling_config\"]\n )\n return DeploymentConfigProto(**data).SerializeToString()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 94, "n_words": 19, "vocab_size": 16, "complexity": 3, "nloc": 9, "token_counts": 65, "n_ast_nodes": 117, "n_identifiers": 10, "random_cut": "def to_proto_bytes(self):\n data = self.dict()\n if data.get(\"user_config\"):\n data[\"user_conf" }, { "id": 306988, "commit_id": "52b5e1779f1ed6e5005dc0bdff4137040d7216fb", "repo": "core", "path": "homeassistant/components/plex/media_player.py", "file_name": "media_player.py", "fun_name": "session", "commit_message": "Use new media player enums [p] (#78058)", "code": "def session(self, session):\n self._session = session\n if session:\n self.session_device = self.session.player\n self.update_state(self.session.state)\n else:\n self._attr_state = MediaPlayerState.IDLE\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 69, "n_words": 16, "vocab_size": 14, "complexity": 2, "nloc": 7, "token_counts": 43, "n_ast_nodes": 69, "n_identifiers": 10, "random_cut": "def session(self, session):\n self._session = session\n if session:\n self.session_device = self.session" }, { "id": 222505, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/difflib.py", "file_name": "difflib.py", "fun_name": "get_grouped_opcodes", "commit_message": "add python 3.10.4 for windows", "code": "def get_grouped_opcodes(self, n=3):\n \n\n codes = self.get_opcodes()\n if not codes:\n codes = [(\"equal\", 0, 1, 0, 1)]\n # Fixup leading and trailing groups if they show no changes.\n if codes[0][0] == 'equal':\n tag, i1, i2, j1, j2 = codes[0]\n codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2\n if codes[-1][0] == 'equal':\n tag, i1, i2, j1, j2 = codes[-1]\n codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)\n\n nn = n + n\n group = 
[]\n for tag, i1, i2, j1, j2 in codes:\n # End the current group and start a new one whenever\n # there is a large range with no changes.\n if tag == 'equal' and i2-i1 > nn:\n group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))\n yield group\n group = []\n i1, j1 = max(i1, i2-n), max(j1, j2-n)\n group.append((tag, i1, i2, j1 ,j2))\n if group and not (len(group)==1 and group[0][0] == 'equal'):\n yield group\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 386, "n_words": 146, "vocab_size": 76, "complexity": 10, "nloc": 21, "token_counts": 276, "n_ast_nodes": 410, "n_identifiers": 16, "random_cut": "def get_grouped_opcodes(self, n=3):\n \n\n codes = self.get_opcodes()\n if not codes:\n codes = [(\"equal\", 0, 1, 0, 1)]\n # Fixup leading and trailing groups if they show no changes.\n if codes[0][0] == 'equal':\n tag, i1, i2, j1, j2 = codes[0]\n codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2\n if codes[-1][0] == 'equal':\n tag, i1, i2, j1, j2 = codes[-1]\n codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)\n\n nn = n + n\n group = []\n for tag, i1, i2, j1, j2 in codes:\n # End the current group and start a new one whenever\n # there is a large range with no changes.\n if tag == 'equal' and i2-i1 > nn:\n group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))\n yield group\n group = []\n i1, j1 = max(i1, i2-n), max(j1, j2-n)\n " }, { "id": 70522, "commit_id": "d6d43338efdeb85395918d106a1cb3f187ab6fa7", "repo": "wagtail", "path": "wagtail/admin/forms/pages.py", "file_name": "pages.py", "fun_name": "is_valid", "commit_message": "Drop support for Django <3.2", "code": "def is_valid(self):\n comments = self.formsets.get('comments')\n # Remove the comments formset if the management form is invalid\n if comments and not comments.management_form.is_valid():\n del self.formsets['comments']\n return super().is_valid()\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 63, "n_words": 25, "vocab_size": 21, "complexity": 3, "nloc": 5, "token_counts": 42, "n_ast_nodes": 74, "n_identifiers": 7, "random_cut": "def is_valid(self):\n comments = self.formsets.get('comments')\n # Remove the comments formset if the management form is invalid\n if comments and not comments.management_form.is_valid" }, { "id": 12109, "commit_id": "fab9f0736c8d99558d93020cb3f27108627218f1", "repo": "jina", "path": "tests/unit/hubble/test_hubio.py", "file_name": "test_hubio.py", "fun_name": "test_push", "commit_message": "feat(hub): add --no-cache option to \"jina hub push\" cli (#4594)", "code": "def test_push(mocker, monkeypatch, path, mode, tmpdir, force, tag, no_cache):\n mock = mocker.Mock()\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 14, "n_words": 12, "vocab_size": 12, "complexity": 8, "nloc": 43, "token_counts": 313, "n_ast_nodes": 35, "n_identifiers": 11, "random_cut": "def test_push(mocker, monkeypatch, path, mode, tmpdir, force, tag, no_cache):\n mock = mocker.Mock()\n" }, { "id": 177832, "commit_id": "5e054ce3ab751f3445d2fc1b2923bb604048c40b", "repo": "label-studio", "path": "label_studio/data_manager/managers.py", "file_name": "managers.py", "fun_name": "apply_filters", "commit_message": "fix: DEV-2214: Use contain instead of icontain in annotation result (#2308)\n\n* fix: Annotation result performance with 
contain\r\n\r\n* Back\r\n\r\n* Works!\r\n\r\n* Some\r\n\r\n* Try to fix completed at performance\r\n\r\n* Fix tests\r\n\r\n* Add merge migration", "code": "def apply_filters(queryset, filters, project):\n if not filters:\n return queryset\n\n # convert conjunction to orm statement\n filter_expressions = []\n\n for _filter in filters.items:\n\n # we can also have annotations filters\n if not _filter.filter.startswith(\"filter:tasks:\") or _filter.value is None:\n continue\n\n # django orm loop expression attached to column name\n preprocess_field_name = load_func(settings.PREPROCESS_FIELD_NAME)\n field_name, _ = preprocess_field_name(_filter.filter, project.only_undefined_field)\n\n # filter preprocessing, value type conversion, etc..\n preprocess_filter = load_func(settings.DATA_MANAGER_PREPROCESS_FILTER)\n _filter = preprocess_filter(_filter, field_name)\n\n # custom expressions for enterprise\n custom_filter_expressions = load_func(settings.DATA_MANAGER_CUSTOM_FILTER_EXPRESSIONS)\n filter_expression = custom_filter_expressions(_filter, field_name)\n if filter_expression:\n filter_expressions.append(filter_expression)\n continue\n\n # annotators\n if field_name == 'annotators' and _filter.operator == Operator.CONTAINS:\n filter_expressions.append(Q(annotations__completed_by=int(_filter.value)))\n continue\n elif field_name == 'annotators' and _filter.operator == Operator.NOT_CONTAINS:\n filter_expressions.append(~Q(annotations__completed_by=int(_filter.value)))\n continue\n elif field_name == 'annotators' and _filter.operator == Operator.EMPTY:\n value = cast_bool_from_str(_filter.value)\n filter_expressions.append(Q(annotations__completed_by__isnull=value))\n continue\n\n # annotations results & predictions results\n if field_name in ['annotations_results', 'predictions_results']:\n result = add_result_filter(field_name, _filter, filter_expressions, project)\n if result == 'exit':\n return queryset.none()\n elif result == 'continue':\n continue\n\n # annotation ids\n if field_name == 'annotations_ids':\n field_name = 'annotations__id'\n if 'contains' in _filter.operator:\n # convert string like \"1 2,3\" => [1,2,3]\n _filter.value = [int(value)\n for value in re.split(',|;| ', _filter.value)\n if value and value.isdigit()]\n _filter.operator = 'in_list' if _filter.operator == 'contains' else 'not_in_list'\n elif 'equal' in _filter.operator:\n if not _filter.value.isdigit():\n _filter.value = 0\n\n # annotators\n if field_name == 'annotators' and _filter.operator == Operator.CONTAINS:\n filter_expressions.append(Q(annotations__completed_by=int(_filter.value)))\n continue\n elif field_name == 'annotators' and _filter.operator == Operator.NOT_CONTAINS:\n filter_expressions.append(~Q(annotations__completed_by=int(_filter.value)))\n continue\n elif field_name == 'annotators' and _filter.operator == Operator.EMPTY:\n value = cast_bool_from_str(_filter.value)\n filter_expressions.append(Q(annotations__completed_by__isnull=value))\n continue\n\n # predictions model versions\n if field_name == 'predictions_model_versions' and _filter.operator == Operator.CONTAINS:\n q = Q()\n for value in _filter.value:\n q |= Q(predictions__model_version__contains=value)\n filter_expressions.append(q)\n continue\n elif field_name == 'predictions_model_versions' and _filter.operator == Operator.NOT_CONTAINS:\n q = Q()\n for value in _filter.value:\n q &= ~Q(predictions__model_version__contains=value)\n filter_expressions.append(q)\n continue\n elif field_name == 'predictions_model_versions' and _filter.operator 
== Operator.EMPTY:\n value = cast_bool_from_str(_filter.value)\n filter_expressions.append(Q(predictions__model_version__isnull=value))\n continue\n\n # use other name because of model names conflict\n if field_name == 'file_upload':\n field_name = 'file_upload_field'\n\n # annotate with cast to number if need\n if _filter.type == 'Number' and field_name.startswith('data__'):\n json_field = field_name.replace('data__', '')\n queryset = queryset.annotate(**{\n f'filter_{json_field.replace(\"$undefined$\", \"undefined\")}':\n Cast(KeyTextTransform(json_field, 'data'), output_field=FloatField())\n })\n clean_field_name = f'filter_{json_field.replace(\"$undefined$\", \"undefined\")}'\n else:\n clean_field_name = field_name\n\n # special case: predictions, annotations, cancelled --- for them 0 is equal to is_empty=True\n if clean_field_name in ('total_predictions', 'total_annotations', 'cancelled_annotations') and \\\n _filter.operator == 'empty':\n _filter.operator = 'equal' if cast_bool_from_str(_filter.value) else 'not_equal'\n _filter.value = 0\n\n # get type of annotated field\n value_type = 'str'\n if queryset.exists():\n value_type = type(queryset.values_list(field_name, flat=True)[0]).__name__\n\n if (value_type == 'list' or value_type == 'tuple') and 'equal' in _filter.operator:\n raise Exception('Not supported filter type')\n\n # special case: for strings empty is \"\" or null=True\n if _filter.type in ('String', 'Unknown') and _filter.operator == 'empty':\n value = cast_bool_from_str(_filter.value)\n if value: # empty = true\n q = Q(\n Q(**{field_name: None}) | Q(**{field_name+'__isnull': True})\n )\n if value_type == 'str':\n q |= Q(**{field_name: ''})\n if value_type == 'list':\n q = Q(**{field_name: [None]})\n\n else: # empty = false\n q = Q(\n ~Q(**{field_name: None}) & ~Q(**{field_name+'__isnull': True})\n )\n if value_type == 'str':\n q &= ~Q(**{field_name: ''})\n if value_type == 'list':\n q = ~Q(**{field_name: [None]})\n\n filter_expressions.append(q)\n continue\n\n # regex pattern check\n elif _filter.operator == 'regex':\n try:\n re.compile(pattern=str(_filter.value))\n except Exception as e:\n logger.info('Incorrect regex for filter: %s: %s', _filter.value, str(e))\n return queryset.none()\n\n # append operator\n field_name = f\"{clean_field_name}{operators.get(_filter.operator, '')}\"\n\n # in\n if _filter.operator == \"in\":\n cast_value(_filter)\n filter_expressions.append(\n Q(\n **{\n f\"{field_name}__gte\": _filter.value.min,\n f\"{field_name}__lte\": _filter.value.max,\n }\n ),\n\n )\n\n # not in\n elif _filter.operator == \"not_in\":\n cast_value(_filter)\n filter_expressions.append(\n ~Q(\n **{\n f\"{field_name}__gte\": _filter.value.min,\n f\"{field_name}__lte\": _filter.value.max,\n }\n ),\n\n )\n\n # in list\n elif _filter.operator == \"in_list\":\n filter_expressions.append(\n Q(**{f\"{field_name}__in\": _filter.value}),\n\n )\n\n # not in list\n elif _filter.operator == \"not_in_list\":\n filter_expressions.append(\n ~Q(**{f\"{field_name}__in\": _filter.value}),\n\n )\n\n # empty\n elif _filter.operator == 'empty':\n if cast_bool_from_str(_filter.value):\n filter_expressions.append(Q(**{field_name: True}))\n else:\n filter_expressions.append(~Q(**{field_name: True}))\n\n # starting from not_\n elif _filter.operator.startswith(\"not_\"):\n cast_value(_filter)\n filter_expressions.append(~Q(**{field_name: _filter.value}))\n\n # all others\n else:\n cast_value(_filter)\n filter_expressions.append(Q(**{field_name: _filter.value}))\n\n logger.debug(f'Apply filter: 
{filter_expressions}')\n if filters.conjunction == ConjunctionEnum.OR:\n result_filter = Q()\n for filter_expression in filter_expressions:\n result_filter.add(filter_expression, Q.OR)\n queryset = queryset.filter(result_filter)\n else:\n for filter_expression in filter_expressions:\n queryset = queryset.filter(filter_expression)\n return queryset\n\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 24, "n_whitespaces": 2694, "n_words": 643, "vocab_size": 267, "complexity": 66, "nloc": 164, "token_counts": 1194, "n_ast_nodes": 2132, "n_identifiers": 74, "random_cut": "def apply_filters(queryset, filters, project):\n if not filters:\n return queryset\n\n # convert conjunction to orm statement\n filter_expressions = []\n\n for _filter in filters.items:\n\n # we can also have annotations filters\n if not _filter.filter.startswith(\"filter:tasks:\") or _filter.value is None:\n continue\n\n # django orm loop expression attached to column name\n preprocess_field_name = load_func(settings.PREPROCESS_FIELD_NAME)\n field_name, _ = preprocess_field_name(_filter.filter, project.only_undefined_field)\n\n # filter preprocessing, value type conversion, etc..\n preprocess_filter = load_func(settings.DATA_MANAGER_PREPROCESS_FILTER)\n _filter = preprocess_filter(_filter, field_name)\n\n # custom expressions for enterprise\n custom_filter_expressions = load_func(settings.DATA_MANAGER_CUSTOM_FILTER_EXPRESSIONS)\n filter_expression = custom_filter_expressions(_filter, field_name)\n if filter_expression:\n filter_expressions.append(filter_expression)\n continue\n\n # annotators\n if field_name == 'annotators' and _filter.operator == Operator.CONTAINS:\n filter_expressions.append(Q(annotations__completed_by=int(_filter.value)))\n continue\n elif field_name == 'annotators' and _filter.operator == Operator.NOT_CONTAINS:\n filter_expressions.append(~Q(annotations__completed_by=int(_filter.value)))\n continue\n elif field_name == 'annotators' and _filter.operator == Operator.EMPTY:\n value = cast_bool_from_str(_filter.value)\n filter_expressions.append(Q(annotations__completed_by__isnull=value))\n continue\n\n # annotations results & predictions results\n if field_name in ['annotations_results', 'predictions_results']:\n result = add_result_filter(field_name, _filter, filter_expressions, project)\n if result == 'exit':\n return queryset.none()\n elif result == 'continue':\n continue\n\n # annotation ids\n if field_name == 'annotations_ids':\n field_name = 'annotations__id'\n if 'contains' in _filter.operator:\n # convert string like \"1 2,3\" => [1,2,3]\n _filter.value = [int(value)\n for value in re.split(',|;| ', _filter.value)\n if value and value.isdigit()]\n _filter.operator = 'in_list' if _filter.operator == 'contains' else 'not_in_list'\n elif 'equal' in _filter.operator:\n if not _filter.value.isdigit():\n _filter.value = 0\n\n # annotators\n if field_name == 'annotators' and _filter.operator == Operator.CONTAINS:\n filter_expressions.append(Q(annotations__completed_by=int(_filter.value)))\n continue\n elif field_name == 'annotators' and _filter.operator == Operator.NOT_CONTAINS:\n filter_expressions.append(~Q(annotations__completed_by=int(_filter.value)))\n continue\n elif field_name == 'annotators' and _filter.operator == Operator.EMPTY:\n value = cast_bool_from_str(_filter.value)\n filter_expressions.append(Q(annotations__completed_by__isnull=value))\n continue\n\n # predictions model versions\n if field_name == 
'predictions_model_versions' and _filter.operator == Operator.CONTAINS:\n q = Q()\n for value in _filter.value:\n q |= Q(predictions__model_version__contains=value)\n filter_expressions.append(q)\n continue\n elif field_name == 'predictions_model_versions' and _filter.operator == Operator.NOT_CONTAINS:\n q = Q()\n for value in _filter.value:\n q &= ~Q(predictions__model_version__contains=value)\n filter_expressions.append(q)\n continue\n elif field_name == 'predictions_model_versions' and _filter.operator == Operator.EMPTY:\n value = cast_bool_from_str(_filter.value)\n filter_expressions.append(Q(predictions__model_version__isnull=value))\n continue\n\n # use other name because of model names conflict\n if field_name == 'file_upload':\n field_name = 'file_upload_field'\n\n # annotate with cast to number if need\n if _filter.type == 'Number' and field_name.startswith('data__'):\n json_field = field_name.replace('data__', '')\n queryset = queryset.annotate(**{\n f'filter_{json_field.replace(\"$undefined$\", \"undefined\")}':\n Cast(KeyTextTransform(json_field, 'data'), output_field=FloatField())\n })\n clean_field_name = f'filter_{json_field.replace(\"$undefined$\", \"undefined\")}'\n else:\n clean_field_name = field_name\n\n # special case: predictions, annotations, cancelled --- for them 0 is equal to is_empty=True\n if clean_field_name in ('total_predictions', 'total_annotations', 'cancelled_annotations') and \\\n _filter.operator == 'empty':\n _filter.operator = 'equal' if cast_bool_from_str(_filter.value) else 'not_equal'\n _filter.value = 0\n\n # get type of annotated field\n value_type = 'str'\n if queryset.exists():\n value_type = type(queryset.values_list(field_name, flat=True)[0]).__name__\n\n if (value_type == 'list' or value_type == 'tuple') and 'equal' in _filter.operator:\n raise Exception('Not supported filter type')\n\n # special case: for strings empty is \"\" or null=True\n if _filter.type in ('String', 'Unknown') and _filter.operator == 'empty':\n value = cast_bool_from_str(_filter.value)\n if value: # empty = true\n q = Q(\n Q(**{field_name: None}) | Q(**{field_name+'__isnull': True})\n )\n if value_type == 'str':\n q |= Q(**{field_name: ''})\n if value_type == 'list':\n q = Q(**{field_name: [None]})\n\n else: # empty = false\n q = Q(\n ~Q(**{field_name: None}) & ~Q(**{field_name+'__isnull': True})\n )\n if value_type == 'str':\n q &= ~Q(**{field_name: ''})\n if value_type == 'list':\n q = ~Q(**{field_name: [None]})\n\n filter_expressions.append(q)\n continue\n\n # regex pattern check\n elif _filter.operator == 'regex':\n try:\n re.compile(pattern=str(_filter.value))\n except Exception as e:\n logger.info('Incorrect regex for filter: %s: %s', _filter.value, str(e))\n return queryset.none()\n\n # append operator\n field_name = f\"{clean_field_name}{operators.get(_filter.operator, '')}\"\n\n # in\n if _filter.operator == \"in" }, { "id": 303375, "commit_id": "9af64b1c3b1a712d24fc7b86ed2cc5e1fa613f26", "repo": "core", "path": "homeassistant/components/zha/light.py", "file_name": "light.py", "fun_name": "async_update", "commit_message": "Improve type hints in zha light (#75947)", "code": "async def async_update(self) -> None:\n \n if self._transitioning:\n self.debug(\"skipping async_update while transitioning\")\n return\n await self.async_get_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 57, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 6, 
"token_counts": 26, "n_ast_nodes": 50, "n_identifiers": 5, "random_cut": "async def async_update(self) -> None:\n \n if self._transitioning:\n self.debug(\"skipping asy" }, { "id": 285345, "commit_id": "9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/stocks/discovery/test_finnhub_view.py", "file_name": "test_finnhub_view.py", "fun_name": "test_past_ipo_empty_df", "commit_message": "Here we merge all API Refactor related branches (#2236)\n\n* Update api.py\r\n\r\n* Updated forex menu\r\n\r\n* refactor ycrv command\r\n\r\n* refactor ycrv command black\r\n\r\n* refactor ecocal command\r\n\r\n* Minh changes\r\n\r\n* Adding space to test pushing\r\n\r\n* title fix ecocal df\r\n\r\n* get economic calendar annotation\r\n\r\n* fix investingcom tests\r\n\r\n* refactor index command\r\n\r\n* refactor overview command\r\n\r\n* give defaults to wsj view function args\r\n\r\n* rename date args investincom\r\n\r\n* refacto bigmac command\r\n\r\n* fix ecocal typo\r\n\r\n* refactor rtps command\r\n\r\n* alphavantage gdp\r\n\r\n* alphavantage gdp per capita\r\n\r\n* alphavantage cpi\r\n\r\n* alphavantage tyld\r\n\r\n* alphavantage inf\r\n\r\n* refactor macro command\r\n\r\n* refactor macro command w helpers\r\n\r\n* refactor treasury command\r\n\r\n* fix macro on terminal\r\n\r\n* treasury labels\r\n\r\n* refactor maturities\r\n\r\n* update treasury maturities doc strings\r\n\r\n* refactor get economic calendar finhub\r\n\r\n* refactor map command api\r\n\r\n* display map filter choices\r\n\r\n* route economy api to performance map\r\n\r\n* route economy api to performance map\r\n\r\n* display group choices on valuation command\r\n\r\n* refactor performance and valuation commands\r\n\r\n* refactor spectrum model and view\r\n\r\n* add choices to spectrum controller\r\n\r\n* delete image after view\r\n\r\n* fix model tests finviz\r\n\r\n* fix finciz view tests\r\n\r\n* refactor futures\r\n\r\n* fix some tests\r\n\r\n* fix more tests\r\n\r\n* fix controller test\r\n\r\n* refactor fred series notes\r\n\r\n* update fred notes docstring\r\n\r\n* refacto fred series ids\r\n\r\n* fix pred and qa when empty datasets\r\n\r\n* refactor fred\r\n\r\n* uncomment stuff\r\n\r\n* refacto get series data\r\n\r\n* fix some tests\r\n\r\n* set defaults on args\r\n\r\n* refactor fred yield curve\r\n\r\n* black\r\n\r\n* fix spell and remove ecocal names\r\n\r\n* fix linting\r\n\r\n* linting\r\n\r\n* pylint fix\r\n\r\n* change dangerous defaults\r\n\r\n* Working through crypto fixes (#2256)\r\n\r\n* Working through crypto fixes\r\n\r\n* Continued adding crypto stuff\r\n\r\n* Added crypto overview\r\n\r\n* Added test fixes\r\n\r\n* Added fixtures\r\n\r\n* Fixed tests\r\n\r\n* Fixed charting issue\r\n\r\n* Removed broken APIs\r\n\r\n* Final adjustments\r\n\r\n* Added test fixes\r\n\r\n* map get groups and get ycrv countries into old api\r\n\r\n* exposed econdb helper funcs\r\n\r\n* remove helpers\r\n\r\n* refactor search indices\r\n\r\n* linting\r\n\r\n* refactor arg currency\r\n\r\n* pylint from currency\r\n\r\n* Started switching crpyto ascending to ascend\r\n\r\n* Merging\r\n\r\n* Portfolio model arguements, params, and docstring\r\n\r\n* Refactored for etf commands (#2292)\r\n\r\n* Refactored for etf commands\r\n\r\n* Fixed tests\r\n\r\n* Added load command\r\n\r\n* Fixed menu\r\n\r\n* Portfolio logic fixes\r\n\r\n* Added econometrics (#2260)\r\n\r\n* Added econometrics\r\n\r\n* Fixed tests\r\n\r\n* Simplified API\r\n\r\n* Added test fixes\r\n\r\n* Added test csv\r\n\r\n* 
Allowed examples to be loaded\r\n\r\n* Fund refactor (#2291)\r\n\r\n* Fund refactor\r\n\r\n* Changed fund_name and fund to name\r\n\r\n* Changed ascending to ascend\r\n\r\n* Stock menu refactoring for easier API usage (#2194)\r\n\r\n* Stocks refactoring for easier API usage\r\n\r\n* Linting\r\n\r\n* Refactor newly added features\r\n\r\n* Linting\r\n\r\n* Fixing tests\r\n\r\n* Refactor common files used by stocks menu\r\n\r\n* Fixing flake8\r\n\r\n* Fix linting and tests\r\n\r\n* Linting\r\n\r\n* Fix flake8\r\n\r\n* refactor insider_data\r\n\r\n* refactor mentions\r\n\r\n* refactor watchlist\r\n\r\n* refactor sentiment\r\n\r\n* refactor sentiment\r\n\r\n* fix yahoofinance tests\r\n\r\n* refactor load and candle\r\n\r\n* refactor get_news and display_news\r\n\r\n* refactor stocks.ins.act\r\n\r\n* candle default matplotlib\r\n\r\n* fix yahoofinance_view tests\r\n\r\n* fix ark model tests\r\n\r\n* fix ark view tests\r\n\r\n* fix business insider model\r\n\r\n* fix business insider view\r\n\r\n* refactor csimarket model\r\n\r\n* fix tests csi market model\r\n\r\n* update dd controller\r\n\r\n* fix get suppliers tests\r\n\r\n* fix dd controller tests\r\n\r\n* fix finhub tests\r\n\r\n* fix finviz tests\r\n\r\n* fix fmp tests\r\n\r\n* fix marketwatch tests\r\n\r\n* corrected argument keywords in test_bt_model\r\n\r\n* corrected argument keywords in test_bt_view\r\n\r\n* refactor fa controller\r\n\r\n* refactor marketwatch view\r\n\r\n* refactor gov controller\r\n\r\n* fix tests fa av\r\n\r\n* fix tests elect\r\n\r\n* fix dcf tests\r\n\r\n* fix polygon tests\r\n\r\n* fix fmp tests\r\n\r\n* fix quiverquant tests\r\n\r\n* fix yahoofinance fa tests\r\n\r\n* fix more fa tests\r\n\r\n* fix insider tests\r\n\r\n* fix more tests\r\n\r\n* fix more tests\r\n\r\n* fix options tests\r\n\r\n* fix stock gov tests\r\n\r\n* fix tests test_ba_controller\r\n\r\n* fix tests for test_finviz_compare_model.py\r\n\r\n* fixed 2 tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fix final tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* Fix tests\r\n\r\n* black\r\n\r\n* forgot to black tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* flakefix\r\n\r\n* Tests + code : Stocks / Discovery\r\n\r\n* fix tests\r\n\r\n* added recorder\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* black\r\n\r\n* black\r\n\r\n* remove unused imports\r\n\r\n* refactor display raw\r\n\r\n* sia dicts fix\r\n\r\n* pylint\r\n\r\n* linting\r\n\r\n* remove dangerous default\r\n\r\n* fix tests\r\n\r\n* fix beta model test\r\n\r\n* black\r\n\r\n* skip screener qa test\r\n\r\n* change sector path to sectors\r\n\r\n* update tests readme\r\n\r\n* fix metric defaults\r\n\r\n* black\r\n\r\n* substitute lost ticker\r\n\r\n* defaults cpic\r\n\r\n* another round on sia\r\n\r\n* refactor cramer\r\n\r\n* reduce default tweets on sentiment\r\n\r\n* refactor yf hist, corr, volume\r\n\r\n* arkorders default\r\n\r\n* refactor income, balance, cashflow\r\n\r\n* refacto scorr, screener, getfinnhub\r\n\r\n* refactor stockgrid\r\n\r\n* ibkr refactor\r\n\r\n* another round on stockgrid\r\n\r\n* add dividens end point\r\n\r\n* refactor discovery endpoints\r\n\r\n* update docstrings with similar input\r\n\r\n* refactor messages\r\n\r\n* refactor ba\r\n\r\n* refactor regioons\r\n\r\n* refactor twitter sentiment\r\n\r\n* refactor hist\r\n\r\n* 
refactor regions\r\n\r\n* give default to timeframe\r\n\r\n* refactor bunch of defaults and arg names\r\n\r\n* remove leftover imports\r\n\r\n* refactor vwap\r\n\r\n* let tests run\r\n\r\n* fix tests\r\n\r\n* fix stock tests\r\n\r\n* fix stockanalysis tests\r\n\r\n* flake\r\n\r\n* MYPY\r\n\r\n* Made important changes\r\n\r\n* added fixes\r\n\r\n* Fixed big issue\r\n\r\n* Added fixes to tests\r\n\r\n* fix qa tests\r\n\r\n* fix tests\r\n\r\n* fix 1 more test\r\n\r\n* last stocks failing\r\n\r\n* fix crypto test\r\n\r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: montezdesousa \r\nCo-authored-by: hjoaquim \r\nCo-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com>\r\nCo-authored-by: colin99d \r\n\r\n* fix portfolio tests\r\n\r\n* change period to window\r\n\r\n* update ca docstrings\r\n\r\n* refactor get_similar_companies func\r\n\r\n* Fixed\r\n\r\n* Update CI\r\n\r\n* Update CI 2\r\n\r\n* Update CI 3\r\n\r\n* Update dependencies\r\n\r\nCo-authored-by: colin99d \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: montezdesousa \r\nCo-authored-by: James Simmons \r\nCo-authored-by: Theodore Aptekarev \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com>\r\nCo-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com>\r\nCo-authored-by: northern-64bit <75195383+northern-64bit@users.noreply.github.com>\r\nCo-authored-by: hjoaquim ", "code": "def test_past_ipo_empty_df(mocker):\n mocker.patch(\n \"openbb_terminal.stocks.discovery.finnhub_view.finnhub_model.get_ipo_calendar\",\n return_value=pd.DataFrame(),\n )\n\n finnhub_view.past_ipo(\n num_days_behind=2, start_date=\"2021-12-01\", limit=20, export=\"\"\n )\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.record_stdout", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.record_stdout", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 44, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 8, "token_counts": 40, "n_ast_nodes": 95, "n_identifiers": 17, "random_cut": "def test_past_ipo_empty_df(mocker):\n mocker.patch(\n " }, { "id": 259194, "commit_id": "a794c58692a1f3e7a85a42d8c7f7ddd5fcf18baa", "repo": "scikit-learn", "path": "sklearn/semi_supervised/tests/test_self_training.py", "file_name": "test_self_training.py", "fun_name": "test_base_estimator_meta_estimator", "commit_message": "MNT Replace if_delegate_has_method with available_if in ensemble and semi_supervised (#20545)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def test_base_estimator_meta_estimator():\n # Check that a meta-estimator relying on an estimator implementing\n # `predict_proba` will work even if it does expose this method before being\n # fitted.\n # Non-regression test for:\n # https://github.com/scikit-learn/scikit-learn/issues/19119\n\n base_estimator = StackingClassifier(\n estimators=[\n (\"svc_1\", SVC(probability=True)),\n (\"svc_2\", SVC(probability=True)),\n ],\n final_estimator=SVC(probability=True),\n cv=2,\n )\n\n assert hasattr(base_estimator, \"predict_proba\")\n clf = SelfTrainingClassifier(base_estimator=base_estimator)\n clf.fit(X_train, y_train_missing_labels)\n clf.predict_proba(X_test)\n\n base_estimator = StackingClassifier(\n 
estimators=[\n (\"svc_1\", SVC(probability=False)),\n (\"svc_2\", SVC(probability=False)),\n ],\n final_estimator=SVC(probability=False),\n cv=2,\n )\n\n assert not hasattr(base_estimator, \"predict_proba\")\n clf = SelfTrainingClassifier(base_estimator=base_estimator)\n with pytest.raises(AttributeError):\n clf.fit(X_train, y_train_missing_labels)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 231, "n_words": 77, "vocab_size": 53, "complexity": 1, "nloc": 25, "token_counts": 155, "n_ast_nodes": 250, "n_identifiers": 19, "random_cut": "def test_base_estimator_meta_estimator():\n # Check that a meta-estimator relying on an estimator implementing\n # `predict_proba` will work even if it does expose this method before being\n # fitted.\n # Non-regression test for:\n # https://github.com/scikit-learn/scikit-learn/issues/19119\n\n base_estimator = StackingClassifier(\n estimators=[\n (\"svc_1\", SVC(probability=True)),\n (\"svc_2\", SVC(probability=True)),\n ],\n final_estimator=SVC(probability=True),\n cv=2,\n )\n\n assert hasat" }, { "id": 128982, "commit_id": "abd3bef63b486fe3e43c1608d93205a702880414", "repo": "ray", "path": "rllib/tests/test_io.py", "file_name": "test_io.py", "fun_name": "test_agent_input_eval_sim", "commit_message": "[RLlib] QMIX better defaults + added to CI learning tests (#21332)", "code": "def test_agent_input_eval_sim(self):\n for fw in framework_iterator():\n self.write_outputs(self.test_dir, fw)\n agent = PGTrainer(\n env=\"CartPole-v0\",\n config={\n \"input\": self.test_dir + fw,\n \"input_evaluation\": [\"simulation\"],\n \"framework\": fw,\n })\n for _ in range(50):\n result = agent.train()\n if not np.isnan(result[\"episode_reward_mean\"]):\n return # simulation ok\n time.sleep(0.1)\n assert False, \"did not see any simulation results\"\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 262, "n_words": 45, "vocab_size": 39, "complexity": 4, "nloc": 16, "token_counts": 93, "n_ast_nodes": 154, "n_identifiers": 18, "random_cut": "def test_agent_input_eval_sim(self):\n for fw in framework_iterator():\n self.write_outputs(self.test_dir, fw)\n agent = PGTrainer(\n env=\"CartPole-v0\",\n config={\n \"input\": self.test_dir + fw" }, { "id": 211220, "commit_id": "b4727677751081b257c6fa23c3c124ab9e5a32a1", "repo": "PaddleDetection", "path": "ppdet/modeling/heads/s2anet_head.py", "file_name": "s2anet_head.py", "fun_name": "get_bboxes_single", "commit_message": "refactor s2anet (#6604)\n\n* refactor s2anet to support batch_size > 1\r\n\r\n* fix problem of inference\r\n\r\n* support batch_size > 1 for training\r\n\r\n* fix empty results\r\n\r\n* fix dota eval\r\n\r\n* fix configs of s2anet_head\r\n\r\n* modify s2anet_spine_1x to 73 mAP", "code": "def get_bboxes_single(self, cls_score_list, bbox_pred_list):\n mlvl_bboxes = []\n mlvl_scores = []\n\n for cls_score, bbox_pred in zip(cls_score_list, bbox_pred_list):\n if self.use_sigmoid_cls:\n scores = F.sigmoid(cls_score)\n else:\n scores = F.softmax(cls_score, axis=-1)\n\n if scores.shape[0] > self.nms_pre:\n # Get maximum scores for foreground classes.\n if self.use_sigmoid_cls:\n max_scores = paddle.max(scores, axis=1)\n else:\n max_scores = paddle.max(scores[:, :-1], axis=1)\n\n topk_val, topk_inds = paddle.topk(max_scores, self.nms_pre)\n bbox_pred = paddle.gather(bbox_pred, topk_inds)\n scores = paddle.gather(scores, 
topk_inds)\n\n mlvl_bboxes.append(bbox_pred)\n mlvl_scores.append(scores)\n\n mlvl_bboxes = paddle.concat(mlvl_bboxes)\n mlvl_scores = paddle.concat(mlvl_scores)\n\n mlvl_polys = rbox2poly(mlvl_bboxes).unsqueeze(0)\n mlvl_scores = paddle.transpose(mlvl_scores, [1, 0]).unsqueeze(0)\n\n bbox, bbox_num, _ = self.nms(mlvl_polys, mlvl_scores)\n if bbox.shape[0] <= 0:\n bbox = self.fake_bbox\n bbox_num = self.fake_bbox_num\n\n return bbox, bbox_num\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 401, "n_words": 97, "vocab_size": 62, "complexity": 6, "nloc": 27, "token_counts": 237, "n_ast_nodes": 367, "n_identifiers": 36, "random_cut": "def get_bboxes_single(self, cls_score_list, bbox_pred_list):\n mlvl_bboxes = []\n mlvl_scores = []\n\n for cls_score, bbox_pred in zip(cls_score_list, bbox_pred_list):\n if self.use_sigmoid_cls:\n scores = F.sigmoid(cls_score)\n else:\n scores = F.softmax(cls_score, axis=-1)\n\n if scores.shape[0] > self.nms_pre:\n # Get maximum scores for foreground classes.\n if self.use_sigmoid_cls:\n max_scores = paddle.max(scores, axis=1)\n else:\n max_scores = paddle.max(scores[:, :-1], axis=1)\n\n topk_val, topk_inds = paddle.topk(max_scores, self.nms_pre)\n bbox_pred = paddle.gather(bbox_pred, topk_inds)\n scores = paddle.gather(scores, topk_inds)\n\n mlvl_bboxes.append(bbox_pred)\n mlvl_scores.append(scores)\n\n mlvl_bboxes = paddle.concat(mlvl_bboxes)\n mlvl_scores = paddle.concat(ml" }, { "id": 296882, "commit_id": "22db21b9d4a7c5c2a79d43d8edf5295aeb99c13d", "repo": "core", "path": "tests/components/onvif/__init__.py", "file_name": "__init__.py", "fun_name": "setup_mock_device", "commit_message": "Add diagnostics to ONVIF (#69708)", "code": "def setup_mock_device(mock_device):\n \n mock_device.async_setup = AsyncMock(return_value=True)\n mock_device.available = True\n mock_device.name = NAME\n mock_device.info = DeviceInfo(\n MANUFACTURER,\n MODEL,\n FIRMWARE_VERSION,\n SERIAL_NUMBER,\n MAC,\n )\n mock_device.capabilities = Capabilities()\n mock_device.profiles = []\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 85, "n_words": 26, "vocab_size": 21, "complexity": 1, "nloc": 15, "token_counts": 63, "n_ast_nodes": 89, "n_identifiers": 18, "random_cut": "def setup_mock_device(mock_device):\n \n mock_device.async_setup = AsyncMock(return_value=True)\n mock_device.a" }, { "id": 93205, "commit_id": "db35e231ceababe8c9f5ca7b5d2ca685f07c7d5b", "repo": "sentry", "path": "tests/sentry/integrations/msteams/test_message_builder.py", "file_name": "test_message_builder.py", "fun_name": "test_resolved_issue_message", "commit_message": "test(msteams): Add tests for building group card (#36834)\n\nAdd tests for build_group_card which builds issues cards. Does NOT test all visual aspects of the card. 
Only ensures that certain important elements are present and the basic structure of the card is correct.", "code": "def test_resolved_issue_message(self):\n self.group1.status = GroupStatus.RESOLVED\n self.group1.save()\n\n issue_card = build_group_card(\n group=self.group1, event=self.event1, rules=self.rules, integration=self.integration\n )\n\n action_set = issue_card[\"body\"][2][\"items\"][0]\n\n resolve_action = action_set[\"actions\"][0]\n assert ActionType.SUBMIT == resolve_action[\"type\"]\n assert \"Unresolve\" == resolve_action[\"title\"]\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 94, "n_words": 28, "vocab_size": 23, "complexity": 1, "nloc": 10, "token_counts": 89, "n_ast_nodes": 144, "n_identifiers": 18, "random_cut": "def test_resolved_issue_message(self):\n self.group1.status = " }, { "id": 31875, "commit_id": "3cff4cc58730409c68f8afa2f3b9c61efa0e85c6", "repo": "transformers", "path": "tests/models/mvp/test_modeling_mvp.py", "file_name": "test_modeling_mvp.py", "fun_name": "test_shift_tokens_right", "commit_message": "Add MVP model (#17787)\n\n* Add MVP model\r\n\r\n* Update README\r\n\r\n* Remove useless module\r\n\r\n* Update docs\r\n\r\n* Fix bugs in tokenizer\r\n\r\n* Remove useless test\r\n\r\n* Remove useless module\r\n\r\n* Update vocab\r\n\r\n* Remove specifying\r\n\r\n* Remove specifying\r\n\r\n* Add #Copied ... statement\r\n\r\n* Update paper link\r\n\r\n* Remove useless TFMvp\r\n\r\n* Add #Copied ... statement\r\n\r\n* Fix style in test mvp model\r\n\r\n* Fix some typos\r\n\r\n* Fix properties of unset special tokens in non verbose mode\r\n\r\n* Update paper link\r\n\r\n* Update MVP doc\r\n\r\n* Update MVP doc\r\n\r\n* Fix README\r\n\r\n* Fix typos in docs\r\n\r\n* Update docs", "code": "def test_shift_tokens_right(self):\n input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long)\n shifted = shift_tokens_right(input_ids, 1, 2)\n n_pad_before = input_ids.eq(1).float().sum()\n n_pad_after = shifted.eq(1).float().sum()\n self.assertEqual(shifted.shape, input_ids.shape)\n self.assertEqual(n_pad_after, n_pad_before - 1)\n self.assertTrue(torch.eq(shifted[:, 0], 2).all())\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 87, "n_words": 39, "vocab_size": 33, "complexity": 1, "nloc": 8, "token_counts": 137, "n_ast_nodes": 200, "n_identifiers": 18, "random_cut": "def test_shift_tokens_right(self):\n input_ids = torch.tensor([[71, 82, 18, 33," }, { "id": 223027, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/tests/test_archive_util.py", "file_name": "test_archive_util.py", "fun_name": "test_make_archive_tar", "commit_message": "add python 3.10.4 for windows", "code": "def test_make_archive_tar(self):\n base_dir = self._create_files()\n base_name = os.path.join(self.mkdtemp() , 'archive')\n res = make_archive(base_name, 'tar', base_dir, 'dist')\n self.assertTrue(os.path.exists(res))\n self.assertEqual(os.path.basename(res), 'archive.tar')\n self.assertEqual(self._tarinfo(res), self._created_files)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 63, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 7, "token_counts": 83, "n_ast_nodes": 137, "n_identifiers": 17, 
"random_cut": "def test_make_archive_tar(self):\n base_dir = self._create_files()\n base_name = os.path.join(self.mkdtemp() , 'archive')\n res = make_archive(base_name, 'tar', base_dir, 'dist')\n self.assertTrue(os.path.exists(res))\n self.assertEqual(os.path.basename(res), 'archive.tar')\n self.assertEqual(self._tarinfo(res), self._created_files)\n" }, { "id": 295087, "commit_id": "0f6296e4b520ec8daf0f12e7b6db3c863c811ae8", "repo": "core", "path": "homeassistant/components/zha/lock.py", "file_name": "lock.py", "fun_name": "async_lock", "commit_message": "Bump zigpy to 0.44.1 and zha-quirks to 0.0.69 (#68921)\n\n* Make unit tests pass\r\n\r\n* Flip response type check to not rely on it being a list\r\nhttps://github.com/zigpy/zigpy/pull/716#issuecomment-1025236190\r\n\r\n* Bump zigpy and quirks versions to ZCLR8 releases\r\n\r\n* Fix renamed zigpy cluster attributes\r\n\r\n* Handle the default response for ZLL `get_group_identifiers`\r\n\r\n* Add more error context to `stage failed` errors\r\n\r\n* Fix unit test returning lists as ZCL request responses\r\n\r\n* Always load quirks when testing ZHA\r\n\r\n* Bump zha-quirks to 0.0.69", "code": "async def async_lock(self, **kwargs):\n \n result = await self._doorlock_channel.lock_door()\n if isinstance(result, Exception) or result[0] is not Status.SUCCESS:\n self.error(\"Error with lock_door: %s\", result)\n return\n self.async_write_ha_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 73, "n_words": 23, "vocab_size": 23, "complexity": 3, "nloc": 6, "token_counts": 51, "n_ast_nodes": 86, "n_identifiers": 12, "random_cut": "async def async_lock(self, **kwargs):\n \n result = await self._doorlock_channel.lock_door()\n if isinstance(result, Exception) or result[0] is not Status.SUCCESS:\n " }, { "id": 38741, "commit_id": "2e7e4280aa6f380a4e3afad6524295a17901c56c", "repo": "transformers", "path": "src/transformers/utils/fx.py", "file_name": "fx.py", "fun_name": "__setitem__", "commit_message": "Traced models serialization and torchscripting fix (#17206)\n\n* Fix torch.jit.script and pickling issues\r\n\r\n* Fix get_attr issues\r\n\r\n* Fix import in function\r\n\r\n* Fix GPT-J and T5 tracing for torch=1.11\r\n\r\n* Gate graph surgery on torch version\r\n\r\n* Modeling minor changes to enable TorchScripting\r\n\r\n* Model serialization / deserialization test\r\n\r\n* Remove _assert_is_none users", "code": "def __setitem__(self, indices, values):\n return self.tracer.create_proxy(\"call_function\", operator.setitem, (self, indices, values), {})\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 17, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 33, "n_ast_nodes": 47, "n_identifiers": 8, "random_cut": "def __setitem__(self, indices, values):\n return self.tracer.create_proxy(\"call_function\", operator.setitem, (self, in" }, { "id": 275632, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizers_test.py", "file_name": "optimizers_test.py", "fun_name": "test_rmsprop", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_rmsprop(self):\n with self.cached_session():\n self._test_optimizer(optimizer_v1.RMSprop())\n self._test_optimizer(optimizer_v1.RMSprop(decay=1e-3))\n", "url": 
"https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 34, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 4, "token_counts": 37, "n_ast_nodes": 61, "n_identifiers": 7, "random_cut": "def test_rmsprop(self):\n with self.cached_session():\n self._te" }, { "id": 300151, "commit_id": "5e737bfe4fbc5a724f5fdf04ea9319c2224cb114", "repo": "core", "path": "homeassistant/components/ws66i/media_player.py", "file_name": "media_player.py", "fun_name": "async_set_volume_level", "commit_message": "Add ws66i core integration (#56094)\n\n* Add ws66i core integration\r\n\r\n* Remove all ws66i translations\r\n\r\n* Update ws66i unit tests to meet minimum code coverage\r\n\r\n* Update ws66i based on @bdraco review\r\n\r\n* General improvements after 2nd PR review\r\n\r\n* Disable entities if amp shutoff, set default source names, set 30sec polling\r\n\r\n* Add _attr_ and change async_on_unload\r\n\r\n* Improve entity generation\r\n\r\n* Implement coordinator\r\n\r\n* Made options fields required, retry connection on failed attempts, use ZoneStatus for attributes\r\n\r\n* Refactor WS66i entity properties, raise HomeAssistantError on restore service if no snapshot\r\n\r\n* Update to pyws66i v1.1\r\n\r\n* Add quality scale of silver to manifest\r\n\r\n* Update config_flow test", "code": "async def async_set_volume_level(self, volume):\n \n await self.hass.async_add_executor_job(\n self._ws66i.set_volume, self._zone_id, int(volume * MAX_VOL)\n )\n self._status.volume = int(volume * MAX_VOL)\n self._async_update_attrs_write_ha_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 64, "n_words": 18, "vocab_size": 15, "complexity": 1, "nloc": 6, "token_counts": 49, "n_ast_nodes": 80, "n_identifiers": 12, "random_cut": "async def async_set_volume_level(self, volume):\n \n await self.hass.async_add_executor_job(\n self._ws66i.set_volume, self._zone_id, int(" }, { "id": 147686, "commit_id": "434265edd0926f7838cca4dbce00e88149e6bbf0", "repo": "ray", "path": "rllib/examples/two_trainer_workflow.py", "file_name": "two_trainer_workflow.py", "fun_name": "get_default_config", "commit_message": "[RLlib] Examples folder: All `training_iteration` translations. (#23712)", "code": "def get_default_config(cls) -> TrainerConfigDict:\n # Run this Trainer with new `training_iteration` API and set some PPO-specific\n # parameters.\n return with_common_config(\n {\n \"_disable_execution_plan_api\": True,\n \"num_sgd_iter\": 10,\n \"sgd_minibatch_size\": 128,\n }\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 123, "n_words": 29, "vocab_size": 28, "complexity": 1, "nloc": 8, "token_counts": 25, "n_ast_nodes": 45, "n_identifiers": 4, "random_cut": "def get_default_config(cls) -> TrainerConfigDict:\n # Run this Trainer with new `training_iteration` API and set some PPO-specific\n # parameters.\n return with_common_config(\n " }, { "id": 129516, "commit_id": "f8e41215b3fc8f45660e6afac4fe6faad73287f4", "repo": "ray", "path": "python/ray/_private/runtime_env/utils.py", "file_name": "utils.py", "fun_name": "has_conda", "commit_message": "[1/n][cross-language runtime env] runtime env protobuf refactor (#21551)\n\nWe need to support runtime env for java、c++ and cross-language. 
This PR only do a refactor of protobuf.\r\nRelated issue #21731", "code": "def has_conda(self) -> str:\n return self._proto_runtime_env.python_runtime_env.HasField(\n \"conda_runtime_env\")\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 24, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 31, "n_identifiers": 6, "random_cut": "def has_conda(self) -> str:\n return self._proto_runtime_env.python_runtime_env.HasField(\n \"conda_runtime_en" }, { "id": 67202, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/report/eway_bill/eway_bill.py", "file_name": "eway_bill.py", "fun_name": "get_data", "commit_message": "style: format code with black", "code": "def get_data(filters):\n\n\tconditions = get_conditions(filters)\n\n\tdata = frappe.db.sql(\n\t\t\n\t\t% conditions,\n\t\tas_dict=1,\n\t)\n\n\tunit = {\n\t\t\"Bag\": \"BAGS\",\n\t\t\"Bottle\": \"BOTTLES\",\n\t\t\"Kg\": \"KILOGRAMS\",\n\t\t\"Liter\": \"LITERS\",\n\t\t\"Meter\": \"METERS\",\n\t\t\"Nos\": \"NUMBERS\",\n\t\t\"PKT\": \"PACKS\",\n\t\t\"Roll\": \"ROLLS\",\n\t\t\"Set\": \"SETS\",\n\t}\n\n\t# Regular expression set to remove all the special characters\n\tspecial_characters = r\"[$%^*()+\\\\[\\]{};':\\\"\\\\|<>.?]\"\n\n\tfor row in data:\n\t\tset_defaults(row)\n\t\tset_taxes(row, filters)\n\t\tset_address_details(row, special_characters)\n\n\t\t# Eway Bill accepts date as dd/mm/yyyy and not dd-mm-yyyy\n\t\trow.posting_date = \"/\".join(str(row.posting_date).replace(\"-\", \"/\").split(\"/\")[::-1])\n\t\trow.lr_date = \"/\".join(str(row.lr_date).replace(\"-\", \"/\").split(\"/\")[::-1])\n\n\t\tif row.gst_vehicle_type == \"Over Dimensional Cargo (ODC)\":\n\t\t\trow.gst_vehicle_type = \"ODC\"\n\n\t\trow.item_name = re.sub(special_characters, \" \", row.item_name)\n\t\trow.description = row.item_name\n\n\t\trow.uom = unit.get(row.uom, row.uom)\n\n\t\t# For removing special charactes and numbers from customer.\n\t\trow.customer = re.sub(special_characters[:-1] + \"&0-9\" + \"]\", \"\", row.customer)\n\n\treturn data\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 83, "n_words": 117, "vocab_size": 98, "complexity": 3, "nloc": 39, "token_counts": 235, "n_ast_nodes": 423, "n_identifiers": 29, "random_cut": "def get_data(filters):\n\n\tconditions = get_conditions(filters)\n\n\tdata = frappe.db.sql(\n\t\t\n\t\t% conditions,\n\t\tas_dict=1,\n\t)\n\n\tunit = {\n\t\t\"Bag\": \"BAGS\",\n\t\t\"Bottle\": \"BOTTLES\",\n\t\t\"Kg\": \"KILOGRAMS\",\n\t\t\"Liter\": \"LITERS\",\n\t\t\"Meter\": \"METERS\",\n\t\t\"Nos\": \"NUMBERS\",\n\t\t\"PKT\": \"PACKS\",\n\t\t\"Roll\": \"ROLLS\",\n\t\t\"Set\": \"SETS\",\n\t}\n\n\t# Regular expression set to remove all the special characters\n\tspecial_characters " }, { "id": 89383, "commit_id": "6e2d3d461e9638981b6619952f59f78e44a93917", "repo": "sentry", "path": "tests/sentry/replays/test_organization_issue_replay_count.py", "file_name": "test_organization_issue_replay_count.py", "fun_name": "setUp", "commit_message": "feat(replays): Add issue replay count endpoint (#41996)\n\n## What this PR does\r\n\r\nWe create an endpoint that provides accurate counts of replay_ids\r\nassociated with issue ids within a timeframe. The endpoint will return a\r\nformat like so:\r\n```\r\n{\r\n issue_id_1: 1,\r\n issue_id_2: 20,\r\n etc. 
etc.\r\n}\r\n```\r\n\r\n### Constraints \r\n- Between 1 and 25 issue_ids may be passed in (25 being the current\r\npagination size of the issue stream)\r\n- We will only count up to 50 replay_ids, 50 chosen somewhat arbitrarily\r\nbut seems a reasonable size.\r\n- We will only retrieve up to 100 replay_ids per issue_id. this means\r\nthat if over half of replay_ids are sampled, it's possible that we may\r\nundercount replays as we will miss some. This is a small edge case and\r\nis acceptable.\r\n\r\n## Modifications:\r\n\r\n- We modify discover.py to allow for private_use of the `groupArray`\r\nclickhouse function\r\n- In our endpoint we use QueryBuilder to hit the discover dataset, then\r\nquery the replays dataset with the replay_ids returned\r\n- We then count up each replay_id confirmed to exist by its associated\r\nissue_id, and return the resulting dict\r\n\r\n\r\n### Why are we doing this?\r\nBecause of sampling / dropped data, events can have replay_ids that\r\ndon't exist. this is nominally fine, although it results in missing\r\ncounts / the product not seeming trustworthy in places.\r\n\r\n\r\nFixes https://github.com/getsentry/replay-backend/issues/190", "code": "def setUp(self):\n super().setUp()\n self.min_ago = before_now(minutes=1)\n self.login_as(user=self.user)\n self.url = reverse(\n \"sentry-api-0-organization-issue-replay-count\",\n kwargs={\"organization_slug\": self.project.organization.slug},\n )\n self.features = {\"organizations:session-replay\": True}\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 81, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 9, "token_counts": 64, "n_ast_nodes": 108, "n_identifiers": 15, "random_cut": "def setUp(self):\n super().setUp()\n self.min_ago = before_now(minutes=1)\n self.login_as(user=self.user)\n self.url = reverse(\n \"sentry-api-0-organization-issue-replay-count\",\n " }, { "id": 30686, "commit_id": "4d727bd2dff377caeab21ff4e1bf4b26c2397c8a", "repo": "transformers", "path": "tests/models/opt/test_modeling_opt.py", "file_name": "test_modeling_opt.py", "fun_name": "test_inference_no_head", "commit_message": "Fix expected value for OPT test `test_inference_no_head` (#17395)\n\n* Fix expected value\r\n\r\n* 5e-5\r\n\r\nCo-authored-by: ydshieh ", "code": "def test_inference_no_head(self):\n model = OPTModel.from_pretrained(\"facebook/opt-350m\").to(torch_device)\n input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])\n\n with torch.no_grad():\n output = model(input_ids=input_ids).last_hidden_state\n\n expected_shape = torch.Size((1, 11, 512))\n self.assertEqual(output.shape, expected_shape)\n # expected value works for CPU, as well as GPU (with TF32 disabled)\n expected_slice = torch.tensor(\n [\n [-0.28726277, -1.9241608, -0.3058734],\n [-1.2737825, -0.13332152, -0.18766522],\n [0.41159445, 0.1191957, -1.3107123],\n ],\n device=torch_device,\n )\n assert_tensors_close(output[0, :3, :3], expected_slice, atol=5e-5)\n\n\n@require_torch\n@slow", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@require_torch\n@slow", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 215, "n_words": 66, "vocab_size": 61, "complexity": 1, "nloc": 16, "token_counts": 173, "n_ast_nodes": 229, "n_identifiers": 24, "random_cut": "def test_inference_no_head(self):\n model = OPTModel.from_pretrained(\"facebook/opt-350m\").to(torch_device)\n i" }, { "id": 13648, 
"commit_id": "47da80beca9cd60db51081594f1fcc5bee6d1246", "repo": "jina", "path": "jina/parsers/orchestrate/runtimes/remote.py", "file_name": "remote.py", "fun_name": "mixin_gateway_protocol_parser", "commit_message": "docs: fix port and protocol description for the gateway (#5456)\n\nCo-authored-by: Jina Dev Bot ", "code": "def mixin_gateway_protocol_parser(parser):\n \n\n from jina.enums import GatewayProtocolType\n\n parser.add_argument(\n '--protocol',\n '--protocols',\n nargs='+',\n type=GatewayProtocolType.from_string,\n choices=list(GatewayProtocolType),\n default=[GatewayProtocolType.GRPC],\n help=f'Communication protocol of the server exposed by the Gateway. This can be a single value or a list of protocols, depending on your chosen Gateway. Choose the convenient protocols from: {[protocol.to_string() for protocol in list(GatewayProtocolType)]}.',\n )\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 110, "n_words": 49, "vocab_size": 43, "complexity": 1, "nloc": 11, "token_counts": 51, "n_ast_nodes": 106, "n_identifiers": 16, "random_cut": "def mixin_gateway_protocol_parser(parser):\n \n\n from jina.enums import GatewayProtocolType\n\n parser.add_argument(\n '--protocol',\n '--protocols',\n nargs='+',\n type=GatewayProtocolType.from_string,\n choices=l" }, { "id": 161881, "commit_id": "97bffbc7b1640dfc7bc20809dc0b9d1b536d7644", "repo": "rich", "path": "tests/test_console.py", "file_name": "test_console.py", "fun_name": "test_console_null_file", "commit_message": "Test to ensure NullFile set as Console.file when stdout null", "code": "def test_console_null_file(monkeypatch):\n # When stdout and stderr are null, Console.file should be replaced with NullFile\n monkeypatch.setattr(\"sys.stdout\", None)\n monkeypatch.setattr(\"sys.stderr\", None)\n\n console = Console()\n assert isinstance(console.file, NullFile)\n\n", "url": "https://github.com/Textualize/rich.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 39, "n_words": 25, "vocab_size": 24, "complexity": 1, "nloc": 5, "token_counts": 35, "n_ast_nodes": 60, "n_identifiers": 8, "random_cut": "def test_console_null_file(monkeypatch):\n # When stdout and stderr are null, Console.file should be replaced with NullFile\n monkeypatch.setattr(\"sys.stdout\", None)\n " }, { "id": 71510, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/pages/test_delete_page.py", "file_name": "test_delete_page.py", "fun_name": "setUp", "commit_message": "Reformat with black", "code": "def setUp(self):\n # Find root page\n self.root_page = Page.objects.get(id=2)\n\n # Add child page\n self.child_page = SimplePage(\n title=\"Hello world!\", slug=\"hello-world\", content=\"hello\"\n )\n self.root_page.add_child(instance=self.child_page)\n\n # Add a page with child pages of its own\n self.child_index = StandardIndex(title=\"Hello index\", slug=\"hello-index\")\n self.root_page.add_child(instance=self.child_index)\n self.grandchild_page = StandardChild(title=\"Hello Kitty\", slug=\"hello-kitty\")\n self.child_index.add_child(instance=self.grandchild_page)\n\n # Login\n self.user = self.login()\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 150, "n_words": 49, "vocab_size": 38, "complexity": 1, "nloc": 11, "token_counts": 110, "n_ast_nodes": 189, "n_identifiers": 20, 
"random_cut": "def setUp(self):\n # Find root page\n self.root_page = Page.objects.get(id=2)\n\n # Add child page\n self.child_page = SimplePage(\n title=\"Hello world!\", slug=\"hello-world\", content=\"hello\"\n )\n self.root_page.add_child(instance=self.child_page)\n\n # Add a page with child pages of its own\n self.child_index = StandardIndex(title=\"Hello index\", slug=\"hello-index\")\n self.root_page.add_child(instance=self" }, { "id": 54216, "commit_id": "ef229fdb02297bcfff6aa95f210ce73e35074b99", "repo": "prefect", "path": "tests/test_client.py", "file_name": "test_client.py", "fun_name": "test_client_can_opt_out_of_lifespan_management", "commit_message": "Fix bug where log workers would access a cached client across event loops", "code": "async def test_client_can_opt_out_of_lifespan_management(self):\n startup, shutdown = MagicMock(), MagicMock()\n app = FastAPI(on_startup=[startup], on_shutdown=[shutdown])\n\n client = OrionClient(app, manage_ephemeral_lifespan=False)\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 36, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 8, "token_counts": 57, "n_ast_nodes": 68, "n_identifiers": 12, "random_cut": "async def test_client_can_opt_out_of_lifespan_management(self):\n startup, shutdown = MagicMock(), MagicMock()\n app = FastAPI(on_startup=[startup], on_shutdown=[shutdown]" }, { "id": 164179, "commit_id": "bfe2d528a9398679acf05ffcdc60d3c181e0f17e", "repo": "pandas", "path": "pandas/tests/resample/test_base.py", "file_name": "test_base.py", "fun_name": "test_resample_empty_series", "commit_message": "TST: Use more xfail instead of skip (#45719)", "code": "def test_resample_empty_series(freq, empty_series_dti, resample_method, request):\n # GH12771 & GH12868\n\n if resample_method == \"ohlc\" and isinstance(empty_series_dti.index, PeriodIndex):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=f\"GH13083: {resample_method} fails for PeriodIndex\"\n )\n )\n\n ser = empty_series_dti\n result = getattr(ser.resample(freq), resample_method)()\n\n expected = ser.copy()\n expected.index = _asfreq_compat(ser.index, freq)\n\n tm.assert_index_equal(result.index, expected.index)\n assert result.index.freq == expected.index.freq\n tm.assert_series_equal(result, expected, check_dtype=False)\n\n\n@all_ts\n@pytest.mark.parametrize(\"freq\", [\"M\", \"D\", \"H\"])", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@all_ts\n@pytest.mark.parametrize(\"freq\", [\"M\", \"D\", \"H\"])", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 128, "n_words": 53, "vocab_size": 48, "complexity": 3, "nloc": 14, "token_counts": 116, "n_ast_nodes": 219, "n_identifiers": 27, "random_cut": "def test_resample_empty_series(freq, empty_series_dti, resample_method, request):\n # GH12771 & GH12868\n\n if resample_method == \"ohlc\" and i" }, { "id": 102547, "commit_id": "8d05174defd689cb1cb2346e0cde5b7fa572814a", "repo": "pytorch", "path": "torch/testing/_comparison.py", "file_name": "_comparison.py", "fun_name": "_handle_meta_tensor_data_access", "commit_message": "make meta tensor data access error message for expressive in assert_close (#68802)\n\nSummary:\nPull Request resolved: https://github.com/pytorch/pytorch/pull/68802\n\nWithout this patch, the error message of comparing meta tensors looks like this after #68722 was merged:\n\n```python\n>>> t = torch.empty((), device=\"meta\")\n>>> assert_close(t, 
t)\nNotImplementedError: Could not run 'aten::abs.out' with arguments from the 'Meta' backend. [...]\n[...]\nThe above exception was the direct cause of the following exception:\n[...]\nRuntimeError: Comparing\n\nTensorLikePair(\n id=(),\n actual=tensor(..., device='meta', size=()),\n expected=tensor(..., device='meta', size=()),\n rtol=1.3e-06,\n atol=1e-05,\n equal_nan=False,\n check_device=True,\n check_dtype=True,\n check_layout=True,\n check_stride=False,\n check_is_coalesced=True,\n)\n\nresulted in the unexpected exception above. If you are a user and see this message during normal operation please file an issue at https://github.com/pytorch/pytorch/issues. If you are a developer and working on the comparison functions, please except the previous error and raise an expressive `ErrorMeta` instead.\n```\n\nThus, we follow our own advice and turn it into an expected exception until #68592 is resolved:\n\n```python\n>>> t = torch.empty((), device=\"meta\")\n>>> assert_close(t, t)\nValueError: Comparing meta tensors is currently not supported\n```\n\nTest Plan: Imported from OSS\n\nReviewed By: ngimel\n\nDifferential Revision: D33542999\n\nPulled By: mruberry\n\nfbshipit-source-id: 0fe1ddee15b5decdbd4c5dd84f03804ca7eac95b", "code": "def _handle_meta_tensor_data_access(self):\n \n try:\n yield\n except NotImplementedError as error:\n if \"meta\" not in str(error).lower():\n raise error\n\n # TODO: See https://github.com/pytorch/pytorch/issues/68592\n raise self._make_error_meta(NotImplementedError, \"Comparing meta tensors is currently not supported.\")\n", "url": "https://github.com/pytorch/pytorch.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 108, "n_words": 28, "vocab_size": 26, "complexity": 3, "nloc": 7, "token_counts": 38, "n_ast_nodes": 71, "n_identifiers": 7, "random_cut": "def _handle_meta_tensor_data_access(self):\n \n try:\n yield\n except NotImplementedError as error:\n if \"meta\" not in str(error).lower():\n raise error\n\n # TODO: See https://github.com/pytorch/pytorch/issues/68592\n raise self._make_error_meta(NotImplementedError, \"Compa" }, { "id": 313425, "commit_id": "f91aa33c5f7bc13ed031a95f946f70e11af1e2f3", "repo": "core", "path": "homeassistant/components/auth/login_flow.py", "file_name": "login_flow.py", "fun_name": "_async_flow_result_to_response", "commit_message": "Add FlowResultType enum to data entry flow (#72955)", "code": "async def _async_flow_result_to_response(self, request, client_id, result):\n \n if result[\"type\"] != data_entry_flow.FlowResultType.CREATE_ENTRY:\n # @log_invalid_auth does not work here since it returns HTTP 200.\n # We need to manually log failed login attempts.\n if (\n result[\"type\"] == data_entry_flow.FlowResultType.FORM\n and (errors := result.get(\"errors\"))\n and errors.get(\"base\")\n in (\n \"invalid_auth\",\n \"invalid_code\",\n )\n ):\n await process_wrong_login(request)\n return self.json(_prepare_result_json(result))\n\n result.pop(\"data\")\n\n hass: HomeAssistant = request.app[\"hass\"]\n result_obj: Credentials = result.pop(\"result\")\n\n # Result can be None if credential was never linked to a user before.\n user = await hass.auth.async_get_user_by_credentials(result_obj)\n\n if user is not None and (\n user_access_error := async_user_not_allowed_do_auth(hass, user)\n ):\n return self.json_message(\n f\"Login blocked: {user_access_error}\", HTTPStatus.FORBIDDEN\n )\n\n await process_success_login(request)\n result[\"result\"] = self._store_result(client_id, 
result_obj)\n\n return self.json(result)\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 420, "n_words": 105, "vocab_size": 80, "complexity": 7, "nloc": 26, "token_counts": 169, "n_ast_nodes": 289, "n_identifiers": 30, "random_cut": "async def _async_flow_result_to_response(self, request, client_id, result):\n \n if result[\"type\"] != data_entry_flow.FlowResultType.CREATE_ENTRY:\n # @log_invalid_auth does not work here since it returns HTTP 200.\n # We need to manually log failed login attempts.\n if (\n result[\"type\"] == data_entry_flow.FlowResultType.FORM\n and (errors := result.get(\"errors\"))\n and errors.get(\"base\")\n in (\n \"invalid_auth\",\n \"invalid_code\",\n )\n ):\n await process_wrong_login(request)\n return self.json(_prepare_result_json(result))\n\n result.pop(\"data\")\n\n hass: HomeAssistant = request.app[\"hass\"]\n result_obj: Credentials = result.pop(\"result\")\n\n # Result can be None if credential was never linked to a user before.\n user = await hass.auth.async_get_user_by_credentials(result_obj)\n\n if user is not None and (\n user_access_error := async_user_not_allowed_do_auth(hass, user)\n" }, { "id": 318227, "commit_id": "20fec104e2a11b1a5164d7fe779eb0d894e098cf", "repo": "core", "path": "homeassistant/components/avea/light.py", "file_name": "light.py", "fun_name": "update", "commit_message": "Improve type hints in light [a-i] (#75936)\n\n* Improve type hints in ads light\r\n\r\n* Improve type hints in avea light\r\n\r\n* Improve type hints in avion light\r\n\r\n* Improve type hints in broadlink light\r\n\r\n* More type hints\r\n\r\n* One more", "code": "def update(self) -> None:\n \n if (brightness := self._light.get_brightness()) is not None:\n self._attr_is_on = brightness != 0\n self._attr_brightness = round(255 * (brightness / 4095))\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 59, "n_words": 23, "vocab_size": 20, "complexity": 2, "nloc": 8, "token_counts": 45, "n_ast_nodes": 74, "n_identifiers": 8, "random_cut": "def update(self) -> None:\n" }, { "id": 319877, "commit_id": "b70e21a6d50bfc84e76fd68ce0b8c22b0928ff51", "repo": "paperless-ngx", "path": "src/documents/consumer.py", "file_name": "consumer.py", "fun_name": "run_post_consume_script", "commit_message": "When raising an exception during exception handling, chain them together for slightly cleaner logs", "code": "def run_post_consume_script(self, document):\n if not settings.POST_CONSUME_SCRIPT:\n return\n\n if not os.path.isfile(settings.POST_CONSUME_SCRIPT):\n self._fail(\n MESSAGE_POST_CONSUME_SCRIPT_NOT_FOUND,\n f\"Configured post-consume script \"\n f\"{settings.POST_CONSUME_SCRIPT} does not exist.\",\n )\n\n self.log(\n \"info\",\n f\"Executing post-consume script {settings.POST_CONSUME_SCRIPT}\",\n )\n\n script_env = os.environ.copy()\n\n script_env[\"DOCUMENT_ID\"] = str(document.pk)\n script_env[\"DOCUMENT_CREATED\"] = str(document.created)\n script_env[\"DOCUMENT_MODIFIED\"] = str(document.modified)\n script_env[\"DOCUMENT_ADDED\"] = str(document.added)\n script_env[\"DOCUMENT_FILE_NAME\"] = document.get_public_filename()\n script_env[\"DOCUMENT_SOURCE_PATH\"] = os.path.normpath(document.source_path)\n script_env[\"DOCUMENT_ARCHIVE_PATH\"] = os.path.normpath(\n str(document.archive_path),\n )\n script_env[\"DOCUMENT_THUMBNAIL_PATH\"] = os.path.normpath(\n 
document.thumbnail_path,\n )\n script_env[\"DOCUMENT_DOWNLOAD_URL\"] = reverse(\n \"document-download\",\n kwargs={\"pk\": document.pk},\n )\n script_env[\"DOCUMENT_THUMBNAIL_URL\"] = reverse(\n \"document-thumb\",\n kwargs={\"pk\": document.pk},\n )\n script_env[\"DOCUMENT_CORRESPONDENT\"] = str(document.correspondent)\n script_env[\"DOCUMENT_TAGS\"] = str(\n \",\".join(document.tags.all().values_list(\"name\", flat=True)),\n )\n\n try:\n Popen(\n (\n settings.POST_CONSUME_SCRIPT,\n str(document.pk),\n document.get_public_filename(),\n os.path.normpath(document.source_path),\n os.path.normpath(document.thumbnail_path),\n reverse(\"document-download\", kwargs={\"pk\": document.pk}),\n reverse(\"document-thumb\", kwargs={\"pk\": document.pk}),\n str(document.correspondent),\n str(\",\".join(document.tags.all().values_list(\"name\", flat=True))),\n ),\n env=script_env,\n ).wait()\n except Exception as e:\n self._fail(\n MESSAGE_POST_CONSUME_SCRIPT_ERROR,\n f\"Error while executing post-consume script: {e}\",\n exc_info=True,\n exception=e,\n )\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 781, "n_words": 117, "vocab_size": 84, "complexity": 4, "nloc": 60, "token_counts": 383, "n_ast_nodes": 648, "n_identifiers": 40, "random_cut": "def run_post_consume_script(self, document):\n if not settings.POST_CONSUME_SCRIPT:\n return\n\n if not os.path.isfile(settings.POST_CONSUME_SCRIPT):\n self._fail(\n MESSAGE_POST_CONSUME_SCRIPT_NOT_FOUND,\n f\"Configured post-consume script \"\n f\"{settings.POST_CONSUME_SCRIPT} does not exist.\",\n )\n\n self.log(\n \"info\",\n f\"Executing post-consume script {settings.POST_CONSUME_SCRIPT}\",\n )\n\n script_env = os.environ.copy()\n\n script_env[\"DOCUMENT_ID\"] = str(document.pk)\n script_env[\"DOCUMENT_CREATED\"] = str(document.created)\n script_env[\"DOCUMENT_MODIFIED\"] = str(document.modified)\n script_env[\"DOCUMENT_ADDED\"] = str(document.added)\n script_env[\"DOCUMENT_FILE_NAME\"] = document.get_public_filename()\n script_env[\"DOCUMENT_SOURCE_PATH\"] = os.path.normpath(document.source_path)\n " }, { "id": 88393, "commit_id": "f322fa798595b3bc7dc54e904c9628e44102f1f5", "repo": "sentry", "path": "tests/sentry/snuba/metrics/test_metrics_layer/test_metrics_enhanced_performance.py", "file_name": "test_metrics_enhanced_performance.py", "fun_name": "test_broken_custom_metric", "commit_message": "feat(metrics): Add support for wildcard searches (#41114)", "code": "def test_broken_custom_metric(self, mock):\n # Store valid metric\n self.store_transaction_metric(\n 1,\n metric=\"measurements.something_custom\",\n internal_metric=\"d:transactions/measurements.something_custom@millisecond\",\n entity=\"metrics_distributions\",\n timestamp=self.day_ago + timedelta(hours=1, minutes=0),\n )\n\n # mock mri failing to parse the metric\n mock.return_value = None\n result = get_custom_measurements(\n project_ids=[self.project.id],\n organization_id=self.organization.id,\n start=self.day_ago,\n )\n assert result == []\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 182, "n_words": 39, "vocab_size": 34, "complexity": 1, "nloc": 15, "token_counts": 82, "n_ast_nodes": 127, "n_identifiers": 21, "random_cut": "def test_broken_custom_metric(self, mock):\n # Store valid metric\n self.store_transaction_metric(\n 1,\n metric=\"measurements.something_custom\",\n 
internal_metric=\"d:transactions/measurements.something_custom@millisecond\",\n entity=\"metrics_distributions\",\n timestamp=self.day_ago + timedelta(hours=1, minutes=0),\n )\n\n # mock mri failing to parse the metric\n mock.return_value = None\n result = get_custom_measurements(\n project_ids=[self.project.id],\n organiz" }, { "id": 301222, "commit_id": "92582beeff7a5d1e9fa6cfae3afa41f596b5e3c2", "repo": "core", "path": "tests/components/wemo/conftest.py", "file_name": "conftest.py", "fun_name": "create_pywemo_device", "commit_message": "Use properties of wemo Maker device (#72378)", "code": "def create_pywemo_device(pywemo_registry, pywemo_model):\n \n cls = getattr(pywemo, pywemo_model)\n device = create_autospec(cls, instance=True)\n device.host = MOCK_HOST\n device.port = MOCK_PORT\n device.name = MOCK_NAME\n device.serialnumber = MOCK_SERIAL_NUMBER\n device.model_name = pywemo_model.replace(\"LongPress\", \"\")\n device.udn = f\"uuid:{device.model_name}-1_0-{device.serialnumber}\"\n device.firmware_version = MOCK_FIRMWARE_VERSION\n device.get_state.return_value = 0 # Default to Off\n device.supports_long_press.return_value = cls.supports_long_press()\n\n if issubclass(cls, pywemo.Insight):\n device.get_standby_state = pywemo.StandbyState.OFF\n device.current_power_watts = MOCK_INSIGHT_CURRENT_WATTS\n device.today_kwh = MOCK_INSIGHT_TODAY_KWH\n device.threshold_power_watts = MOCK_INSIGHT_STATE_THRESHOLD_POWER\n device.on_for = 1234\n device.today_on_time = 5678\n device.total_on_time = 9012\n\n if issubclass(cls, pywemo.Maker):\n device.has_sensor = 1\n device.sensor_state = 1\n device.switch_mode = 1\n device.switch_state = 0\n\n url = f\"http://{MOCK_HOST}:{MOCK_PORT}/setup.xml\"\n with patch(\"pywemo.setup_url_for_address\", return_value=url), patch(\n \"pywemo.discovery.device_from_description\", return_value=device\n ):\n yield device\n\n\n@pytest.fixture(name=\"pywemo_device\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture(name=\"pywemo_device\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 237, "n_words": 95, "vocab_size": 67, "complexity": 3, "nloc": 30, "token_counts": 191, "n_ast_nodes": 352, "n_identifiers": 48, "random_cut": "def create_pywemo_device(pywemo_registry, pywemo_model):\n \n cls = getattr(pywemo, pywemo_model)\n device = create_autospec(cls, instance=True)\n device.host = MOCK_HOST\n device.port = MOCK_PORT\n device.name = MOCK_NAME\n device.serialnumber = MOCK_SERIAL_NUMBER\n device.model_name = pywemo_model.replace(\"LongPress\", \"\")\n device.udn = f\"uuid:{device.model_name}-1_0-{device.serialnumber}\"\n devic" }, { "id": 290441, "commit_id": "b364ef98a073214aad8deff4ff9b91e9ff041557", "repo": "core", "path": "homeassistant/components/mqtt/vacuum/schema_state.py", "file_name": "schema_state.py", "fun_name": "async_locate", "commit_message": "Use `_attr_` for MQTT vacuum (#81534)\n\n* Use `_attr_` for MQTT vacuum\r\n\r\n* Remove unneeded properties\r\n\r\n* Follow-up comment\r\n\r\n* Remove default value", "code": "async def async_locate(self, **kwargs):\n \n if self.supported_features & VacuumEntityFeature.LOCATE == 0:\n return\n await self.async_publish(\n self._command_topic,\n self._config[CONF_PAYLOAD_LOCATE],\n self._config[CONF_QOS],\n self._config[CONF_RETAIN],\n self._config[CONF_ENCODING],\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 113, "n_words": 19, "vocab_size": 
19, "complexity": 2, "nloc": 10, "token_counts": 59, "n_ast_nodes": 89, "n_identifiers": 13, "random_cut": "async def async_locate(self, **kwargs):\n \n if self.sup" }, { "id": 3562, "commit_id": "f78ede0b511de022482c5f0713752ddf01460eb4", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-google-analytics-v4/unit_tests/unit_test.py", "file_name": "unit_test.py", "fun_name": "mock_auth_check_connection", "commit_message": "reintroduce window in days, log warning when sampling occurs (#9480)\n\n* reintroduce window in days, log warning when sampling occurs\r\n\r\n* Unit tests\r\n\r\n* Documentation update\r\n\r\n* Update airbyte-integrations/connectors/source-google-analytics-v4/source_google_analytics_v4/source.py\r\n\r\nCo-authored-by: Sergei Solonitcyn <11441558+sergei-solonitcyn@users.noreply.github.com>\r\n\r\n* fix the spec\r\n\r\nSigned-off-by: Sergei Solonitcyn \r\n\r\n* some mypy fixes\r\n\r\nSigned-off-by: Sergei Solonitcyn \r\n\r\n* bump version\r\n\r\n* format\r\n\r\n* updated spec and def yaml\r\n\r\n* Update source.py\r\n\r\nCo-authored-by: Sergei Solonitcyn <11441558+sergei-solonitcyn@users.noreply.github.com>\r\nCo-authored-by: Sergei Solonitcyn \r\nCo-authored-by: auganbay ", "code": "def mock_auth_check_connection(requests_mock):\n yield requests_mock.post(\n \"https://analyticsreporting.googleapis.com/v4/reports:batchGet\",\n json={\"data\": {\"test\": \"value\"}},\n )\n\n\n@pytest.fixture", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 28, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 25, "n_ast_nodes": 54, "n_identifiers": 6, "random_cut": "def mock_auth_check_connection(requests_mock):\n yield requests_mock.post(\n \"https://analyticsreporting.googleapis.com/v4/reports:batchGet\",\n json={\"data\": {\"test\": \"value\"}},\n )\n\n\n@pytest.fixture" }, { "id": 143803, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/models/utils.py", "file_name": "utils.py", "fun_name": "get_filter_config", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_filter_config(shape):\n \n # VizdoomGym (large 480x640).\n filters_480x640 = [\n [16, [24, 32], [14, 18]],\n [32, [6, 6], 4],\n [256, [9, 9], 1],\n ]\n # VizdoomGym (small 240x320).\n filters_240x320 = [\n [16, [12, 16], [7, 9]],\n [32, [6, 6], 4],\n [256, [9, 9], 1],\n ]\n # 96x96x3 (e.g. 
CarRacing-v0).\n filters_96x96 = [\n [16, [8, 8], 4],\n [32, [4, 4], 2],\n [256, [11, 11], 2],\n ]\n # Atari.\n filters_84x84 = [\n [16, [8, 8], 4],\n [32, [4, 4], 2],\n [256, [11, 11], 1],\n ]\n # Small (1/2) Atari.\n filters_42x42 = [\n [16, [4, 4], 2],\n [32, [4, 4], 2],\n [256, [11, 11], 1],\n ]\n # Test image (10x10).\n filters_10x10 = [\n [16, [5, 5], 2],\n [32, [5, 5], 2],\n ]\n\n shape = list(shape)\n if len(shape) in [2, 3] and (shape[:2] == [480, 640] or shape[1:] == [480, 640]):\n return filters_480x640\n elif len(shape) in [2, 3] and (shape[:2] == [240, 320] or shape[1:] == [240, 320]):\n return filters_240x320\n elif len(shape) in [2, 3] and (shape[:2] == [96, 96] or shape[1:] == [96, 96]):\n return filters_96x96\n elif len(shape) in [2, 3] and (shape[:2] == [84, 84] or shape[1:] == [84, 84]):\n return filters_84x84\n elif len(shape) in [2, 3] and (shape[:2] == [42, 42] or shape[1:] == [42, 42]):\n return filters_42x42\n elif len(shape) in [2, 3] and (shape[:2] == [10, 10] or shape[1:] == [10, 10]):\n return filters_10x10\n else:\n raise ValueError(\n \"No default configuration for obs shape {}\".format(shape)\n + \", you must specify `conv_filters` manually as a model option. \"\n \"Default configurations are only available for inputs of shape \"\n \"[42, 42, K] and [84, 84, K]. You may alternatively want \"\n \"to use a custom model or preprocessor.\"\n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 586, "n_words": 275, "vocab_size": 125, "complexity": 19, "nloc": 51, "token_counts": 505, "n_ast_nodes": 704, "n_identifiers": 12, "random_cut": "def get_filter_config(shape):\n \n # VizdoomGym (large 480x640).\n filters_480x640 = [\n [16, [24, 32], [14, 18]],\n [32, [6, 6], 4],\n [256, [9, 9], 1],\n ]\n # VizdoomGym (small 240x320).\n filters_240x320 = [\n [16, [12, 16], [7, 9]],\n [32, [6, 6], 4],\n [256, [9, 9], 1],\n ]\n # 96x96x3 (e.g. 
CarRacing-v0).\n filters_96x96 = [\n [16, [8, 8], 4],\n [32, [4, 4], 2],\n [256, [11, 11], 2],\n ]\n # Atari.\n filters_84x84 = [\n [16, [8, 8], 4],\n [32, [4, 4], 2],\n [256, [11, 11], 1],\n ]\n # Small (1/2) Atari.\n filters_42x42 = [\n [16, [4, 4], 2],\n [32, [4, 4], 2],\n [256, [11, 11], 1],\n ]\n # Test image (10x10).\n filters_10x10 = [\n [16, [5, 5], 2],\n [32, [5, 5], 2]" }, { "id": 205102, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/oracle/operations.py", "file_name": "operations.py", "fun_name": "convert_empty_string", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def convert_empty_string(value, expression, connection):\n return \"\" if value is None else value\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 2, "token_counts": 17, "n_ast_nodes": 26, "n_identifiers": 4, "random_cut": "def convert_empty_string(value, expression, connection):\n " }, { "id": 268464, "commit_id": "a02e22e902a69aeb465f16bf03f7f5a91b2cb828", "repo": "ansible", "path": "test/units/galaxy/test_collection_install.py", "file_name": "test_collection_install.py", "fun_name": "test_build_requirement_from_name_multiple_version_results", "commit_message": "Add --offline option to 'ansible-galaxy collection install' (#78678)\n\n* Add --offline option to 'ansible-galaxy collection install' to prevent querying distribution servers\r\n\r\nThis allows installing/upgrading individual tarfiles to have dependency resolution.\r\nPreviously needed to be done manually with --no-deps or else all collections and dependencies needed to be included in the requirements.\r\n\r\nCo-authored-by: Sviatoslav Sydorenko ", "code": "def test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch, tmp_path_factory):\n test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))\n concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)\n multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)\n dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)\n\n matches = RequirementCandidates()\n mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)\n monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)\n\n mock_get_info = MagicMock()\n mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {}, None, None)\n monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)\n\n mock_get_versions = MagicMock()\n mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']\n monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)\n\n mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2', '2.0.3', '2.0.4', '2.0.5']\n monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)\n\n cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.2'])\n requirements = cli._require_one_of_collections_requirements(\n ['namespace.collection:!=2.0.2'], None, artifacts_manager=concrete_artifact_cm\n 
)['collections']\n\n actual = collection._resolve_depenency_map(\n requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False, False, False)['namespace.collection']\n\n assert actual.namespace == u'namespace'\n assert actual.name == u'collection'\n assert actual.src == galaxy_server\n assert actual.ver == u'2.0.5'\n # should be ordered latest to earliest\n assert [c.ver for c in matches.candidates] == [u'2.0.5', u'2.0.4', u'2.0.3', u'2.0.1', u'2.0.0']\n\n assert mock_get_versions.call_count == 1\n assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 232, "n_words": 138, "vocab_size": 100, "complexity": 2, "nloc": 29, "token_counts": 337, "n_ast_nodes": 535, "n_identifiers": 51, "random_cut": "def test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch, tmp_path_factory):\n test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))\n concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)\n multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)\n dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)\n\n matches = RequirementCandidates()\n mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)\n monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_match" }, { "id": 9809, "commit_id": "3331b824d2e3f7a65f5df7903382a0c0a30dcc61", "repo": "gensim", "path": "setup.py", "file_name": "setup.py", "fun_name": "finalize_options", "commit_message": "Implement numpy hack in setup.py to enable install under Poetry (#3363)\n\n* Closes #3362: Install issue poetry\r\n\r\n* get rid of redundant exception handling\r\n\r\nthis code can never raise an exception, so we shouldn't be expecting\r\nthem\r\n\r\nCo-authored-by: Michael Penkov ", "code": "def finalize_options(self):\n build_ext.finalize_options(self)\n\n import builtins\n builtins.__NUMPY_SETUP__ = False\n\n import numpy\n self.include_dirs.append(numpy.get_include())\n\n if need_cython():\n import Cython.Build\n Cython.Build.cythonize(list(make_c_ext(use_cython=True)), language_level=3)\n Cython.Build.cythonize(list(make_cpp_ext(use_cython=True)), language_level=3)\n\n", "url": "https://github.com/RaRe-Technologies/gensim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 93, "n_words": 19, "vocab_size": 16, "complexity": 2, "nloc": 10, "token_counts": 81, "n_ast_nodes": 132, "n_identifiers": 18, "random_cut": "def finalize_options(self):\n build_ext.finalize_options(self)\n\n import builtins\n builtins.__NUMPY_SETUP__ = False\n\n import numpy\n self.include_dirs.append(numpy.get_include())\n\n if need_cython():\n import Cython.Build\n Cython.Build.cythonize(list(make_c_ext(use_cython=True)), language_level=3)\n Cython" }, { "id": 44943, "commit_id": "1b568d73e1dfb838a3a0446e3a6063b9f27f04b8", "repo": "airflow", "path": "tests/providers/google/cloud/hooks/test_translate.py", "file_name": "test_translate.py", "fun_name": "test_translate_client_creation", "commit_message": "Extract ClientInfo to module level (#21554)", "code": "def test_translate_client_creation(self, 
mock_client, mock_get_creds):\n result = self.hook.get_conn()\n mock_client.assert_called_once_with(credentials=mock_get_creds.return_value, client_info=CLIENT_INFO)\n assert mock_client.return_value == result\n assert self.hook._client == result\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 44, "n_words": 17, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 46, "n_ast_nodes": 70, "n_identifiers": 13, "random_cut": "def test_translate_client_creation(self, mock_client, mock_get_creds):\n result = self.hook.get_conn()\n mock_client.assert_called_once_with(credentials=mock_get_creds.r" }, { "id": 66208, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/training_event/test_training_event.py", "file_name": "test_training_event.py", "fun_name": "create_training_program", "commit_message": "style: format code with black", "code": "def create_training_program(training_program):\n\tif not frappe.db.get_value(\"Training Program\", training_program):\n\t\tfrappe.get_doc(\n\t\t\t{\n\t\t\t\t\"doctype\": \"Training Program\",\n\t\t\t\t\"training_program\": training_program,\n\t\t\t\t\"description\": training_program,\n\t\t\t}\n\t\t).insert()\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 9, "n_words": 18, "vocab_size": 16, "complexity": 2, "nloc": 9, "token_counts": 41, "n_ast_nodes": 73, "n_identifiers": 7, "random_cut": "def create_training_program(training_program):\n\tif not frappe.db.get_value(\"Training Program\", training_program):\n\t\tfrappe.get_doc(\n\t\t\t{\n\t\t\t\t\"doctype\": \"Training Program\",\n\t\t\t\t\"training_program\": training_program,\n\t\t\t\t" }, { "id": 163209, "commit_id": "3111ff0317171f03c8ed2e3f7a153872c85d798f", "repo": "pandas", "path": "pandas/tests/series/indexing/test_setitem.py", "file_name": "test_setitem.py", "fun_name": "assert_warns", "commit_message": "TST/REF: port coercion tests to SetitemCastingEquivalents (#45209)", "code": "def assert_warns(self, request):\n # check that we issue a FutureWarning about timezone-matching\n if request.function.__name__ == \"test_slice_key\":\n key = request.getfixturevalue(\"key\")\n if not isinstance(key, slice):\n # The test is a no-op, so no warning will be issued\n yield\n return\n\n exp_dtype = request.getfixturevalue(\"exp_dtype\")\n val = request.getfixturevalue(\"val\")\n if exp_dtype == object and isinstance(val, Timestamp) and val.tz is not None:\n with tm.assert_produces_warning(FutureWarning, match=\"mismatched timezone\"):\n yield\n else:\n yield\n\n\n@pytest.mark.parametrize(\n \"val,exp_dtype\",\n [(Timedelta(\"12 day\"), \"timedelta64[ns]\"), (1, object), (\"x\", object)],\n)", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"val,exp_dtype\",\n [(Timedelta(\"12 day\"), \"timedelta64[ns]\"), (1, object), (\"x\", object)],\n)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 217, "n_words": 72, "vocab_size": 59, "complexity": 6, "nloc": 13, "token_counts": 86, "n_ast_nodes": 206, "n_identifiers": 22, "random_cut": "def assert_warns(self, request):\n # check that we issue a FutureWarning about timezone-matching\n if request.function.__name__ == \"test_slice_key\":\n key = request.getfixturevalue(\"key\")\n if not isinstance(key, slice):\n # The test is a no-op, so no warning 
will be issued\n yield\n return\n\n exp_dtype = request.getfixturevalue(\"exp_dtype\")\n val = request.getfixturevalue(\"val\")\n if exp_dtype == object and isinstance(val, Timestamp) and val.tz is not None:\n wit" }, { "id": 295776, "commit_id": "c765e11f55282530275396f8bdc837cb96259920", "repo": "core", "path": "tests/components/telegram_bot/test_broadcast.py", "file_name": "test_broadcast.py", "fun_name": "test_setup", "commit_message": "Fix telegram broadcast (#69452)", "code": "async def test_setup(hass):\n \n assert await async_setup_component(\n hass,\n \"telegram_bot\",\n {\n \"telegram_bot\": {\n \"platform\": \"broadcast\",\n \"api_key\": \"1234567890:ABC\",\n \"allowed_chat_ids\": [1],\n }\n },\n )\n await hass.async_block_till_done()\n\n assert hass.services.has_service(\"telegram_bot\", \"send_message\") is True\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 137, "n_words": 27, "vocab_size": 24, "complexity": 1, "nloc": 14, "token_counts": 55, "n_ast_nodes": 103, "n_identifiers": 6, "random_cut": "async def test_setup(hass):\n \n assert await async_setup_componen" }, { "id": 121242, "commit_id": "9769a0accf77c4e62ff3ace0f71b5c0697b35b54", "repo": "jax", "path": "jax/_src/numpy/ufuncs.py", "file_name": "ufuncs.py", "fun_name": "_logical_op", "commit_message": "DOC: ensure that _wraps() generates correct links to wrapped functions", "code": "def _logical_op(np_op, bitwise_op):\n @_wraps(np_op, update_doc=False, module='numpy')\n @partial(jit, inline=True)\n def op(*args):\n zero = lambda x: lax.full_like(x, shape=(), fill_value=0)\n args = (x if dtypes.issubdtype(dtypes.dtype(x), np.bool_) else lax.ne(x, zero(x))\n for x in args)\n return bitwise_op(*_promote_args(np_op.__name__, *args))\n return op\n\n\nfabs = _one_to_one_unop(np.fabs, lax.abs, True)\nbitwise_not = _one_to_one_unop(np.bitwise_not, lax.bitwise_not)\ninvert = _one_to_one_unop(np.invert, lax.bitwise_not)\nnegative = _one_to_one_unop(np.negative, lax.neg)\npositive = _one_to_one_unop(np.positive, lambda x: x)\nfloor = _one_to_one_unop(np.floor, lax.floor, True)\nceil = _one_to_one_unop(np.ceil, lax.ceil, True)\nexp = _one_to_one_unop(np.exp, lax.exp, True)\nlog = _one_to_one_unop(np.log, lax.log, True)\nexpm1 = _one_to_one_unop(np.expm1, lax.expm1, True)\nlog1p = _one_to_one_unop(np.log1p, lax.log1p, True)\nsin = _one_to_one_unop(np.sin, lax.sin, True)\ncos = _one_to_one_unop(np.cos, lax.cos, True)\ntan = _one_to_one_unop(np.tan, lax.tan, True)\narcsin = _one_to_one_unop(np.arcsin, lax.asin, True)\narccos = _one_to_one_unop(np.arccos, lax.acos, True)\narctan = _one_to_one_unop(np.arctan, lax.atan, True)\nsinh = _one_to_one_unop(np.sinh, lax.sinh, True)\ncosh = _one_to_one_unop(np.cosh, lax.cosh, True)\narcsinh = _one_to_one_unop(np.arcsinh, lax.asinh, True)\ntanh = _one_to_one_unop(np.tanh, lax.tanh, True)\narctanh = _one_to_one_unop(np.arctanh, lax.atanh, True)\nsqrt = _one_to_one_unop(np.sqrt, lax.sqrt, True)\ncbrt = _one_to_one_unop(np.cbrt, lax.cbrt, True)\n\nadd = _maybe_bool_binop(np.add, lax.add, lax.bitwise_or)\nbitwise_and = _one_to_one_binop(np.bitwise_and, lax.bitwise_and)\nbitwise_or = _one_to_one_binop(np.bitwise_or, lax.bitwise_or)\nbitwise_xor = _one_to_one_binop(np.bitwise_xor, lax.bitwise_xor)\nleft_shift = _one_to_one_binop(np.left_shift, lax.shift_left)\nequal = _one_to_one_binop(np.equal, lax.eq)\nmultiply = _maybe_bool_binop(np.multiply, lax.mul, lax.bitwise_and)\nnot_equal = 
_one_to_one_binop(np.not_equal, lax.ne)\nsubtract = _one_to_one_binop(np.subtract, lax.sub)\narctan2 = _one_to_one_binop(np.arctan2, lax.atan2, True)\nminimum = _one_to_one_binop(np.minimum, lax.min)\nmaximum = _one_to_one_binop(np.maximum, lax.max)\nfloat_power = _one_to_one_binop(np.float_power, lax.pow, True)\nnextafter = _one_to_one_binop(np.nextafter, lax.nextafter, True, True)\n\ngreater_equal = _comparison_op(np.greater_equal, lax.ge)\ngreater = _comparison_op(np.greater, lax.gt)\nless_equal = _comparison_op(np.less_equal, lax.le)\nless = _comparison_op(np.less, lax.lt)\n\nlogical_and = _logical_op(np.logical_and, lax.bitwise_and)\nlogical_not = _logical_op(np.logical_not, lax.bitwise_not)\nlogical_or = _logical_op(np.logical_or, lax.bitwise_or)\nlogical_xor = _logical_op(np.logical_xor, lax.bitwise_xor)\n\n\n@_wraps(np.arccosh, module='numpy')\n@jit", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@_wraps(np.arccosh, module='numpy')\n@jit", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 225, "n_words": 250, "vocab_size": 169, "complexity": 1, "nloc": 5, "token_counts": 33, "n_ast_nodes": 1075, "n_identifiers": 95, "random_cut": "def _logical_op(np_op, bitwise_op):\n @_wraps(np_op, update_doc=False, module='numpy')\n @partial(jit, inline=True)\n def op(*args):\n zero = lambda x: lax.full_like(x, shape=(), fill_value=0)\n args = (x if dtypes.issubdtype(dtypes.dtype(x), np.bool_) else lax.ne(x, zero(x))\n for x in args)\n return bitwise_op(*_promote_args(np_op.__name__" }, { "id": 27437, "commit_id": "f2ce999fa5865917b8d104d38ef3269eebaf6c06", "repo": "saleor", "path": "saleor/plugins/webhook/tests/subscription_webhooks/payloads.py", "file_name": "payloads.py", "fun_name": "generate_app_payload", "commit_message": "Meta fields added to subscription webhooks event types. (#9759)\n\n* Meta fields added to subscription webhooks event types.\r\n\r\n* Imports adjustments.\r\n\r\n* Change Event type from Union to Interface.\r\n\r\n* Rebase fixes.\r\n\r\n* Review fixes\r\n\r\n* Handle AnonymousUser as requestor.", "code": "def generate_app_payload(app, app_global_id):\n return json.dumps(\n {\n \"app\": {\n \"id\": app_global_id,\n \"isActive\": app.is_active,\n \"name\": app.name,\n \"appUrl\": app.app_url,\n }\n }\n )\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 120, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 11, "token_counts": 41, "n_ast_nodes": 69, "n_identifiers": 8, "random_cut": "def generate_app_payload(app, app_global_id):\n return json.dumps(\n {\n \"app\": {\n \"id\": app_global_id,\n " }, { "id": 127687, "commit_id": "8840be1942a69b2595a05c5c5556b0daec7abbcd", "repo": "ray", "path": "dashboard/modules/job/tests/test_job_agent.py", "file_name": "test_job_agent.py", "fun_name": "test_submit_job", "commit_message": "[Job Submission][refactor 4/N] Complete the remaining interfaces on JobAgent (#28533)\n\nSigned-off-by: Catch-Bull \r\njust need to implement stop_job, and I remove get_job_info because we can access JobInfoStorage without call `ray.init`.", "code": "async def test_submit_job(job_sdk_client, runtime_env_option, monkeypatch):\n # This flag allows for local testing of runtime env conda functionality\n # without needing a built Ray wheel. 
Rather than insert the link to the\n # wheel into the conda spec, it links to the current Python site.\n monkeypatch.setenv(\"RAY_RUNTIME_ENV_LOCAL_DEV_MODE\", \"1\")\n\n agent_client, head_client = job_sdk_client\n\n runtime_env = runtime_env_option[\"runtime_env\"]\n runtime_env = upload_working_dir_if_needed(runtime_env, logger=logger)\n runtime_env = upload_py_modules_if_needed(runtime_env, logger=logger)\n runtime_env = RuntimeEnv(**runtime_env_option[\"runtime_env\"]).to_dict()\n request = validate_request_type(\n {\"runtime_env\": runtime_env, \"entrypoint\": runtime_env_option[\"entrypoint\"]},\n JobSubmitRequest,\n )\n\n submit_result = await agent_client.submit_job_internal(request)\n job_id = submit_result.submission_id\n\n wait_for_condition(\n partial(\n _check_job, client=head_client, job_id=job_id, status=JobStatus.SUCCEEDED\n ),\n timeout=120,\n )\n\n # There is only one node, so there is no need to replace the client of the JobAgent\n resp = await agent_client.get_job_logs_internal(job_id)\n assert runtime_env_option[\"expected_logs\"] in resp.logs\n\n\n@pytest.mark.asyncio", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.asyncio", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 215, "n_words": 116, "vocab_size": 89, "complexity": 1, "nloc": 21, "token_counts": 140, "n_ast_nodes": 241, "n_identifiers": 34, "random_cut": "async def test_submit_job(job_sdk_client, runtime_env_option, monkeypatch):\n # This flag allows for local testing of runtime env conda functionality\n # without needing a built Ray wheel. Rather than insert the link to the\n # wheel into the conda spec, it links to the current Python site.\n monkeypatch.setenv(\"RAY_RUNTIME_ENV_LOCAL_DEV_MODE\", \"1\")\n\n agent_client, head_client = job_sdk_client\n\n runtime_env = runtime_env_option[\"runtime_env\"]\n runtime_env = upload_working_dir_if_needed(runtime_env, logger=logger)\n runtime_env = upload_py_modules_if_needed(runtime_env, logger=logger)\n runtime_env = RuntimeEnv(**runtime_env_option[\"runtime_env\"]).to_dict()\n request = validate_request_type(\n {\"runtime_env\": runtime_env, \"entrypoint\": runtime_env_option[\"entrypoint\"]},\n JobSubmitRequest,\n )\n\n submit_result = await agent_client.submit_job_internal(request)\n job_id = submit_result.submission_id\n\n wait_for_condition(\n partial(\n _check_job, client=head_client, job_id=job_id, status=JobStatus.SUCCEEDED\n ),\n timeout=120,\n )\n\n # There is only one node, so there is no need to replace the client of the" }, { "id": 176671, "commit_id": "84aa3823e2904fd63178608373ffbed3096ae0d9", "repo": "networkx", "path": "networkx/algorithms/traversal/tests/test_dfs.py", "file_name": "test_dfs.py", "fun_name": "test_predecessor", "commit_message": "MAINT: Update dfs_test with more comprehensive tests (#5654)", "code": "def test_predecessor(self):\n assert nx.dfs_predecessors(self.G, source=0) == {1: 0, 2: 1, 3: 1, 4: 2}\n assert nx.dfs_predecessors(self.D) == {1: 0, 3: 2}\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 34, "n_words": 21, "vocab_size": 14, "complexity": 1, "nloc": 3, "token_counts": 55, "n_ast_nodes": 80, "n_identifiers": 7, "random_cut": "def test_predecessor(self):\n assert nx.dfs_predecessors(self.G, source=0) == {1: 0, 2: 1, 3: 1, 4: 2}\n assert nx.dfs_predecessors(self" }, { "id": 196091, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": 
"sympy/combinatorics/graycode.py", "file_name": "graycode.py", "fun_name": "graycode_subsets", "commit_message": "Updated import locations", "code": "def graycode_subsets(gray_code_set):\n \n for bitstring in list(GrayCode(len(gray_code_set)).generate_gray()):\n yield get_subset_from_bitstring(gray_code_set, bitstring)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 22, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 3, "token_counts": 31, "n_ast_nodes": 56, "n_identifiers": 8, "random_cut": "def graycode_subsets(gray_code_set):\n \n for bitstring in list(GrayCode(len(gray_code_set)).generate_gray()):\n yield get_subset_from_bitstring(gray_code_set, bitstring)\n" }, { "id": 165017, "commit_id": "03fef5f0e35200aa5828975b62782bcf11faa0d2", "repo": "pandas", "path": "pandas/tests/plotting/test_series.py", "file_name": "test_series.py", "fun_name": "test_plot_6951", "commit_message": "TST: Clean tests/plotting (#45992)", "code": "def test_plot_6951(self, ts):\n # GH 6951\n ax = _check_plot_works(ts.plot, subplots=True)\n self._check_axes_shape(ax, axes_num=1, layout=(1, 1))\n\n ax = _check_plot_works(ts.plot, subplots=True, layout=(-1, 1))\n self._check_axes_shape(ax, axes_num=1, layout=(1, 1))\n ax = _check_plot_works(ts.plot, subplots=True, layout=(1, -1))\n self._check_axes_shape(ax, axes_num=1, layout=(1, 1))\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 82, "n_words": 34, "vocab_size": 17, "complexity": 1, "nloc": 7, "token_counts": 115, "n_ast_nodes": 165, "n_identifiers": 10, "random_cut": "def test_plot_6951(self, ts):\n # GH 6951\n ax = _check_plot_works(ts.plot, subplots=True)\n self._check_axes_shape(ax, axes_num=1, layout=(1, 1))\n\n " }, { "id": 76493, "commit_id": "229fbf476aded9d5d5a6b32e1ad03025e8cbf392", "repo": "wagtail", "path": "wagtail/admin/ui/sidebar.py", "file_name": "sidebar.py", "fun_name": "js_args", "commit_message": "Sidebar style updates (#8118)\n\nCo-authored-by: Thibaud Colas ", "code": "def js_args(self):\n return [\n reverse(\"wagtailadmin_home\"),\n ]\n\n\n@adapter(\"wagtail.sidebar.SearchModule\", base=BaseSidebarAdapter)", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "@adapter(\"wagtail.sidebar.SearchModule\", base=BaseSidebarAdapter)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 31, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 19, "token_counts": 51, "n_ast_nodes": 39, "n_identifiers": 6, "random_cut": "def js_args(self):\n return [\n reverse(\"wagtailadmin_home\"),\n ]\n\n\n@adapter(\"wagtail.sidebar.SearchModule\", base=B" }, { "id": 267208, "commit_id": "a12e0a0e874c6c0d18a1a2d83dcb106d207136af", "repo": "ansible", "path": "lib/ansible/cli/doc.py", "file_name": "doc.py", "fun_name": "jdump", "commit_message": "ansible-config added json/yaml output to list/dump (#77447)\n\n\r\n fixes #733644", "code": "def jdump(text):\n try:\n display.display(json_dump(text))\n except TypeError as e:\n display.vvv(traceback.format_exc())\n raise AnsibleError('We could not convert all the documentation into JSON as there was a conversion issue: %s' % to_native(e))\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 54, "n_words": 28, "vocab_size": 27, "complexity": 2, "nloc": 6, "token_counts": 41, "n_ast_nodes": 
72, "n_identifiers": 11, "random_cut": "def jdump(text):\n try:\n display.display(json_dump(text))\n except TypeError as e:\n displa" }, { "id": 252909, "commit_id": "c69239bb90c55993326c324908ac78cc2a174e44", "repo": "mitmproxy", "path": "test/mitmproxy/test_addonmanager.py", "file_name": "test_addonmanager.py", "fun_name": "test_simple", "commit_message": "switch to stdlib logging\n\nmitmproxy previously used a homegrown logging mechanism based around\n`mitmproxy.ctx.log` and the `add_log` hook. This worked well for everything\nwe control, but does not work outside the mitmproxy universe.\nFor now we have simply ignored logging in e.g. tornado or h2, but with the\nupcoming introduction of mitmproxy_wireguard we now have a dependency\non some Rust/PyO3 code for which we definitely want logs, but which also\ncannot easily be changed to use our homegrown logging (PyO3 does the heavy\nlifting to add interoperability with stdlib logging). Long story short,\nwe want to introduce a log handler for stdlib logging.\n\nNow there are two ways how such a handler could operate:\n\n 1. We could build a handler that forwards all stdlib log events\n into our homegrown mechanism.\n 2. We embrace stdlib's logging as the correct way to do things,\n and get rid of our homegrown stuff.\n\nThis PR follows the second approach by removing the `add_log` hook and\nrewriting the `TermLog` and `EventStore` addons to listen for stdlib log records.\nThis means that all `mitmproxy.ctx.log.info` events are now simply `logging.info` etc.\n\nOne upside of this approach is that many parts of the codebase now don't depend\non the existence of `mitmproxy.ctx` and we can use off-the-shelf things like pytest's\n`caplog`. We can also now better colorize log output and/or add timestamps.", "code": "async def test_simple(caplog):\n with taddons.context(loadcore=False) as tctx:\n a = tctx.master.addons\n\n assert len(a) == 0\n a.add(TAddon(\"one\"))\n assert a.get(\"one\")\n assert not a.get(\"two\")\n assert len(a) == 1\n a.clear()\n assert len(a) == 0\n assert not a.chain\n\n with taddons.context(loadcore=False) as tctx:\n a.add(TAddon(\"one\"))\n\n a.trigger(\"nonexistent\")\n assert \"AssertionError\" in caplog.text\n\n f = tflow.tflow()\n a.trigger(hooks.RunningHook())\n a.trigger(HttpResponseHook(f))\n assert \"not callable\" in caplog.text\n caplog.clear()\n\n caplog.clear()\n a.get(\"one\").response = addons\n a.trigger(HttpResponseHook(f))\n assert \"not callable\" not in caplog.text\n\n a.remove(a.get(\"one\"))\n assert not a.get(\"one\")\n\n ta = TAddon(\"one\")\n a.add(ta)\n a.trigger(hooks.RunningHook())\n assert ta.running_called\n\n assert ta in a\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 280, "n_words": 79, "vocab_size": 39, "complexity": 1, "nloc": 31, "token_counts": 229, "n_ast_nodes": 404, "n_identifiers": 26, "random_cut": "async def test_simple(caplog):\n with taddons.context(loadcore=False) as tctx:\n a = tctx.master.addons\n\n assert len(a) == 0\n a.add(TAddon(\"one\"))\n assert a.get(\"one\")\n assert not a.get(\"two\")\n assert len(a) == 1\n a.clear()\n assert len(a) == 0\n assert not a.chain\n\n with taddons.context(loadcore=False) as tctx:\n a.add(TAddon(\"one\"))\n\n a.trigger(\"nonexistent\")\n assert \"AssertionError\" in caplog.text\n\n f = tflow.tflow()\n " }, { "id": 193603, "commit_id": "658ca539369c80d8d609bf0e716dc9109186fbf4", "repo": "vision", "path": 
"test/test_prototype_transforms_functional.py", "file_name": "test_prototype_transforms_functional.py", "fun_name": "test_scripted_smoke", "commit_message": "cleanup prototype transforms functional tests (#6622)\n\n* cleanup prototype transforms functional tests\r\n\r\n* fix\r\n\r\n* oust local functions", "code": "def test_scripted_smoke(self, info, args_kwargs, device):\n dispatcher = script(info.dispatcher)\n\n (image_feature, *other_args), kwargs = args_kwargs.load(device)\n image_simple_tensor = torch.Tensor(image_feature)\n\n dispatcher(image_simple_tensor, *other_args, **kwargs)\n\n # TODO: We need this until the dispatchers below also have `DispatcherInfo`'s. If they do, `test_scripted_smoke`\n # replaces this test for them.", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 75, "n_words": 41, "vocab_size": 37, "complexity": 1, "nloc": 5, "token_counts": 52, "n_ast_nodes": 81, "n_identifiers": 14, "random_cut": "def test_scripted_smoke(self, info, args_kwargs, device):\n dispatcher = script(info.dispatcher)\n\n (image_feature, *other_args), kwargs = args_kwargs.load(device)\n image_simple_tensor = torch.Tensor(image_feature)\n\n dispatcher(image_simple_tensor, *other_args, **kwargs)\n\n # TODO: We need this until the dispatchers below also have `DispatcherInfo`'s. If they do, `test_scripted_smoke`\n # replaces this te" }, { "id": 81675, "commit_id": "c59bbdecdbdd920c5d3d298d691129c6bbc94c5e", "repo": "awx", "path": "awx/main/dispatch/control.py", "file_name": "control.py", "fun_name": "cancel", "commit_message": "Refactor canceling to work through messaging and signals, not database\n\nIf canceled attempted before, still allow attempting another cancel\nin this case, attempt to send the sigterm signal again.\nKeep clicking, you might help!\n\nReplace other cancel_callbacks with sigterm watcher\n adapt special inventory mechanism for this too\n\nGet rid of the cancel_watcher method with exception in main thread\n\nHandle academic case of sigterm race condition\n\nProcess cancelation as control signal\n\nFully connect cancel method and run_dispatcher to control\n\nNever transition workflows directly to canceled, add logs", "code": "def cancel(self, task_ids, *args, **kwargs):\n return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 17, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 34, "n_ast_nodes": 53, "n_identifiers": 7, "random_cut": "def cancel(self, task_ids, *args, **kwargs):\n return self.control_with_reply('cancel', *args, extra_data={'task_ids': task_ids}, **kwargs)\n" }, { "id": 206034, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/forms/widgets.py", "file_name": "widgets.py", "fun_name": "render_js", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def render_js(self):\n return [\n format_html('', self.absolute_path(path))\n for path in self._js\n ]\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 47, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 25, "n_ast_nodes": 40, "n_identifiers": 6, "random_cut": "def render_js(self):\n return [\n format_html('', self.absolute_path(path))\n 
for path in self._js\n ]\n" }, { "id": 69301, "commit_id": "e392ea1104fee5add5c893c4e092edb6ad21f486", "repo": "erpnext", "path": "erpnext/accounts/doctype/pos_invoice/pos_invoice.py", "file_name": "pos_invoice.py", "fun_name": "get_bundle_availability", "commit_message": "fix: POS properly validate stock for bundle products\n\nStock availability was not calculated properly for Product Bundle with non stock item so i have added logic to properly calculate that as well.", "code": "def get_bundle_availability(bundle_item_code, warehouse):\n\tproduct_bundle = frappe.get_doc(\"Product Bundle\", bundle_item_code)\n\n\tbundle_bin_qty = 1000000\n\tfor item in product_bundle.items:\n\t\titem_bin_qty = get_bin_qty(item.item_code, warehouse)\n\t\titem_pos_reserved_qty = get_pos_reserved_qty(item.item_code, warehouse)\n\t\tavailable_qty = item_bin_qty - item_pos_reserved_qty\n\n\t\tmax_available_bundles = available_qty / item.qty\n\t\tif bundle_bin_qty > max_available_bundles and frappe.get_value(\n\t\t\t\"Item\", item.item_code, \"is_stock_item\"\n\t\t):\n\t\t\tbundle_bin_qty = max_available_bundles\n\n\tpos_sales_qty = get_pos_reserved_qty(bundle_item_code, warehouse)\n\treturn bundle_bin_qty - pos_sales_qty\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 40, "n_words": 54, "vocab_size": 35, "complexity": 4, "nloc": 14, "token_counts": 92, "n_ast_nodes": 145, "n_identifiers": 19, "random_cut": "def get_bundle_availability(bundle_item_code, warehouse):\n\tproduct_bundle = frappe.get_doc(\"Product Bundle\", bundle_item_code)\n\n\tbundle_bin_qty = 1000000\n\tfor item in product_bundle.items:\n\t\titem_bin_qty = get_bin_qty(item.item_code, warehouse)\n\t\titem_pos_reserved_qty = get_pos_reserved_qty(" }, { "id": 92867, "commit_id": "b72ff60d06f2cd1dae8390bebf71d191ae341faf", "repo": "sentry", "path": "src/sentry/runner/commands/sendmail.py", "file_name": "sendmail.py", "fun_name": "sendmail", "commit_message": "feat(runner): Add sendmail command to send emails from sentry command (#36732)", "code": "def sendmail(files, fail_silently):\n \n from sentry.runner import configure\n\n configure()\n\n for file in files:\n with open(file) as f:\n send_prepared_email(f.read(), fail_silently=fail_silently)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 48, "n_words": 18, "vocab_size": 18, "complexity": 2, "nloc": 6, "token_counts": 42, "n_ast_nodes": 72, "n_identifiers": 11, "random_cut": "def sendmail(files, fail_silently):\n \n fr" }, { "id": 154811, "commit_id": "d86dda5094eba47840f42a21cf4b2c953e698960", "repo": "modin", "path": "modin/pandas/test/test_series.py", "file_name": "test_series.py", "fun_name": "test_str_get", "commit_message": "TEST-#5040: Rework test_series using eval_general() (#5041)\n\nSigned-off-by: Vasily Litvinov ", "code": "def test_str_get(data, i):\n modin_series, pandas_series = create_test_series(data)\n eval_general(modin_series, pandas_series, lambda series: series.str.get(i))\n\n\n@pytest.mark.parametrize(\n \"data\", test_string_list_data_values, ids=test_string_list_data_keys\n)\n@pytest.mark.parametrize(\"sep\", string_sep_values, ids=string_sep_keys)", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"data\", test_string_list_data_values, ids=test_string_list_data_keys\n)\n@pytest.mark.parametrize(\"sep\", 
string_sep_values, ids=string_sep_keys)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 25, "n_words": 20, "vocab_size": 20, "complexity": 1, "nloc": 3, "token_counts": 33, "n_ast_nodes": 98, "n_identifiers": 18, "random_cut": "def test_str_get(data, i):\n modin_series, pandas_series = create_test_series(data)\n eval_general(modin_series, pandas_series, lambda series: series.str.get(i))\n\n\n@pytest.mark.parametrize(\n \"data\", test_string_list_data_values, ids=test_string_list_data_keys\n)\n@pytest.mark.parametrize(\"sep\", string_sep_values, ids=string_sep_keys)" }, { "id": 114202, "commit_id": "284fb890eca785bec894e69c6d5b122e83ef93d0", "repo": "mindsdb", "path": "mindsdb/api/mysql/mysql_proxy/datahub/information_schema.py", "file_name": "information_schema.py", "fun_name": "_get_columns", "commit_message": "return columns list for files", "code": "def _get_columns(self):\n columns = self.information_schema['COLUMNS']\n\n # NOTE there is a lot of types in mysql, but listed below should be enough for our purposes\n row_templates = {\n 'text': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', None, 'YES', 'varchar', 1024, 3072, None, None, None, 'utf8', 'utf8_bin', 'varchar(1024)', None, None, 'select', None, None],\n 'timestamp': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', 'CURRENT_TIMESTAMP', 'YES', 'timestamp', None, None, None, None, 0, None, None, 'timestamp', None, None, 'select', None, None],\n 'bigint': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', None, 'YES', 'bigint', None, None, 20, 0, None, None, None, 'bigint unsigned', None, None, 'select', None, None],\n 'float': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', None, 'YES', 'float', None, None, 12, 0, None, None, None, 'float', None, None, 'select', None, None]\n }\n\n result = []\n\n for table_name in self.information_schema:\n table_columns = self.information_schema[table_name]\n for i, column_name in enumerate(table_columns):\n result_row = row_templates['text'].copy()\n result_row[1] = 'information_schema'\n result_row[2] = table_name\n result_row[3] = column_name\n result_row[4] = i\n result.append(result_row)\n\n mindsb_dn = self.get('MINDSDB')\n for table_name in mindsb_dn.get_tables():\n table_columns = mindsb_dn.get_table_columns(table_name)\n for i, column_name in enumerate(table_columns):\n result_row = row_templates['text'].copy()\n result_row[1] = 'mindsdb'\n result_row[2] = table_name\n result_row[3] = column_name\n result_row[4] = i\n result.append(result_row)\n\n mindsb_dn = self.get('FILES')\n for table_name in mindsb_dn.get_tables():\n table_columns = mindsb_dn.get_table_columns(table_name)\n for i, column_name in enumerate(table_columns):\n result_row = row_templates['text'].copy()\n result_row[1] = 'files'\n result_row[2] = table_name\n result_row[3] = column_name\n result_row[4] = i\n result.append(result_row)\n\n df = pd.DataFrame(result, columns=columns)\n return df\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 679, "n_words": 216, "vocab_size": 85, "complexity": 7, "nloc": 40, "token_counts": 433, "n_ast_nodes": 685, "n_identifiers": 21, "random_cut": "def _get_columns(self):\n columns = self.information_schema['COLUMNS']\n\n # NOTE there is a lot of types in mysql, but listed below should be enough for our purposes\n row_templates = {\n 'text': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', None, 'YES', 
'varchar', 1024, 3072, None, None, None, 'utf8', 'utf8_bin', 'varchar(1024)', None, None, 'select', None, None],\n 'timestamp': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', 'CURRENT_TIMESTAMP', 'YES', 'timestamp', None, None, None, None, 0, None, None, 'timestamp', None, None, 'select', None, None],\n 'bigint': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', None, 'YES', 'bigint', None, None, 20, 0, None, None, None, 'bigint unsigned', None, None, 'select', None, None],\n 'float': ['def', 'SCHEMA_NAME', 'TABLE_NAME', 'COLUMN_NAME', 'COL_INDEX', None, 'YES', 'float', None, None, 12, 0, None, None, None, 'float', None, None, 'select', None, None]\n }\n\n result = []\n\n for table_name in self.information_schema:\n table_columns = self.information_schema[table_name]\n for i, column_name in enumerate(table_columns):\n result_row = row_templates['text'].copy()\n result_row[1] = 'information_schema'\n result_row[2] = table_name\n result_row[3] = column_name\n result_row[4] = i\n result.append(result_row)\n\n mindsb_dn = self.get('MINDSDB')\n for table_name in mindsb_dn.get_tables():\n table_columns = mindsb_dn.get_table_columns(table_name)\n for i, column_name in enumerate(table_columns):\n result_row = row_templates['text'].copy()\n resu" }, { "id": 73767, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/migrations/0027_fix_collection_path_collation.py", "file_name": "0027_fix_collection_path_collation.py", "fun_name": "set_collection_path_collation", "commit_message": "Reformat with black", "code": "def set_collection_path_collation(apps, schema_editor):\n \n if schema_editor.connection.vendor == \"postgresql\":\n schema_editor.execute(\n \n )\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 41, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 7, "token_counts": 23, "n_ast_nodes": 43, "n_identifiers": 6, "random_cut": "def set_collection_path_collation(apps, schema_editor):\n \n if schema_editor.connection.vendor == \"postgresql\":\n schema_editor.execute(\n \n )\n\n" }, { "id": 261683, "commit_id": "35b826a49c66bc6cb03dc751784483806aba6400", "repo": "scikit-learn", "path": "sklearn/manifold/tests/test_isomap.py", "file_name": "test_isomap.py", "fun_name": "test_pipeline_with_nearest_neighbors_transformer", "commit_message": "TST use global_dtype in sklearn/manifold/tests/test_isomap.py (#22673)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: Jérémie du Boisberranger ", "code": "def test_pipeline_with_nearest_neighbors_transformer(global_dtype):\n # Test chaining NearestNeighborsTransformer and Isomap with\n # neighbors_algorithm='precomputed'\n algorithm = \"auto\"\n n_neighbors = 10\n\n X, _ = datasets.make_blobs(random_state=0)\n X2, _ = datasets.make_blobs(random_state=1)\n\n X = X.astype(global_dtype, copy=False)\n X2 = X2.astype(global_dtype, copy=False)\n\n # compare the chained version and the compact version\n est_chain = pipeline.make_pipeline(\n neighbors.KNeighborsTransformer(\n n_neighbors=n_neighbors, algorithm=algorithm, mode=\"distance\"\n ),\n manifold.Isomap(n_neighbors=n_neighbors, metric=\"precomputed\"),\n )\n est_compact = manifold.Isomap(\n n_neighbors=n_neighbors, neighbors_algorithm=algorithm\n )\n\n Xt_chain = est_chain.fit_transform(X)\n Xt_compact = est_compact.fit_transform(X)\n assert_allclose(Xt_chain, Xt_compact)\n\n Xt_chain = est_chain.transform(X2)\n 
Xt_compact = est_compact.transform(X2)\n assert_allclose(Xt_chain, Xt_compact)\n\n\n@pytest.mark.parametrize(\n \"metric, p, is_euclidean\",\n [\n (\"euclidean\", 2, True),\n (\"manhattan\", 1, False),\n (\"minkowski\", 1, False),\n (\"minkowski\", 2, True),\n (lambda x1, x2: np.sqrt(np.sum(x1**2 + x2**2)), 2, False),\n ],\n)", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"metric, p, is_euclidean\",\n [\n (\"euclidean\", 2, True),\n (\"manhattan\", 1, False),\n (\"minkowski\", 1, False),\n (\"minkowski\", 2, True),\n (lambda x1, x2: np.sqrt(np.sum(x1**2 + x2**2)), 2, False),\n ],\n)", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 239, "n_words": 102, "vocab_size": 70, "complexity": 1, "nloc": 22, "token_counts": 154, "n_ast_nodes": 353, "n_identifiers": 36, "random_cut": "def test_pipeline_with_nearest_neighbors_transformer(global_dtype):\n # Test chaining NearestNeighborsTransformer and Isomap with\n # neighbors_algorithm='precomputed'\n algorithm = \"auto\"\n n_neighbors = 10\n\n X, _ = datasets.make_blobs(random_state=0)\n X2, _ = datasets.make_blobs(random_state=1)\n\n X = X.astype(global_dtype, copy=False)\n X2 = X2.astype(global_dtype, copy=False)\n\n # compare the chained version and the compact version\n est_chain = pipeline.make_pipeline(\n " }, { "id": 23174, "commit_id": "9f62b610dea6161627200ed85d92e19b1923279a", "repo": "PaddleOCR", "path": "ppocr/data/imaug/fce_aug.py", "file_name": "fce_aug.py", "fun_name": "generate_crop_target", "commit_message": "add fcenet", "code": "def generate_crop_target(self, image, all_polys, pad_h, pad_w):\n \n h, w, _ = image.shape\n h_array = np.zeros((h + pad_h * 2), dtype=np.int32)\n w_array = np.zeros((w + pad_w * 2), dtype=np.int32)\n\n text_polys = []\n for polygon in all_polys:\n rect = cv2.minAreaRect(polygon.astype(np.int32).reshape(-1, 2))\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n text_polys.append([box[0], box[1], box[2], box[3]])\n\n polys = np.array(text_polys, dtype=np.int32)\n for poly in polys:\n poly = np.round(poly, decimals=0).astype(np.int32)\n minx = np.min(poly[:, 0])\n maxx = np.max(poly[:, 0])\n w_array[minx + pad_w:maxx + pad_w] = 1\n miny = np.min(poly[:, 1])\n maxy = np.max(poly[:, 1])\n h_array[miny + pad_h:maxy + pad_h] = 1\n\n h_axis = np.where(h_array == 0)[0]\n w_axis = np.where(w_array == 0)[0]\n return h_axis, w_axis\n\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 301, "n_words": 103, "vocab_size": 66, "complexity": 3, "nloc": 22, "token_counts": 281, "n_ast_nodes": 424, "n_identifiers": 41, "random_cut": "def generate_crop_target(self, image, all_polys, pad_h, pad_w):\n \n h, w, _ = image.shape\n h_array = np.zeros((h + pad_h * 2), dtype=np.int32)\n w_array = np.zeros((w + pad_w * 2), dtype=np.int32)\n\n text_polys = []\n for polygon in all_polys:\n rect = cv2.minAreaRect(polygon.astype(np.int32).reshape(-1, 2))\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n text_polys.append([box[0], box[1], box[2], box[3]])\n\n polys = np.array(text_polys, dtype=np.int32)\n for poly in polys:\n poly = np.round(poly, decimals=0).astype(np.int32)\n minx = np.min(poly[:, 0])\n maxx = np.max(poly[:, 0])\n w_array[minx + pad_w:maxx + pad_w] = 1\n miny = np.min(po" }, { "id": 264595, "commit_id": "197dfca5b2d181369b90e40704ac9188d149a688", "repo": "netbox", "path": "netbox/dcim/views.py", "file_name": "views.py", "fun_name": 
"get_extra_context", "commit_message": "Fixes #8935: Correct ordering of next/previous racks to use naturalized names", "code": "def get_extra_context(self, request, instance):\n # Get 0U devices located within the rack\n nonracked_devices = Device.objects.filter(\n rack=instance,\n position__isnull=True,\n parent_bay__isnull=True\n ).prefetch_related('device_type__manufacturer')\n\n peer_racks = Rack.objects.restrict(request.user, 'view').filter(site=instance.site)\n\n if instance.location:\n peer_racks = peer_racks.filter(location=instance.location)\n else:\n peer_racks = peer_racks.filter(location__isnull=True)\n next_rack = peer_racks.filter(_name__gt=instance._name).first()\n prev_rack = peer_racks.filter(_name__lt=instance._name).reverse().first()\n\n reservations = RackReservation.objects.restrict(request.user, 'view').filter(rack=instance)\n power_feeds = PowerFeed.objects.restrict(request.user, 'view').filter(rack=instance).prefetch_related(\n 'power_panel'\n )\n\n device_count = Device.objects.restrict(request.user, 'view').filter(rack=instance).count()\n\n return {\n 'device_count': device_count,\n 'reservations': reservations,\n 'power_feeds': power_feeds,\n 'nonracked_devices': nonracked_devices,\n 'next_rack': next_rack,\n 'prev_rack': prev_rack,\n }\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 296, "n_words": 67, "vocab_size": 57, "complexity": 2, "nloc": 26, "token_counts": 221, "n_ast_nodes": 362, "n_identifiers": 32, "random_cut": "def get_extra_context(self, request, instance):\n # Get 0U devices located within the rack\n nonracked_devices = Device.objects.filter(\n rack=instance,\n position__isnull=True,\n parent_bay__isnull=True\n ).prefetch_related('device_type__manufacturer')\n\n peer_racks = Rack.objects.restrict(request.user, 'view').filter(site=instance.site)\n\n if instance.location:\n peer_racks = peer_racks.filter(location=instance.location)\n else:\n peer_racks = peer_racks.filter(location__isnull" }, { "id": 8608, "commit_id": "0b33b3f6524ecbf3671b2c911efe429feb93988f", "repo": "ludwig", "path": "tests/integration_tests/test_ray.py", "file_name": "test_ray.py", "fun_name": "test_ray_tabular", "commit_message": "int: Refactor `test_ray.py` to limit number of full train jobs (#2637)\n\n* refactors majority of ray tests to compare preprocessing results\r\n\r\n* reverted changes to concatenate_datasets; use fixed split config\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* added partial modin fix; fixed reshape guard\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Justin Zhao ", "code": "def test_ray_tabular(tmpdir, df_engine, ray_cluster_2cpu):\n input_features = [\n category_feature(encoder={\"vocab_size\": 2}, reduce_input=\"sum\"),\n number_feature(normalization=\"zscore\"),\n set_feature(),\n binary_feature(),\n bag_feature(),\n h3_feature(),\n date_feature(),\n ]\n output_features = [\n binary_feature(bool2str=[\"No\", \"Yes\"]),\n binary_feature(),\n number_feature(normalization=\"zscore\"),\n ]\n run_preprocessing(\n tmpdir,\n df_engine,\n input_features,\n output_features,\n )\n\n\n@pytest.mark.parametrize(\"dataset_type\", [\"csv\", \"parquet\"])\n@pytest.mark.distributed", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": 
"@pytest.mark.parametrize(\"dataset_type\", [\"csv\", \"parquet\"])\n@pytest.mark.distributed", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 148, "n_words": 35, "vocab_size": 29, "complexity": 1, "nloc": 21, "token_counts": 92, "n_ast_nodes": 182, "n_identifiers": 22, "random_cut": "def test_ray_tabular(tmpdir, df_engine, ray_cluster_2cpu):\n input_features = [\n category_feature(encoder={\"vocab_size\": 2}, reduce_input=\"sum\"),\n number_feature(normalization=\"zscore\"),\n set_feature(),\n binary_feature(),\n bag_feature(),\n h3_feature(),\n date_feature(),\n ]\n output_features = [\n binary_" }, { "id": 319393, "commit_id": "c8081595c4450780eade4921a81d0b1bd08105cc", "repo": "paperless-ngx", "path": "src/paperless_tika/parsers.py", "file_name": "parsers.py", "fun_name": "extract_metadata", "commit_message": "improve pdf generation", "code": "def extract_metadata(self, document_path, mime_type):\n result = []\n prefix_pattern = re.compile(r\"(.*):(.*)\")\n\n try:\n parsed = self.get_tika_result(document_path)\n except ParseError as e:\n self.log(\n \"warning\",\n f\"Error while fetching document metadata for \" f\"{document_path}: {e}\",\n )\n return result\n\n for key, value in parsed[\"metadata\"].items():\n if isinstance(value, list):\n value = \", \".join([str(e) for e in value])\n value = str(value)\n try:\n m = prefix_pattern.match(key)\n result.append(\n {\n \"namespace\": \"\",\n \"prefix\": m.group(1),\n \"key\": m.group(2),\n \"value\": value,\n },\n )\n except AttributeError:\n result.append(\n {\n \"namespace\": \"\",\n \"prefix\": \"\",\n \"key\": key,\n \"value\": value,\n },\n )\n except Exception as e:\n self.log(\n \"warning\",\n f\"Error while reading metadata {key}: {value}. Error: \" f\"{e}\",\n )\n result.sort(key=lambda item: (item[\"prefix\"], item[\"key\"]))\n return result\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 702, "n_words": 103, "vocab_size": 65, "complexity": 7, "nloc": 41, "token_counts": 207, "n_ast_nodes": 372, "n_identifiers": 28, "random_cut": "def extract_metadata(self, document_path, mime_type):\n result = []\n prefix_pattern = re.compile(r\"(.*):(.*)\")\n\n try:\n parsed = self.get_tika_result(document_path)\n except ParseError as e:\n self.log(\n \"warning\",\n f\"Error while fetching document metadata for \" f\"{document_path}: {e}\",\n )\n return result\n\n for key, value in parsed[\"metadata\"].items():\n if isinstance(value, list):\n value = \", \".join([str(e) for e in value])\n value = str(value)\n try:\n m = prefix" }, { "id": 110246, "commit_id": "9b6abd0b4933811e0a45c2535ab8fd107db65dd9", "repo": "matplotlib", "path": "lib/matplotlib/axis.py", "file_name": "axis.py", "fun_name": "update_units", "commit_message": "DOC: improve grammar and consistency", "code": "def update_units(self, data):\n \n converter = munits.registry.get_converter(data)\n if converter is None:\n return False\n\n neednew = self.converter != converter\n self.converter = converter\n default = self.converter.default_units(data, self)\n if default is not None and self.units is None:\n self.set_units(default)\n\n elif neednew:\n self._update_axisinfo()\n self.stale = True\n return True\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 145, "n_words": 42, "vocab_size": 27, "complexity": 5, "nloc": 13, "token_counts": 82, "n_ast_nodes": 132, 
"n_identifiers": 14, "random_cut": "def update_units(self, data):\n \n converter = munits.registry.get_converter(data)\n if converter is None:\n " }, { "id": 79882, "commit_id": "baddbfad424afa797287151e6b04d9c0459df3ef", "repo": "wagtail", "path": "wagtail/snippets/views/snippets.py", "file_name": "snippets.py", "fun_name": "revisions_revert_view", "commit_message": "Fix missing preview_url_name in SnippetViewSet.revisions_revert_view (#9663)", "code": "def revisions_revert_view(self):\n return self.revisions_revert_view_class.as_view(\n model=self.model,\n permission_policy=self.permission_policy,\n index_url_name=self.get_url_name(\"list\"),\n edit_url_name=self.get_url_name(\"edit\"),\n delete_url_name=self.get_url_name(\"delete\"),\n history_url_name=self.get_url_name(\"history\"),\n preview_url_name=self.get_url_name(\"preview_on_edit\"),\n revisions_revert_url_name=self.get_url_name(\"revisions_revert\"),\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 114, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 11, "token_counts": 79, "n_ast_nodes": 128, "n_identifiers": 13, "random_cut": "def revisions_revert_view(self):\n return self.revisions_revert_view_class.as_view(\n model=self.model,\n permission_policy=self.permission_policy,\n index_url_name=self.get_url_name(\"list\"),\n edit_url_name=self.get_url_name(\"edit\"),\n delete_url_name=self.get_url_name(\"delete\"),\n history_url_name=self.get_url_name(\"history\"),\n preview_url_name=self.get_url_name(\"preview_on_edit\"),\n revisions_revert_u" }, { "id": 202973, "commit_id": "c67e1cf44f17c36139e25b1eae92216cb8baad77", "repo": "django", "path": "django/test/testcases.py", "file_name": "testcases.py", "fun_name": "to_list", "commit_message": "Refs #33348 -- Deprecated passing errors=None to SimpleTestCase.assertFormError()/assertFormsetErrors().", "code": "def to_list(value):\n \n if not isinstance(value, list):\n value = [value]\n return value\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 27, "n_words": 11, "vocab_size": 10, "complexity": 2, "nloc": 4, "token_counts": 22, "n_ast_nodes": 38, "n_identifiers": 4, "random_cut": "def to_list(value):\n \n if not isinstance(value, list):\n value = [value]\n " }, { "id": 309509, "commit_id": "44a686931e0cdfd874539f27276aae849243229c", "repo": "core", "path": "homeassistant/components/rachio/webhooks.py", "file_name": "webhooks.py", "fun_name": "async_get_or_create_registered_webhook_id_and_url", "commit_message": "Import webhook (#64102)\n\n* Import webhook\r\n\r\n* Adjust webhook trigger\r\n\r\n* Fix pylint\r\n\r\n* Add type hints to async_handle_webhook\r\n\r\n* Revert changes to netatmo\r\n\r\nCo-authored-by: epenet ", "code": "async def async_get_or_create_registered_webhook_id_and_url(hass, entry):\n \n config = entry.data.copy()\n\n updated_config = False\n webhook_url = None\n\n if not (webhook_id := config.get(CONF_WEBHOOK_ID)):\n webhook_id = webhook.async_generate_id()\n config[CONF_WEBHOOK_ID] = webhook_id\n updated_config = True\n\n if hass.components.cloud.async_active_subscription():\n if not (cloudhook_url := config.get(CONF_CLOUDHOOK_URL)):\n cloudhook_url = await hass.components.cloud.async_create_cloudhook(\n webhook_id\n )\n config[CONF_CLOUDHOOK_URL] = cloudhook_url\n updated_config = True\n webhook_url = cloudhook_url\n\n if not webhook_url:\n webhook_url = 
webhook.async_generate_url(hass, webhook_id)\n\n if updated_config:\n hass.config_entries.async_update_entry(entry, data=config)\n\n return webhook_id, webhook_url\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 198, "n_words": 63, "vocab_size": 36, "complexity": 6, "nloc": 21, "token_counts": 134, "n_ast_nodes": 218, "n_identifiers": 22, "random_cut": "async def async_get_or_create_registered_webhook_id_and_url(hass, entry):\n \n config = entry.data.copy()\n\n updated_config = False\n webhook_url = None\n\n if not (webhook_id := config.get(CONF_WEBHOOK_ID)):\n webhook_id = webhook.async_generate_id()\n config[CONF_WEBHOOK_ID] = webhook_id\n updated_config = True\n\n if hass.components.cloud.async_active_subscription():\n if not (cloudhook_url := config.get(CONF_CLOUDHOOK_URL)):\n cloudhook_url = await hass.components.cloud.async_create_cloudhook(\n webhook_id\n )\n config[CONF_CLOUDHOOK_URL] = cloudhook_url\n upda" }, { "id": 189901, "commit_id": "9359331effffcdf6e6c63718002c8fd576cc8c77", "repo": "manim", "path": "tests/test_number_line.py", "file_name": "test_number_line.py", "fun_name": "test_point_to_number", "commit_message": "Add vectorized plotting functionality to improve performance (#2739)\n\n* enhancement(ParametricFunction, CoordinateSystem, Axes, NumberLine): vectorized coords_to_points,plot,plot_parametric_function; added Numberline:number_to_point_array\r\n\r\n* test(plot_log_x_axis_vectorized): added test for vectorized plotting\r\n\r\n* extend(angle_of_vector): added test for angle_of_vector with ndarray as input\r\n\r\n* fix(frames_comparison): fix naming of test data to be able to write and read the file name because ':' is forbidden\r\n\r\n* test(plot): add more vectorized tests, added use_vectorized fixture to make life simpler\r\n\r\n* fix(coordinate_system,number_line,scale): vectorizing functions and fixing bugs\r\n\r\n* enhancement(NumberLine): vectorized number_to_point and added test\r\n\r\n* enhancement(NumberLine): added tests for point_to_number, added example to doc\r\n\r\n* enhancement(CoordinateSystem): added test for coords_to_point_vectorized and vectorized coords_to_point\r\n\r\n* enhancement(Axes): vectorized point_to_coords and added tests\r\n\r\n* Minor formatting fixes and doctests\r\n\r\n* fixed flake hint with generator expression\r\n\r\n* Create __init__.py\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Update documentation for antiderivative\r\n\r\n* Update manim/mobject/graphing/coordinate_systems.py\r\n\r\nCo-authored-by: Raghav Goel \r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Update manim/mobject/graphing/coordinate_systems.py\r\n\r\nCo-authored-by: Benjamin Hackl \r\n\r\n* Update manim/mobject/graphing/coordinate_systems.py\r\n\r\nCo-authored-by: Benjamin Hackl \r\n\r\n* Update manim/mobject/graphing/functions.py\r\n\r\nCo-authored-by: Benjamin Hackl \r\n\r\n* Update manim/mobject/graphing/coordinate_systems.py\r\n\r\nCo-authored-by: Benjamin Hackl \r\n\r\n* Update manim/mobject/graphing/coordinate_systems.py\r\n\r\nCo-authored-by: Benjamin Hackl \r\n\r\n* fixed wrong indentation\r\n\r\n* stop doctest from leaking change in global config\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* change code block 
type\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Raghav Goel \r\nCo-authored-by: Benjamin Hackl ", "code": "def test_point_to_number():\n line = NumberLine()\n points = [\n [1.0, 0.0, 0.0],\n [2.0, 0.0, 0.0],\n [3.0, 0.0, 0.0],\n [4.0, 0.0, 0.0],\n [5.0, 0.0, 0.0],\n ]\n points_np = np.array(points)\n expected = [1, 2, 3, 4, 5]\n\n num_1 = [line.point_to_number(point) for point in points]\n num_2 = line.point_to_number(points)\n num_3 = line.point_to_number(points_np)\n\n np.testing.assert_array_equal(np.round(num_1, 4), np.round(expected, 4))\n np.testing.assert_array_equal(np.round(num_2, 4), np.round(expected, 4))\n np.testing.assert_array_equal(np.round(num_3, 4), np.round(expected, 4))\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 126, "n_words": 59, "vocab_size": 39, "complexity": 2, "nloc": 17, "token_counts": 206, "n_ast_nodes": 250, "n_identifiers": 16, "random_cut": "def test_point_to_number():\n line = NumberLine()\n points = [\n [1.0, 0.0, 0.0],\n [2.0, 0.0, 0.0],\n [3.0, 0.0, 0.0],\n [4.0, 0.0, 0.0],\n [5.0, 0.0, 0.0],\n ]\n points_np = np.array(points)\n expected = [1, 2, 3, 4, 5]\n\n num_1 = [line.point_to_number(point) for point in points]\n num_2 = line.point_to_number(points)\n num_3 = line.point_to_number(points_np)\n\n np.testing.assert_array_equal(np.round(num_1, 4), np.round(expected, 4))\n np.testing.assert_array_equal(np.round(num_2, 4), np.round(expect" }, { "id": 159948, "commit_id": "b335431699f86ab523dc6dba2c91efc799f4372b", "repo": "numpy", "path": "numpy/lib/tests/test_loadtxt.py", "file_name": "test_loadtxt.py", "fun_name": "test_delimiter_quotechar_collision_raises", "commit_message": "TST: Some tests for control character collisions.\n\nAdds some tests for the behavior of control characters, e.g. comments, delimiter and quotechar, when they have the same value. At this stage, these tests are more to frame the discussion about what the behavior should be, not to test what it currently is. 
I personally think raising an exception is correct for most of these situations, though it's worth noting that np.loadtxt currently doesn't for most of these corner cases (and seems to randomly assign precedence to delimiter over comments or vice versa depending on the values).", "code": "def test_delimiter_quotechar_collision_raises():\n with pytest.raises(TypeError, match=\"control characters.*are identical\"):\n np.loadtxt(StringIO(\"1, 2, 3\"), delimiter=\",\", quotechar=\",\")\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 21, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 3, "token_counts": 33, "n_ast_nodes": 61, "n_identifiers": 10, "random_cut": "def test_delimiter_quotechar_collision_raises():\n wi" }, { "id": 281167, "commit_id": "f40ba0d256a78ab2b8461f0df3a9a52ca7dc5704", "repo": "OpenBBTerminal", "path": "discordbot/stocks/dark_pool_shorts/psi.py", "file_name": "psi.py", "fun_name": "psi_command", "commit_message": "Bot logging fix (#1105)\n\n* Write bot logs to stdout instead of a file\r\nHeroku's logging uses the stdout and has problems with files\r\n\r\n* Send \"you snooze you lose\" only if debug flag is enabled\r\n\r\n* Replace print statements with logger entries in the economy menu\r\n\r\n* Add logging to bot menu command calls\r\n\r\n* Silence bandit warnings about the REPLACE_ME token\r\n\r\n* Organize imports and update logging in economy menu\r\n\r\n* Organize imports and update logging in dps menu\r\n\r\n* Organize imports and update logging in dd menu\r\n\r\n* Organize imports and update logging in gov menu\r\n\r\n* Organize imports and update logging in options menu\r\n\r\n* Organize imports and update logging in screener menu\r\n\r\n* Organize imports and update logging in ta menu\r\n\r\n* Revert automatic import sorting\r\n\r\n* Add logging to the options reaction helper", "code": "async def psi_command(ctx, ticker=\"\"):\n \n\n try:\n # Debug user input\n if cfg.DEBUG:\n logger.debug(\"!stocks.dps.psi %s\", ticker)\n\n # Check for argument\n if ticker == \"\":\n raise Exception(\"Stock ticker is required\")\n\n ticker = ticker.upper()\n\n stock = yf.download(ticker, progress=False)\n if stock.empty:\n raise Exception(\"Stock ticker is invalid\")\n\n # Retrieve data\n df, prices = stockgrid_model.get_short_interest_volume(ticker)\n\n # Debug user output\n if cfg.DEBUG:\n logger.debug(df.to_string())\n\n # Output data\n title = f\"Stocks: [Stockgrid] Price vs Short Interest Volume {ticker}\"\n embed = discord.Embed(title=title, colour=cfg.COLOR)\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n\n _, axes = plt.subplots(\n 2,\n 1,\n dpi=PLOT_DPI,\n gridspec_kw={\"height_ratios\": [2, 1]},\n )\n\n axes[0].bar(\n df[\"date\"],\n df[\"total_volume\"] / 1_000_000,\n width=timedelta(days=1),\n color=\"b\",\n alpha=0.4,\n label=\"Total Volume\",\n )\n axes[0].bar(\n df[\"date\"],\n df[\"short_volume\"] / 1_000_000,\n width=timedelta(days=1),\n color=\"r\",\n alpha=0.4,\n label=\"Short Volume\",\n )\n\n axes[0].set_ylabel(\"Volume (1M)\")\n ax2 = axes[0].twinx()\n ax2.plot(\n df[\"date\"].values,\n prices[len(prices) - len(df) :], # noqa: E203\n c=\"k\",\n label=\"Price\",\n )\n ax2.set_ylabel(\"Price ($)\")\n\n lines, labels = axes[0].get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()\n ax2.legend(lines + lines2, labels + labels2, loc=\"upper left\")\n\n axes[0].grid()\n 
axes[0].ticklabel_format(style=\"plain\", axis=\"y\")\n plt.title(f\"Price vs Short Volume Interest for {ticker}\")\n plt.gcf().autofmt_xdate()\n\n axes[1].plot(\n df[\"date\"].values,\n 100 * df[\"short_volume%\"],\n c=\"green\",\n label=\"Short Vol. %\",\n )\n\n axes[1].set_ylabel(\"Short Vol. %\")\n\n axes[1].grid(axis=\"y\")\n lines, labels = axes[1].get_legend_handles_labels()\n axes[1].legend(lines, labels, loc=\"upper left\")\n axes[1].set_ylim([0, 100])\n file_name = ticker + \"_psi.png\"\n plt.savefig(file_name)\n plt.close(\"all\")\n uploaded_image = gst_imgur.upload_image(file_name, title=\"something\")\n image_link = uploaded_image.link\n embed.set_image(url=image_link)\n os.remove(file_name)\n\n await ctx.send(embed=embed)\n\n except Exception as e:\n embed = discord.Embed(\n title=f\"ERROR Stocks: [Stockgrid] Price vs Short Interest Volume {ticker}\",\n colour=cfg.COLOR,\n description=e,\n )\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n\n await ctx.send(embed=embed)\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 995, "n_words": 222, "vocab_size": 148, "complexity": 6, "nloc": 87, "token_counts": 576, "n_ast_nodes": 949, "n_identifiers": 80, "random_cut": "async def psi_command(ctx, ticker=\"\"):\n \n\n try:\n # Debug user input\n if cfg.DEBUG:\n logger.debug(\"!stocks.dps.psi %s\", ticker)\n\n # Check for argument\n if ticker == \"\":\n raise Exception(\"Stock ticker is required\")\n\n ticker = ticker.upper()\n\n stock = yf.download(ticker, progress=False)\n if stock.empty:\n raise Exception(\"Stock ticker is invalid\")\n\n # Retrieve data\n df, prices = stockgrid_model.get_short_interest_volume(ticker)\n\n # Debug user output\n if cfg.DEBUG:\n logger.debug(df.to_string())\n\n # Output data\n title = f\"Stocks: [Stockgrid] Price vs Short Interest Volume {ticker}\"\n embed = discord.Embed(title=title, colour=cfg.COLOR)\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n\n _, axes = plt.subplots(\n 2,\n 1,\n dpi=PLOT_DPI,\n gridspec_kw={\"height_ratios\": [2, 1]},\n )\n\n axes[0].bar(\n df[\"date\"],\n df[\"total_volume\"] / 1_000_000,\n width=timedelta(days=1),\n color=\"b\",\n alpha=0.4,\n label=\"Total Volume\",\n )\n axes[0].bar(\n df[\"date\"],\n df[\"short_volume\"] / 1_000_000,\n width=timedelta(days=1),\n color=\"r\",\n alpha=0.4,\n label=\"Short Volume\",\n )\n\n axes[0].set_ylabel(\"Volume (1M)\")\n ax2 = axes[0].twinx()\n ax2.plot(\n df[\"date\"].values,\n prices[len(prices) - len(df) :], # noqa: E203\n c=\"k\",\n label=\"Price\",\n )\n ax2.set_ylabel(\"Price ($)\")\n\n lines, labels = axes[0].get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()" }, { "id": 255801, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/test/shape_inference_test.py", "file_name": "shape_inference_test.py", "fun_name": "test_nonzero_existing_dim_param", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more 
scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def test_nonzero_existing_dim_param(self) -> None:\n graph = self._make_graph(\n [('x', TensorProto.FLOAT, (3,))],\n [make_node('NonZero', ['x'], ['y'])],\n [make_tensor_value_info('y', TensorProto.INT64, (None, 'NZ'))])\n self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (1, 'NZ'))]) # type: ignore\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 72, "n_words": 25, "vocab_size": 22, "complexity": 1, "nloc": 6, "token_counts": 83, "n_ast_nodes": 133, "n_identifiers": 10, "random_cut": "def test_nonzero_existing_dim_param(self) -> None:\n graph = self._make_graph(\n [('x', TensorProto.FLOAT, (3,))],\n [make_node('NonZero', ['x'], ['y'])],\n [make_tensor_value_info('y', TensorProto.INT64, (None, 'NZ'))])" }, { "id": 137817, "commit_id": "8e680c483ce326cefc62e44f68ab1a6948b1c3d2", "repo": "ray", "path": "rllib/examples/connectors/adapt_connector_policy.py", "file_name": "adapt_connector_policy.py", "fun_name": "reset", "commit_message": "[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). (#28369)", "code": "def reset(self, *, seed=None, options=None):\n obs, info = self._env.reset()\n return np.hstack((obs, [8.0, 6.0])), info\n\n\n# Custom agent connector to drop the last 2 feature values.", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 37, "n_words": 25, "vocab_size": 24, "complexity": 1, "nloc": 3, "token_counts": 47, "n_ast_nodes": 65, "n_identifiers": 9, "random_cut": "def reset(self, *, seed=None, options=None):\n obs, info = self._env.reset()\n return np.hstack((obs, [8" }, { "id": 211026, "commit_id": "c84153a355d9855fe55cf51d203b8b24e7d884e5", "repo": "PaddleDetection", "path": "deploy/pptracking/python/mot/tracker/ocsort_tracker.py", "file_name": "ocsort_tracker.py", "fun_name": "predict", "commit_message": "[MOT] Add OC_SORT tracker (#6272)\n\n* add ocsort tracker\r\n\r\n* add ocsort deploy\r\n\r\n* merge develop\r\n\r\n* fix ocsort tracker codes\r\n\r\n* fix doc, test=document_fix\r\n\r\n* fix doc, test=document_fix", "code": "def predict(self):\n \n if ((self.kf.x[6] + self.kf.x[2]) <= 0):\n self.kf.x[6] *= 0.0\n\n self.kf.predict()\n self.age += 1\n if (self.time_since_update > 0):\n self.hit_streak = 0\n self.time_since_update += 1\n self.history.append(convert_x_to_bbox(self.kf.x, score=self.score))\n return self.history[-1]\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 107, "n_words": 29, "vocab_size": 25, "complexity": 3, "nloc": 10, "token_counts": 104, "n_ast_nodes": 159, "n_identifiers": 11, "random_cut": "def predict(self):\n \n if ((self.kf.x[6] + self.kf.x[2]) <= 0):\n self.kf.x[6] *= 0.0\n\n self.kf.predict()\n 
self.age += 1\n if (self.time_since_update > 0):\n self.hit_streak = 0\n self.time_since_update += 1" }, { "id": 285338, "commit_id": "9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/stocks/dark_pool_shorts/test_stockgrid_view.py", "file_name": "test_stockgrid_view.py", "fun_name": "test_short_interest_volume", "commit_message": "Here we merge all API Refactor related branches (#2236)\n\n* Update api.py\r\n\r\n* Updated forex menu\r\n\r\n* refactor ycrv command\r\n\r\n* refactor ycrv command black\r\n\r\n* refactor ecocal command\r\n\r\n* Minh changes\r\n\r\n* Adding space to test pushing\r\n\r\n* title fix ecocal df\r\n\r\n* get economic calendar annotation\r\n\r\n* fix investingcom tests\r\n\r\n* refactor index command\r\n\r\n* refactor overview command\r\n\r\n* give defaults to wsj view function args\r\n\r\n* rename date args investincom\r\n\r\n* refacto bigmac command\r\n\r\n* fix ecocal typo\r\n\r\n* refactor rtps command\r\n\r\n* alphavantage gdp\r\n\r\n* alphavantage gdp per capita\r\n\r\n* alphavantage cpi\r\n\r\n* alphavantage tyld\r\n\r\n* alphavantage inf\r\n\r\n* refactor macro command\r\n\r\n* refactor macro command w helpers\r\n\r\n* refactor treasury command\r\n\r\n* fix macro on terminal\r\n\r\n* treasury labels\r\n\r\n* refactor maturities\r\n\r\n* update treasury maturities doc strings\r\n\r\n* refactor get economic calendar finhub\r\n\r\n* refactor map command api\r\n\r\n* display map filter choices\r\n\r\n* route economy api to performance map\r\n\r\n* route economy api to performance map\r\n\r\n* display group choices on valuation command\r\n\r\n* refactor performance and valuation commands\r\n\r\n* refactor spectrum model and view\r\n\r\n* add choices to spectrum controller\r\n\r\n* delete image after view\r\n\r\n* fix model tests finviz\r\n\r\n* fix finciz view tests\r\n\r\n* refactor futures\r\n\r\n* fix some tests\r\n\r\n* fix more tests\r\n\r\n* fix controller test\r\n\r\n* refactor fred series notes\r\n\r\n* update fred notes docstring\r\n\r\n* refacto fred series ids\r\n\r\n* fix pred and qa when empty datasets\r\n\r\n* refactor fred\r\n\r\n* uncomment stuff\r\n\r\n* refacto get series data\r\n\r\n* fix some tests\r\n\r\n* set defaults on args\r\n\r\n* refactor fred yield curve\r\n\r\n* black\r\n\r\n* fix spell and remove ecocal names\r\n\r\n* fix linting\r\n\r\n* linting\r\n\r\n* pylint fix\r\n\r\n* change dangerous defaults\r\n\r\n* Working through crypto fixes (#2256)\r\n\r\n* Working through crypto fixes\r\n\r\n* Continued adding crypto stuff\r\n\r\n* Added crypto overview\r\n\r\n* Added test fixes\r\n\r\n* Added fixtures\r\n\r\n* Fixed tests\r\n\r\n* Fixed charting issue\r\n\r\n* Removed broken APIs\r\n\r\n* Final adjustments\r\n\r\n* Added test fixes\r\n\r\n* map get groups and get ycrv countries into old api\r\n\r\n* exposed econdb helper funcs\r\n\r\n* remove helpers\r\n\r\n* refactor search indices\r\n\r\n* linting\r\n\r\n* refactor arg currency\r\n\r\n* pylint from currency\r\n\r\n* Started switching crpyto ascending to ascend\r\n\r\n* Merging\r\n\r\n* Portfolio model arguements, params, and docstring\r\n\r\n* Refactored for etf commands (#2292)\r\n\r\n* Refactored for etf commands\r\n\r\n* Fixed tests\r\n\r\n* Added load command\r\n\r\n* Fixed menu\r\n\r\n* Portfolio logic fixes\r\n\r\n* Added econometrics (#2260)\r\n\r\n* Added econometrics\r\n\r\n* Fixed tests\r\n\r\n* Simplified API\r\n\r\n* Added test fixes\r\n\r\n* Added test csv\r\n\r\n* Allowed examples to be loaded\r\n\r\n* Fund refactor 
(#2291)\r\n\r\n* Fund refactor\r\n\r\n* Changed fund_name and fund to name\r\n\r\n* Changed ascending to ascend\r\n\r\n* Stock menu refactoring for easier API usage (#2194)\r\n\r\n* Stocks refactoring for easier API usage\r\n\r\n* Linting\r\n\r\n* Refactor newly added features\r\n\r\n* Linting\r\n\r\n* Fixing tests\r\n\r\n* Refactor common files used by stocks menu\r\n\r\n* Fixing flake8\r\n\r\n* Fix linting and tests\r\n\r\n* Linting\r\n\r\n* Fix flake8\r\n\r\n* refactor insider_data\r\n\r\n* refactor mentions\r\n\r\n* refactor watchlist\r\n\r\n* refactor sentiment\r\n\r\n* refactor sentiment\r\n\r\n* fix yahoofinance tests\r\n\r\n* refactor load and candle\r\n\r\n* refactor get_news and display_news\r\n\r\n* refactor stocks.ins.act\r\n\r\n* candle default matplotlib\r\n\r\n* fix yahoofinance_view tests\r\n\r\n* fix ark model tests\r\n\r\n* fix ark view tests\r\n\r\n* fix business insider model\r\n\r\n* fix business insider view\r\n\r\n* refactor csimarket model\r\n\r\n* fix tests csi market model\r\n\r\n* update dd controller\r\n\r\n* fix get suppliers tests\r\n\r\n* fix dd controller tests\r\n\r\n* fix finhub tests\r\n\r\n* fix finviz tests\r\n\r\n* fix fmp tests\r\n\r\n* fix marketwatch tests\r\n\r\n* corrected argument keywords in test_bt_model\r\n\r\n* corrected argument keywords in test_bt_view\r\n\r\n* refactor fa controller\r\n\r\n* refactor marketwatch view\r\n\r\n* refactor gov controller\r\n\r\n* fix tests fa av\r\n\r\n* fix tests elect\r\n\r\n* fix dcf tests\r\n\r\n* fix polygon tests\r\n\r\n* fix fmp tests\r\n\r\n* fix quiverquant tests\r\n\r\n* fix yahoofinance fa tests\r\n\r\n* fix more fa tests\r\n\r\n* fix insider tests\r\n\r\n* fix more tests\r\n\r\n* fix more tests\r\n\r\n* fix options tests\r\n\r\n* fix stock gov tests\r\n\r\n* fix tests test_ba_controller\r\n\r\n* fix tests for test_finviz_compare_model.py\r\n\r\n* fixed 2 tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fix final tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* Fix tests\r\n\r\n* black\r\n\r\n* forgot to black tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* flakefix\r\n\r\n* Tests + code : Stocks / Discovery\r\n\r\n* fix tests\r\n\r\n* added recorder\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* black\r\n\r\n* black\r\n\r\n* remove unused imports\r\n\r\n* refactor display raw\r\n\r\n* sia dicts fix\r\n\r\n* pylint\r\n\r\n* linting\r\n\r\n* remove dangerous default\r\n\r\n* fix tests\r\n\r\n* fix beta model test\r\n\r\n* black\r\n\r\n* skip screener qa test\r\n\r\n* change sector path to sectors\r\n\r\n* update tests readme\r\n\r\n* fix metric defaults\r\n\r\n* black\r\n\r\n* substitute lost ticker\r\n\r\n* defaults cpic\r\n\r\n* another round on sia\r\n\r\n* refactor cramer\r\n\r\n* reduce default tweets on sentiment\r\n\r\n* refactor yf hist, corr, volume\r\n\r\n* arkorders default\r\n\r\n* refactor income, balance, cashflow\r\n\r\n* refacto scorr, screener, getfinnhub\r\n\r\n* refactor stockgrid\r\n\r\n* ibkr refactor\r\n\r\n* another round on stockgrid\r\n\r\n* add dividens end point\r\n\r\n* refactor discovery endpoints\r\n\r\n* update docstrings with similar input\r\n\r\n* refactor messages\r\n\r\n* refactor ba\r\n\r\n* refactor regioons\r\n\r\n* refactor twitter sentiment\r\n\r\n* refactor hist\r\n\r\n* refactor regions\r\n\r\n* give default to 
timeframe\r\n\r\n* refactor bunch of defaults and arg names\r\n\r\n* remove leftover imports\r\n\r\n* refactor vwap\r\n\r\n* let tests run\r\n\r\n* fix tests\r\n\r\n* fix stock tests\r\n\r\n* fix stockanalysis tests\r\n\r\n* flake\r\n\r\n* MYPY\r\n\r\n* Made important changes\r\n\r\n* added fixes\r\n\r\n* Fixed big issue\r\n\r\n* Added fixes to tests\r\n\r\n* fix qa tests\r\n\r\n* fix tests\r\n\r\n* fix 1 more test\r\n\r\n* last stocks failing\r\n\r\n* fix crypto test\r\n\r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: montezdesousa \r\nCo-authored-by: hjoaquim \r\nCo-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com>\r\nCo-authored-by: colin99d \r\n\r\n* fix portfolio tests\r\n\r\n* change period to window\r\n\r\n* update ca docstrings\r\n\r\n* refactor get_similar_companies func\r\n\r\n* Fixed\r\n\r\n* Update CI\r\n\r\n* Update CI 2\r\n\r\n* Update CI 3\r\n\r\n* Update dependencies\r\n\r\nCo-authored-by: colin99d \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: montezdesousa \r\nCo-authored-by: James Simmons \r\nCo-authored-by: Theodore Aptekarev \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com>\r\nCo-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com>\r\nCo-authored-by: northern-64bit <75195383+northern-64bit@users.noreply.github.com>\r\nCo-authored-by: hjoaquim ", "code": "def test_short_interest_volume(mocker, raw):\n # MOCK VISUALIZE_OUTPUT\n mocker.patch(target=\"openbb_terminal.helper_classes.TerminalStyle.visualize_output\")\n\n stockgrid_view.short_interest_volume(\n symbol=\"PM\",\n limit=2,\n raw=raw,\n export=\"\",\n )\n\n\n@pytest.mark.vcr\n@pytest.mark.record_stdout\n@pytest.mark.parametrize(\n \"raw\",\n [True, False],\n)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr\n@pytest.mark.record_stdout\n@pytest.mark.parametrize(\n \"raw\",\n [True, False],\n)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 61, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 8, "token_counts": 36, "n_ast_nodes": 103, "n_identifiers": 15, "random_cut": "def test_short_interest_volume(mocker, raw):\n # MOCK VISUALIZE_OUTPUT\n mocker.patch(target=\"openbb_terminal.helper_classes.Term" }, { "id": 153731, "commit_id": "cf0eb393daa41abdd8cdf32b52ceee938cdcbe1a", "repo": "modin", "path": "modin/experimental/pandas/test/test_io_exp.py", "file_name": "test_io_exp.py", "fun_name": "test_read_csv_without_glob", "commit_message": "FIX-#4461: Fix S3 CSV data path (#4462)\n\nSigned-off-by: jeffreykennethli ", "code": "def test_read_csv_without_glob(self):\n with pytest.warns(UserWarning, match=r\"Shell-style wildcard\"):\n with pytest.raises(FileNotFoundError):\n pd.read_csv_glob(\"s3://dask-data/nyc-taxi/2015/yellow_tripdata_2015-\")\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 41, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 32, "n_ast_nodes": 58, "n_identifiers": 10, "random_cut": "def test_read_csv_without_glob(self):\n with pytest.warns(UserWarning, match=r\"Shell-style wildcard\"):\n with py" }, { "id": 6009, "commit_id": "68b0f1c9d4aaafabe973c38334b400766ead5348", "repo": "ludwig", "path": "ludwig/models/ecd.py", "file_name": "ecd.py", "fun_name": 
"save_torchscript", "commit_message": "Added end-to-end Torchscript compilation for tabular types (#1693)", "code": "def save_torchscript(self, save_path):\n traced = self.to_torchscript()\n traced.save(save_path)\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 33, "n_identifiers": 6, "random_cut": "def save_torchscript(self, save_path):\n " }, { "id": 143148, "commit_id": "75d08b06328d213656e7280639b35ccecdfc34d0", "repo": "ray", "path": "python/ray/tune/tests/test_convergence.py", "file_name": "test_convergence.py", "fun_name": "testConvergenceHyperopt", "commit_message": "[tune/structure] Refactor `suggest` into `search` package (#26074)\n\nThis PR renames the `suggest` package to `search` and alters the layout slightly.\r\n\r\nIn the new package, the higher-level abstractions are on the top level and the search algorithms have their own subdirectories.\r\n\r\nIn a future refactor, we can turn algorithms such as PBT into actual `SearchAlgorithm` classes and move them into the `search` package. \r\n\r\nThe main reason to keep algorithms and searchers in the same directory is to avoid user confusion - for a user, `Bayesopt` is as much a search algorithm as e.g. `PBT`, so it doesn't make sense to split them up.", "code": "def testConvergenceHyperopt(self):\n from ray.tune.search.hyperopt import HyperOptSearch\n\n np.random.seed(0)\n searcher = HyperOptSearch(random_state_seed=1234)\n analysis = self._testConvergence(searcher, patience=50, top=5)\n\n assert math.isclose(analysis.best_config[\"x\"], 0, abs_tol=1e-2)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 53, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 6, "token_counts": 67, "n_ast_nodes": 98, "n_identifiers": 20, "random_cut": "def testConvergenceHyperopt(self):\n from ray.tune.search.hyperopt import HyperOptSearch\n\n np.random.seed(0)\n searcher = HyperOptSearch(ran" }, { "id": 136948, "commit_id": "2cab697e29aee1654a00ffe642d35b9171c09236", "repo": "ray", "path": "python/ray/data/tests/preprocessors/test_encoder.py", "file_name": "test_encoder.py", "fun_name": "test_multi_hot_encoder", "commit_message": "[Datasets] Split `test_preprocessors.py` into separate modules (#30633)\n\ntest_preprocessors.py has gotten pretty long. 
So, I've split it up into modules like test_scaler.py.\r\n\r\nSigned-off-by: Balaji Veeramani \r\nSigned-off-by: Balaji Veeramani \r\nCo-authored-by: Antoni Baum ", "code": "def test_multi_hot_encoder():\n \n col_a = [\"red\", \"green\", \"blue\", \"red\"]\n col_b = [\"warm\", \"cold\", \"hot\", \"cold\"]\n col_c = [1, 10, 5, 10]\n col_d = [[\"warm\"], [], [\"hot\", \"warm\", \"cold\"], [\"cold\", \"cold\"]]\n in_df = pd.DataFrame.from_dict({\"A\": col_a, \"B\": col_b, \"C\": col_c, \"D\": col_d})\n ds = ray.data.from_pandas(in_df)\n\n encoder = MultiHotEncoder([\"B\", \"C\", \"D\"])\n\n # Transform with unfitted preprocessor.\n with pytest.raises(PreprocessorNotFittedException):\n encoder.transform(ds)\n\n # Fit data.\n encoder.fit(ds)\n\n assert encoder.stats_ == {\n \"unique_values(B)\": {\"cold\": 0, \"hot\": 1, \"warm\": 2},\n \"unique_values(C)\": {1: 0, 5: 1, 10: 2},\n \"unique_values(D)\": {\"cold\": 0, \"hot\": 1, \"warm\": 2},\n }\n\n # Transform data.\n transformed = encoder.transform(ds)\n out_df = transformed.to_pandas()\n\n processed_col_a = col_a\n processed_col_b = [[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]]\n processed_col_c = [[1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]]\n processed_col_d = [[0, 0, 1], [0, 0, 0], [1, 1, 1], [2, 0, 0]]\n expected_df = pd.DataFrame.from_dict(\n {\n \"A\": processed_col_a,\n \"B\": processed_col_b,\n \"C\": processed_col_c,\n \"D\": processed_col_d,\n }\n )\n\n assert out_df.equals(expected_df)\n\n # Transform batch.\n pred_col_a = [\"blue\", \"yellow\", None]\n pred_col_b = [\"cold\", \"warm\", \"other\"]\n pred_col_c = [10, 1, 20]\n pred_col_d = [[\"cold\", \"warm\"], [], [\"other\", \"cold\"]]\n pred_in_df = pd.DataFrame.from_dict(\n {\"A\": pred_col_a, \"B\": pred_col_b, \"C\": pred_col_c, \"D\": pred_col_d}\n )\n\n pred_out_df = encoder.transform_batch(pred_in_df)\n print(pred_out_df.to_string())\n\n pred_processed_col_a = [\"blue\", \"yellow\", None]\n pred_processed_col_b = [[1, 0, 0], [0, 0, 1], [0, 0, 0]]\n pred_processed_col_c = [[0, 0, 1], [1, 0, 0], [0, 0, 0]]\n pred_processed_col_d = [[1, 0, 1], [0, 0, 0], [1, 0, 0]]\n pred_expected_df = pd.DataFrame.from_dict(\n {\n \"A\": pred_processed_col_a,\n \"B\": pred_processed_col_b,\n \"C\": pred_processed_col_c,\n \"D\": pred_processed_col_d,\n }\n )\n\n assert pred_out_df.equals(pred_expected_df)\n\n # Test null behavior.\n null_col = [1, None]\n nonnull_col = [1, 1]\n null_df = pd.DataFrame.from_dict({\"A\": null_col})\n null_ds = ray.data.from_pandas(null_df)\n nonnull_df = pd.DataFrame.from_dict({\"A\": nonnull_col})\n nonnull_ds = ray.data.from_pandas(nonnull_df)\n null_encoder = OneHotEncoder([\"A\"])\n\n # Verify fit fails for null values.\n with pytest.raises(ValueError):\n null_encoder.fit(null_ds)\n null_encoder.fit(nonnull_ds)\n\n # Verify transform fails for null values.\n with pytest.raises(ValueError):\n null_encoder.transform(null_ds)\n null_encoder.transform(nonnull_ds)\n\n # Verify transform_batch fails for null values.\n with pytest.raises(ValueError):\n null_encoder.transform_batch(null_df)\n null_encoder.transform_batch(nonnull_df)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 655, "n_words": 312, "vocab_size": 155, "complexity": 1, "nloc": 69, "token_counts": 683, "n_ast_nodes": 1087, "n_identifiers": 53, "random_cut": "def test_multi_hot_encoder():\n \n col_a = [\"red\", \"green\", \"blue\", \"red\"]\n col_b = [\"warm\", \"cold\", \"hot\", \"cold\"]\n col_c = 
[1, 10, 5, 10]\n col_d = [[\"warm\"], [], [\"hot\", \"warm\", \"cold\"], [\"cold\", \"cold\"]]\n in_df = pd.DataFrame.from_dict({\"A\": col_a, \"B\": col_b, \"C\": col_c, \"D\": col_d})\n ds = ray.data.from_pandas(in_df)\n\n encoder = MultiHotEncoder([\"B\", \"C\", \"D\"])\n\n # Transform with unfitted preprocessor.\n with pytest.raises(PreprocessorNotFittedException):\n encoder.transform(ds)\n\n # Fit data.\n encoder.fit(ds)\n\n assert encoder.stats_ == {\n \"unique_values(B)\": {\"cold\": 0, \"hot\": 1, \"warm\": 2},\n \"unique_values(C)\": {1: 0, 5: 1, 10: 2},\n \"unique_values(D)\": {\"cold\": 0, \"hot\": 1, \"warm\": 2},\n }\n\n # Transform data.\n transformed = encoder.transform(ds)\n out_df = transformed.to_pandas()\n\n processed_col_a = col_a\n processed_col_b = [[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]]\n processed_col_c = [[1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]]\n processed_col_d = [[0, 0, 1]" }, { "id": 67208, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/report/eway_bill/eway_bill.py", "file_name": "eway_bill.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute(filters=None):\n\tif not filters:\n\t\tfilters.setdefault(\"posting_date\", [nowdate(), nowdate()])\n\tcolumns, data = [], []\n\tcolumns = get_columns()\n\tdata = get_data(filters)\n\treturn columns, data\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 15, "n_words": 22, "vocab_size": 17, "complexity": 2, "nloc": 7, "token_counts": 51, "n_ast_nodes": 86, "n_identifiers": 8, "random_cut": "def execute(filters=None):\n\tif not filters:\n\t\tfilters.setdefault(\"postin" }, { "id": 285444, "commit_id": "9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/stocks/options/test_syncretism_model.py", "file_name": "test_syncretism_model.py", "fun_name": "test_get_historical_greeks_invalid_status", "commit_message": "Here we merge all API Refactor related branches (#2236)\n\n* Update api.py\r\n\r\n* Updated forex menu\r\n\r\n* refactor ycrv command\r\n\r\n* refactor ycrv command black\r\n\r\n* refactor ecocal command\r\n\r\n* Minh changes\r\n\r\n* Adding space to test pushing\r\n\r\n* title fix ecocal df\r\n\r\n* get economic calendar annotation\r\n\r\n* fix investingcom tests\r\n\r\n* refactor index command\r\n\r\n* refactor overview command\r\n\r\n* give defaults to wsj view function args\r\n\r\n* rename date args investincom\r\n\r\n* refacto bigmac command\r\n\r\n* fix ecocal typo\r\n\r\n* refactor rtps command\r\n\r\n* alphavantage gdp\r\n\r\n* alphavantage gdp per capita\r\n\r\n* alphavantage cpi\r\n\r\n* alphavantage tyld\r\n\r\n* alphavantage inf\r\n\r\n* refactor macro command\r\n\r\n* refactor macro command w helpers\r\n\r\n* refactor treasury command\r\n\r\n* fix macro on terminal\r\n\r\n* treasury labels\r\n\r\n* refactor maturities\r\n\r\n* update treasury maturities doc strings\r\n\r\n* refactor get economic calendar finhub\r\n\r\n* refactor map command api\r\n\r\n* display map filter choices\r\n\r\n* route economy api to performance map\r\n\r\n* route economy api to performance map\r\n\r\n* display group choices on valuation command\r\n\r\n* refactor performance and valuation commands\r\n\r\n* refactor spectrum model and view\r\n\r\n* add choices to spectrum controller\r\n\r\n* delete image after view\r\n\r\n* fix model tests finviz\r\n\r\n* fix finciz view 
tests\r\n\r\n* refactor futures\r\n\r\n* fix some tests\r\n\r\n* fix more tests\r\n\r\n* fix controller test\r\n\r\n* refactor fred series notes\r\n\r\n* update fred notes docstring\r\n\r\n* refacto fred series ids\r\n\r\n* fix pred and qa when empty datasets\r\n\r\n* refactor fred\r\n\r\n* uncomment stuff\r\n\r\n* refacto get series data\r\n\r\n* fix some tests\r\n\r\n* set defaults on args\r\n\r\n* refactor fred yield curve\r\n\r\n* black\r\n\r\n* fix spell and remove ecocal names\r\n\r\n* fix linting\r\n\r\n* linting\r\n\r\n* pylint fix\r\n\r\n* change dangerous defaults\r\n\r\n* Working through crypto fixes (#2256)\r\n\r\n* Working through crypto fixes\r\n\r\n* Continued adding crypto stuff\r\n\r\n* Added crypto overview\r\n\r\n* Added test fixes\r\n\r\n* Added fixtures\r\n\r\n* Fixed tests\r\n\r\n* Fixed charting issue\r\n\r\n* Removed broken APIs\r\n\r\n* Final adjustments\r\n\r\n* Added test fixes\r\n\r\n* map get groups and get ycrv countries into old api\r\n\r\n* exposed econdb helper funcs\r\n\r\n* remove helpers\r\n\r\n* refactor search indices\r\n\r\n* linting\r\n\r\n* refactor arg currency\r\n\r\n* pylint from currency\r\n\r\n* Started switching crpyto ascending to ascend\r\n\r\n* Merging\r\n\r\n* Portfolio model arguements, params, and docstring\r\n\r\n* Refactored for etf commands (#2292)\r\n\r\n* Refactored for etf commands\r\n\r\n* Fixed tests\r\n\r\n* Added load command\r\n\r\n* Fixed menu\r\n\r\n* Portfolio logic fixes\r\n\r\n* Added econometrics (#2260)\r\n\r\n* Added econometrics\r\n\r\n* Fixed tests\r\n\r\n* Simplified API\r\n\r\n* Added test fixes\r\n\r\n* Added test csv\r\n\r\n* Allowed examples to be loaded\r\n\r\n* Fund refactor (#2291)\r\n\r\n* Fund refactor\r\n\r\n* Changed fund_name and fund to name\r\n\r\n* Changed ascending to ascend\r\n\r\n* Stock menu refactoring for easier API usage (#2194)\r\n\r\n* Stocks refactoring for easier API usage\r\n\r\n* Linting\r\n\r\n* Refactor newly added features\r\n\r\n* Linting\r\n\r\n* Fixing tests\r\n\r\n* Refactor common files used by stocks menu\r\n\r\n* Fixing flake8\r\n\r\n* Fix linting and tests\r\n\r\n* Linting\r\n\r\n* Fix flake8\r\n\r\n* refactor insider_data\r\n\r\n* refactor mentions\r\n\r\n* refactor watchlist\r\n\r\n* refactor sentiment\r\n\r\n* refactor sentiment\r\n\r\n* fix yahoofinance tests\r\n\r\n* refactor load and candle\r\n\r\n* refactor get_news and display_news\r\n\r\n* refactor stocks.ins.act\r\n\r\n* candle default matplotlib\r\n\r\n* fix yahoofinance_view tests\r\n\r\n* fix ark model tests\r\n\r\n* fix ark view tests\r\n\r\n* fix business insider model\r\n\r\n* fix business insider view\r\n\r\n* refactor csimarket model\r\n\r\n* fix tests csi market model\r\n\r\n* update dd controller\r\n\r\n* fix get suppliers tests\r\n\r\n* fix dd controller tests\r\n\r\n* fix finhub tests\r\n\r\n* fix finviz tests\r\n\r\n* fix fmp tests\r\n\r\n* fix marketwatch tests\r\n\r\n* corrected argument keywords in test_bt_model\r\n\r\n* corrected argument keywords in test_bt_view\r\n\r\n* refactor fa controller\r\n\r\n* refactor marketwatch view\r\n\r\n* refactor gov controller\r\n\r\n* fix tests fa av\r\n\r\n* fix tests elect\r\n\r\n* fix dcf tests\r\n\r\n* fix polygon tests\r\n\r\n* fix fmp tests\r\n\r\n* fix quiverquant tests\r\n\r\n* fix yahoofinance fa tests\r\n\r\n* fix more fa tests\r\n\r\n* fix insider tests\r\n\r\n* fix more tests\r\n\r\n* fix more tests\r\n\r\n* fix options tests\r\n\r\n* fix stock gov tests\r\n\r\n* fix tests test_ba_controller\r\n\r\n* fix tests for test_finviz_compare_model.py\r\n\r\n* 
fixed 2 tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fix final tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* Fix tests\r\n\r\n* black\r\n\r\n* forgot to black tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* flakefix\r\n\r\n* Tests + code : Stocks / Discovery\r\n\r\n* fix tests\r\n\r\n* added recorder\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* black\r\n\r\n* black\r\n\r\n* remove unused imports\r\n\r\n* refactor display raw\r\n\r\n* sia dicts fix\r\n\r\n* pylint\r\n\r\n* linting\r\n\r\n* remove dangerous default\r\n\r\n* fix tests\r\n\r\n* fix beta model test\r\n\r\n* black\r\n\r\n* skip screener qa test\r\n\r\n* change sector path to sectors\r\n\r\n* update tests readme\r\n\r\n* fix metric defaults\r\n\r\n* black\r\n\r\n* substitute lost ticker\r\n\r\n* defaults cpic\r\n\r\n* another round on sia\r\n\r\n* refactor cramer\r\n\r\n* reduce default tweets on sentiment\r\n\r\n* refactor yf hist, corr, volume\r\n\r\n* arkorders default\r\n\r\n* refactor income, balance, cashflow\r\n\r\n* refacto scorr, screener, getfinnhub\r\n\r\n* refactor stockgrid\r\n\r\n* ibkr refactor\r\n\r\n* another round on stockgrid\r\n\r\n* add dividens end point\r\n\r\n* refactor discovery endpoints\r\n\r\n* update docstrings with similar input\r\n\r\n* refactor messages\r\n\r\n* refactor ba\r\n\r\n* refactor regioons\r\n\r\n* refactor twitter sentiment\r\n\r\n* refactor hist\r\n\r\n* refactor regions\r\n\r\n* give default to timeframe\r\n\r\n* refactor bunch of defaults and arg names\r\n\r\n* remove leftover imports\r\n\r\n* refactor vwap\r\n\r\n* let tests run\r\n\r\n* fix tests\r\n\r\n* fix stock tests\r\n\r\n* fix stockanalysis tests\r\n\r\n* flake\r\n\r\n* MYPY\r\n\r\n* Made important changes\r\n\r\n* added fixes\r\n\r\n* Fixed big issue\r\n\r\n* Added fixes to tests\r\n\r\n* fix qa tests\r\n\r\n* fix tests\r\n\r\n* fix 1 more test\r\n\r\n* last stocks failing\r\n\r\n* fix crypto test\r\n\r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: montezdesousa \r\nCo-authored-by: hjoaquim \r\nCo-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com>\r\nCo-authored-by: colin99d \r\n\r\n* fix portfolio tests\r\n\r\n* change period to window\r\n\r\n* update ca docstrings\r\n\r\n* refactor get_similar_companies func\r\n\r\n* Fixed\r\n\r\n* Update CI\r\n\r\n* Update CI 2\r\n\r\n* Update CI 3\r\n\r\n* Update dependencies\r\n\r\nCo-authored-by: colin99d \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: montezdesousa \r\nCo-authored-by: James Simmons \r\nCo-authored-by: Theodore Aptekarev \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com>\r\nCo-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com>\r\nCo-authored-by: northern-64bit <75195383+northern-64bit@users.noreply.github.com>\r\nCo-authored-by: hjoaquim ", "code": "def test_get_historical_greeks_invalid_status(mocker):\n mock_response = requests.Response()\n mock_response.status_code = 400\n mocker.patch(target=\"requests.get\", new=mocker.Mock(return_value=mock_response))\n\n result_df = syncretism_model.get_historical_greeks(\n symbol=\"PM\",\n expiry=\"2022-01-07\",\n chain_id=\"PM220107P00090000\",\n strike=90,\n 
put=True,\n )\n\n assert result_df.empty\n\n\n@pytest.mark.vcr", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 73, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 12, "token_counts": 67, "n_ast_nodes": 119, "n_identifiers": 23, "random_cut": "def test_get_historical_greeks_invalid_status(mocker):\n mock_response = requests.Response()\n mock_response.status_code = 400\n mocker.patch(target=\"requests.get\", new=mocker.Mock(return_value=mock_response))\n\n r" }, { "id": 307745, "commit_id": "b29605060a74c441550708ccf4ace4b697f66ae6", "repo": "core", "path": "homeassistant/components/hdmi_cec/media_player.py", "file_name": "media_player.py", "fun_name": "media_play", "commit_message": "Enforce MediaPlayerState in hdmi_cec media player (#78522)", "code": "def media_play(self) -> None:\n \n self.send_keypress(KEY_PLAY)\n self._attr_state = MediaPlayerState.PLAYING\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 21, "n_ast_nodes": 37, "n_identifiers": 7, "random_cut": "def media_play(self) -> None:\n \n self.send_keypress(KEY_PLAY)\n self._attr_state = Media" }, { "id": 212768, "commit_id": "1eb653d91015c13ecd48eaa84d73efeaca94d5de", "repo": "PySimpleGUI", "path": "DemoPrograms/Demo_User_Settings.py", "file_name": "Demo_User_Settings.py", "fun_name": "settings_window", "commit_message": "Getting more Demo Programs synced up. New demo for VPush too", "code": "def settings_window():\n \n\n window = make_window()\n current_theme = sg.theme()\n\n while True:\n event, values = window.read()\n if event in (sg.WINDOW_CLOSED, 'Exit'):\n break\n if event == 'Save':\n # Save some of the values as user settings\n sg.user_settings_set_entry('-input-', values['-IN-'])\n sg.user_settings_set_entry('-theme-', values['-LISTBOX-'][0])\n sg.user_settings_set_entry('-option1-', values['-CB1-'])\n sg.user_settings_set_entry('-option2-', values['-CB2-'])\n\n # if the theme was changed, restart the window\n if values['-LISTBOX-'][0] != current_theme:\n current_theme = values['-LISTBOX-'][0]\n window.close()\n window = make_window()\n\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 207, "n_words": 61, "vocab_size": 44, "complexity": 5, "nloc": 16, "token_counts": 123, "n_ast_nodes": 220, "n_identifiers": 12, "random_cut": "def settings_window():\n \n\n window = make_window()\n current_theme = sg.theme()\n\n while True:\n event, values = window.read()\n if event in (sg.WINDOW_CLOSED, 'Exit'):\n break\n if event == 'Save':\n # Save some of the values as user settings\n sg.user_settings_set_entry('-input-', values['-IN-'])\n sg.user_settings_set_en" }, { "id": 45825, "commit_id": "26e8d6d7664bbaae717438bdb41766550ff57e4f", "repo": "airflow", "path": "tests/providers/ftp/hooks/test_ftp.py", "file_name": "test_ftp.py", "fun_name": "test_connection_success", "commit_message": "Updates FTPHook provider to have test_connection (#21997)\n\n* Updates FTP provider to have test_connection\r\n\r\nCo-authored-by: eladkal <45845474+eladkal@users.noreply.github.com>", "code": "def test_connection_success(self):\n with fh.FTPHook() as ftp_hook:\n status, msg = ftp_hook.test_connection()\n assert status is True\n 
assert msg == 'Connection successfully tested'\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 59, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 5, "token_counts": 31, "n_ast_nodes": 56, "n_identifiers": 8, "random_cut": "def test_connection_success(self):\n with fh.FTPHook() as ftp_hook:\n status, msg = ftp_hook.test_connection()\n assert status is True\n asse" }, { "id": 196939, "commit_id": "4577d7bc0d6778506f6c2491636f3c06ecd0ff4d", "repo": "sympy", "path": "sympy/solvers/decompogen.py", "file_name": "decompogen.py", "fun_name": "decompogen", "commit_message": "added decompogen for Min and Max", "code": "def decompogen(f, symbol):\n \n f = sympify(f)\n if not isinstance(f, Expr) or isinstance(f, Relational):\n raise TypeError('expecting Expr but got: `%s`' % func_name(f))\n if symbol not in f.free_symbols:\n return [f]\n\n result = []\n\n # ===== Simple Functions ===== #\n if isinstance(f, (Function, Pow)):\n if f.is_Pow and f.base == S.Exp1:\n arg = f.exp\n else:\n arg = f.args[0]\n if arg == symbol:\n return [f]\n result += [f.subs(arg, symbol)] + decompogen(arg, symbol)\n return result\n\n # ===== Min/Max Functions ===== #\n if isinstance(f, (Min, Max)):\n if And(*[a.has(symbol) for a in f.args]):\n raise TypeError('cannot decompose %s' % f)\n for i in f.args:\n if i.has(symbol):\n arg = i\n result += [f.subs(i, symbol)] + decompogen(i, symbol)\n return result\n\n # ===== Convert to Polynomial ===== #\n fp = Poly(f)\n gens = list(filter(lambda x: symbol in x.free_symbols, fp.gens))\n\n if len(gens) == 1 and gens[0] != symbol:\n f1 = f.subs(gens[0], symbol)\n f2 = gens[0]\n result += [f1] + decompogen(f2, symbol)\n return result\n\n # ===== Polynomial decompose() ====== #\n try:\n result += decompose(f)\n return result\n except ValueError:\n return [f]\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 410, "n_words": 166, "vocab_size": 90, "complexity": 16, "nloc": 36, "token_counts": 295, "n_ast_nodes": 463, "n_identifiers": 38, "random_cut": "def decompogen(f, symbol):\n \n f = sympify(f)\n if not isinstance(f, Expr) or isinstance(f, Relational):\n raise TypeError('expecting Expr but got: `%s`' % func_name(f))\n if symbol not in f.free_symbols:\n return [f]\n\n result = []\n\n # ===== Simple Functions ===== #\n if isinstance(f, (Function, Pow)):\n if f.is_Pow and f.base == S.Exp1:\n arg = f.exp\n else:\n arg = f.args[0]\n if arg == symbol:\n return [f]\n result += [f.subs(arg, symbol)] + decompogen(arg, symbol)\n return result\n\n # ===== Min/Max Functions ===== #\n if isinstance(f, (Min, Max)):\n if And(*[a.has(symbol) for a in f.args]):\n raise TypeError('ca" }, { "id": 47680, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/utils/test_dag_cycle.py", "file_name": "test_dag_cycle.py", "fun_name": "test_cycle_large_loop", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def test_cycle_large_loop(self):\n # large loop\n dag = DAG('dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})\n\n # A -> B -> C -> D -> E -> A\n with dag:\n start = EmptyOperator(task_id='start')\n current = start\n\n for i in range(10000):\n next_task = EmptyOperator(task_id=f'task_{i}')\n current.set_downstream(next_task)\n current = next_task\n\n 
current.set_downstream(start)\n with pytest.raises(AirflowDagCycleException):\n assert not check_cycle(dag)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 182, "n_words": 48, "vocab_size": 34, "complexity": 2, "nloc": 12, "token_counts": 83, "n_ast_nodes": 151, "n_identifiers": 19, "random_cut": "def test_cycle_large_loop(self):\n # large loop\n dag = DAG('dag', start_date=DEFAULT_DATE, default_args={'owner': 'owner1'})\n\n # A -> B -> C -> D -> E -> A\n with dag:\n start = EmptyOperator(task_id='start')\n current = start\n\n for i in range(10000):\n next_task = EmptyOperator(task_id=f'task_{i}')\n current.set_downstream(next_task)\n current = next_task\n\n current.set_downstream(start)\n with pytest.raises(AirflowD" }, { "id": 150554, "commit_id": "05ed1b544f2853ae0054cd22bd15e623abbb3aa9", "repo": "freqtrade", "path": "freqtrade/freqai/prediction_models/RL/RLPrediction_env.py", "file_name": "RLPrediction_env.py", "fun_name": "_calculate_reward", "commit_message": "Working base for reinforcement learning model", "code": "def _calculate_reward(self, action):\n step_reward = 0\n\n trade = False\n if ((action == Actions.Buy.value and self._position == Positions.Short) or\n (action == Actions.Sell.value and self._position == Positions.Long)):\n trade = True\n\n if trade:\n current_price = self.prices[self._current_tick]\n last_trade_price = self.prices[self._last_trade_tick]\n price_diff = current_price - last_trade_price\n\n if self._position == Positions.Long:\n step_reward += price_diff\n\n return step_reward\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 169, "n_words": 50, "vocab_size": 30, "complexity": 7, "nloc": 13, "token_counts": 97, "n_ast_nodes": 150, "n_identifiers": 19, "random_cut": "def _calculate_reward(self, action):\n step_rew" }, { "id": 44631, "commit_id": "f217becdfc371ea18486886cc3b2f47eeda0f77f", "repo": "airflow", "path": "tests/www/views/test_views_tasks.py", "file_name": "test_views_tasks.py", "fun_name": "test_code_from_db_all_example_dags", "commit_message": "Modernize DAG-related URL routes and rename \"tree\" to \"grid\" (#20730)\n\nCo-authored-by: Igor Kholopov ", "code": "def test_code_from_db_all_example_dags(admin_client):\n dagbag = DagBag(include_examples=True)\n for dag in dagbag.dags.values():\n DagCode(dag.fileloc, DagCode._get_code_from_file(dag.fileloc)).sync_to_db()\n url = 'code?dag_id=example_bash_operator'\n resp = admin_client.get(url, follow_redirects=True)\n check_content_not_in_response('Failed to load DAG file Code', resp)\n check_content_in_response('example_bash_operator', resp)\n\n\n@pytest.mark.parametrize(\n \"url, data, content\",\n [\n ('paused?dag_id=example_bash_operator&is_paused=false', None, 'OK'),\n (\n \"failed\",\n dict(\n task_id=\"run_this_last\",\n dag_id=\"example_bash_operator\",\n dag_run_id=DEFAULT_DAGRUN,\n upstream=\"false\",\n downstream=\"false\",\n future=\"false\",\n past=\"false\",\n origin=\"/graph?dag_id=example_bash_operator\",\n ),\n \"Marked failed on 1 task instances\",\n ),\n (\n \"success\",\n dict(\n task_id=\"run_this_last\",\n dag_id=\"example_bash_operator\",\n dag_run_id=DEFAULT_DAGRUN,\n upstream=\"false\",\n downstream=\"false\",\n future=\"false\",\n past=\"false\",\n origin=\"/graph?dag_id=example_bash_operator\",\n ),\n \"Marked success on 1 task instances\",\n ),\n (\n \"clear\",\n dict(\n 
task_id=\"runme_1\",\n dag_id=\"example_bash_operator\",\n execution_date=DEFAULT_DATE,\n upstream=\"false\",\n downstream=\"false\",\n future=\"false\",\n past=\"false\",\n only_failed=\"false\",\n ),\n \"example_bash_operator\",\n ),\n (\n \"run\",\n dict(\n task_id=\"runme_0\",\n dag_id=\"example_bash_operator\",\n ignore_all_deps=\"false\",\n ignore_ti_state=\"true\",\n execution_date=DEFAULT_DATE,\n ),\n \"\",\n ),\n ],\n ids=[\n \"paused\",\n \"failed-flash-hint\",\n \"success-flash-hint\",\n \"clear\",\n \"run\",\n ],\n)", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"url, data, content\",\n [\n ('paused?dag_id=example_bash_operator&is_paused=false', None, 'OK'),\n (\n \"failed\",\n dict(\n task_id=\"run_this_last\",\n dag_id=\"example_bash_operator\",\n dag_run_id=DEFAULT_DAGRUN,\n upstream=\"false\",\n downstream=\"false\",\n future=\"false\",\n past=\"false\",\n origin=\"/graph?dag_id=example_bash_operator\",\n ),\n \"Marked failed on 1 task instances\",\n ),\n (\n \"success\",\n dict(\n task_id=\"run_this_last\",\n dag_id=\"example_bash_operator\",\n dag_run_id=DEFAULT_DAGRUN,\n upstream=\"false\",\n downstream=\"false\",\n future=\"false\",\n past=\"false\",\n origin=\"/graph?dag_id=example_bash_operator\",\n ),\n \"Marked success on 1 task instances\",\n ),\n (\n \"clear\",\n dict(\n task_id=\"runme_1\",\n dag_id=\"example_bash_operator\",\n execution_date=DEFAULT_DATE,\n upstream=\"false\",\n downstream=\"false\",\n future=\"false\",\n past=\"false\",\n only_failed=\"false\",\n ),\n \"example_bash_operator\",\n ),\n (\n \"run\",\n dict(\n task_id=\"runme_0\",\n dag_id=\"example_bash_operator\",\n ignore_all_deps=\"false\",\n ignore_ti_state=\"true\",\n execution_date=DEFAULT_DATE,\n ),\n \"\",\n ),\n ],\n ids=[\n \"paused\",\n \"failed-flash-hint\",\n \"success-flash-hint\",\n \"clear\",\n \"run\",\n ],\n)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 853, "n_words": 107, "vocab_size": 68, "complexity": 2, "nloc": 8, "token_counts": 70, "n_ast_nodes": 445, "n_identifiers": 37, "random_cut": "def test_code_from_db_all_example_dags(admin_client):\n dagbag = DagBag(include_examples=True)\n for dag in dagbag.dags.values():\n DagCode(dag.fileloc, DagCode._get_code_from_file(dag.fileloc)).sync_to_db()\n url = 'code?dag_id=example_bash_operator'\n resp = admin_client.get(url, follow_redirects=True)\n check_content_not_in_response('Failed to load DAG file Code', resp)\n check_content_in_response('example_bash_operator', resp)\n\n\n@pytest.mark.parametrize(\n \"url, data, content\",\n [\n ('paused?dag_id=example_bash_operator&is_paused=false', None, 'OK'),\n (\n \"failed\",\n dict(\n task_id=\"run_this_last\",\n dag_id=\"example_bash_operator\",\n dag_run_id=DEFAULT_DAGRUN,\n upstream=\"false\",\n downstream=\"false\",\n future=\"false\",\n past=\"false\",\n origin=\"/graph?dag_id=example_bash_operator\",\n ),\n \"Marked failed on 1 task instances\",\n ),\n (\n \"success\",\n dict(\n task_id=\"run_this_last\",\n dag_id=\"example_bash_operator\",\n dag_run_id=DEFAULT_DAGRUN,\n upstream=\"false\",\n downstream=\"false\",\n future=\"false\",\n past=\"false\",\n origin=\"/graph?dag_id=example_bash_operator\",\n ),\n \"Marked success on 1 task instances\",\n ),\n (\n \"clear\",\n dict(\n task_id=\"runme_1\",\n dag_id=\"example_bash_operator\",\n execution_date=DEFAULT_DATE,\n upstream=\"false\",\n " }, { "id": 105516, "commit_id": "6ea46d88c6a09244d785e55e2681bc4033740442", "repo": "datasets", "path": 
"tests/packaged_modules/test_folder_based_builder.py", "file_name": "test_folder_based_builder.py", "fun_name": "data_files_with_two_splits_and_metadata", "commit_message": "Add AudioFolder packaged loader (#4530)\n\n* add audiofolder loader (almost identical to imagefolder except for inferring labels is not default)\r\n\r\n* add instruction on how to obtain list of audio extensions\r\n\r\n* add a generic loader\r\n\r\n* patch autofolder for streaming manually\r\n\r\n* align autofolder with the latest imagefolder implementation\r\n\r\n* update tests\r\n\r\n* add test for duplicate label col\r\n\r\n* add tests for autofolder (+copied from imagefolder)\r\n\r\n* add missed audio_file fixture\r\n\r\n* add documentation\r\n\r\n* remove boilerplate, make base feature builder's class arg instead of a config's one\r\n\r\n* remove self.config.label_name, use hardcoded 'label'\r\n\r\n* patch parents that inherit from DatasetBuilder, revert get_imports\r\n\r\n* rename autofolder -> folder_builder\r\n\r\n* make base column name an abstract attr of FolderBuilder instead of config's parameter\r\n\r\n* Update src/datasets/streaming.py\r\n\r\nCo-authored-by: Mario Šaško \r\n\r\n* rename FolderBuilder -> FolderBasedBuilder\r\n\r\n* set drop_labels to None by default for AudioFolder\r\n\r\n* update documentation\r\n\r\n* check if builder extending for streaming is not in datasets.builder module\r\n\r\nCo-authored-by: Mario Šaško \r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>", "code": "def data_files_with_two_splits_and_metadata(tmp_path, auto_text_file):\n data_dir = tmp_path / \"autofolder_data_dir_with_metadata_two_splits\"\n data_dir.mkdir(parents=True, exist_ok=True)\n train_dir = data_dir / \"train\"\n train_dir.mkdir(parents=True, exist_ok=True)\n test_dir = data_dir / \"test\"\n test_dir.mkdir(parents=True, exist_ok=True)\n\n filename = train_dir / \"file.txt\" # train\n shutil.copyfile(auto_text_file, filename)\n filename2 = train_dir / \"file2.txt\" # train\n shutil.copyfile(auto_text_file, filename2)\n filename3 = test_dir / \"file3.txt\" # test\n shutil.copyfile(auto_text_file, filename3)\n\n train_metadata_filename = train_dir / \"metadata.jsonl\"\n train_metadata = textwrap.dedent(\n \n )\n with open(train_metadata_filename, \"w\", encoding=\"utf-8\") as f:\n f.write(train_metadata)\n test_metadata_filename = test_dir / \"metadata.jsonl\"\n test_metadata = textwrap.dedent(\n \n )\n with open(test_metadata_filename, \"w\", encoding=\"utf-8\") as f:\n f.write(test_metadata)\n data_files_with_two_splits_and_metadata = DataFilesDict.from_local_or_remote(\n get_data_patterns_locally(data_dir), data_dir\n )\n assert len(data_files_with_two_splits_and_metadata) == 2\n assert len(data_files_with_two_splits_and_metadata[\"train\"]) == 3\n assert len(data_files_with_two_splits_and_metadata[\"test\"]) == 2\n return data_files_with_two_splits_and_metadata\n\n\n@pytest.fixture", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 220, "n_words": 104, "vocab_size": 57, "complexity": 1, "nloc": 37, "token_counts": 205, "n_ast_nodes": 365, "n_identifiers": 30, "random_cut": "def data_files_with_two_splits_and_metadata(tmp_path, auto_text_file):\n data_dir = tmp_path / \"autofolder_data_dir_with_metadata_two_splits\"\n data_dir.mkdir(parents=True, exist_ok=True)\n train_dir = data_dir / \"train\"\n train_dir.mkdir(parents=True, exist_ok=True)\n test_dir = data_dir / 
\"test\"\n test_dir.mkdir(parents=True, exist_ok=True)\n\n filename = train_dir / \"file.txt\" # train\n shutil.copyfile(auto_text_file, filename)\n filename2 = train_dir / \"file2.txt\" # train\n shutil.copyfile(auto_text_file, filename2)\n filename3 = test_dir / \"file3.txt\" # test\n shutil.copyfile(auto_text_file, filename3)\n\n train_metadata_filename = train_dir / \"metadata.jsonl\"\n train_metadata = textwrap.dedent(\n \n )\n with open(train_metadata_filename, \"w\", encoding=\"utf-8\") as f:\n f.write(train_metadata)\n test_metadata_filename = test_dir / \"metadata.jsonl\"\n test_metadata = textwrap.dedent(\n \n )\n with open(test_metadata_filename, \"w\", encoding=\"utf-8\") as f:\n f.write(test_metadata)\n data_files_with_two_splits_and_metadata = DataFilesDict.from_local_or_remote(\n get_data_patterns_locally(data_dir), data_dir\n )\n assert len(data_files_with_two_splits_and_metadata) == 2\n assert len(data_files_with_two_splits_and_metadata[\"train\"]) == 3\n assert len(data_files_with_two_splits_and_metadata[\"test\"]) == 2\n return data_files_with_two_splits_and_me" }, { "id": 198335, "commit_id": "ae1662c58912be3363b6232999b60b90050cdd0f", "repo": "sympy", "path": "sympy/integrals/tests/test_manual.py", "file_name": "test_manual.py", "fun_name": "test_manualintegrate_sqrt_quadratic", "commit_message": "manualintegrate poly*(a+b*x+c*x**2)**(n+1/2)", "code": "def test_manualintegrate_sqrt_quadratic():\n assert_is_integral_of(1/sqrt((x - I)**2-1), log(2*x + 2*sqrt(x**2 - 2*I*x - 2) - 2*I))\n assert_is_integral_of(1/sqrt(3*x**2+4*x+5), sqrt(3)*asinh(3*sqrt(11)*(x + S(2)/3)/11)/3)\n assert_is_integral_of(1/sqrt(-3*x**2+4*x+5), sqrt(3)*asin(3*sqrt(19)*(x - S(2)/3)/19)/3)\n assert_is_integral_of(1/sqrt(3*x**2+4*x-5), sqrt(3)*log(6*x + 2*sqrt(3)*sqrt(3*x**2 + 4*x - 5) + 4)/3)\n assert manualintegrate(1/sqrt(a+b*x+c*x**2), x) == \\\n Piecewise((log(b + 2*sqrt(c)*sqrt(a + b*x + c*x**2) + 2*c*x)/sqrt(c), Ne(c, 0)),\n (2*sqrt(a + b*x)/b, Ne(b, 0)), (x/sqrt(a), True))\n\n assert_is_integral_of((7*x+6)/sqrt(3*x**2+4*x+5),\n 7*sqrt(3*x**2 + 4*x + 5)/3 + 4*sqrt(3)*asinh(3*sqrt(11)*(x + S(2)/3)/11)/9)\n assert_is_integral_of((7*x+6)/sqrt(-3*x**2+4*x+5),\n -7*sqrt(-3*x**2 + 4*x + 5)/3 + 32*sqrt(3)*asin(3*sqrt(19)*(x - S(2)/3)/19)/9)\n assert_is_integral_of((7*x+6)/sqrt(3*x**2+4*x-5),\n 7*sqrt(3*x**2 + 4*x - 5)/3 + 4*sqrt(3)*log(6*x + 2*sqrt(3)*sqrt(3*x**2 + 4*x - 5) + 4)/9)\n assert manualintegrate((d+e*x)/sqrt(a+b*x+c*x**2), x) == \\\n Piecewise((e*sqrt(a + b*x + c*x**2)/c +\n (-b*e/(2*c) + d)*log(b + 2*sqrt(c)*sqrt(a + b*x + c*x**2) + 2*c*x)/sqrt(c), Ne(c, 0)),\n ((2*d*sqrt(a + b*x) + 2*e*(-a*sqrt(a + b*x) + (a + b*x)**(S(3)/2)/3)/b)/b, Ne(b, 0)),\n ((d*x + e*x**2/2)/sqrt(a), True))\n\n assert manualintegrate((3*x**3-x**2+2*x-4)/sqrt(x**2-3*x+2), x) == \\\n sqrt(x**2 - 3*x + 2)*(x**2 + 13*x/4 + S(101)/8) + 135*log(2*x + 2*sqrt(x**2 - 3*x + 2) - 3)/16\n\n assert_is_integral_of(sqrt(53225*x**2-66732*x+23013),\n (x/2 - S(16683)/53225)*sqrt(53225*x**2 - 66732*x + 23013) +\n 111576969*sqrt(2129)*asinh(53225*x/10563 - S(11122)/3521)/1133160250)\n assert manualintegrate(sqrt(a+c*x**2), x) == \\\n Piecewise((a*log(2*sqrt(c)*sqrt(a + c*x**2) + 2*c*x)/(2*sqrt(c)) + x*sqrt(a + c*x**2)/2, Ne(c, 0)),\n (sqrt(a)*x, True))\n assert manualintegrate(sqrt(a+b*x+c*x**2), x) == \\\n Piecewise(((x/2 + b/(4*c))*sqrt(a + b*x + c*x**2) +\n (a/2 - b**2/(8*c))*log(b + 2*sqrt(c)*sqrt(a + b*x + c*x**2) + 2*c*x)/sqrt(c), Ne(c, 0)),\n (2*(a + b*x)**(S(3)/2)/(3*b), Ne(b, 0)),\n (sqrt(a)*x, True))\n\n 
assert_is_integral_of(x*sqrt(x**2+2*x+4),\n (x**2/3 + x/6 + S(5)/6)*sqrt(x**2 + 2*x + 4) - 3*asinh(sqrt(3)*(x + 1)/3)/2)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 597, "n_words": 233, "vocab_size": 100, "complexity": 1, "nloc": 34, "token_counts": 1241, "n_ast_nodes": 1879, "n_identifiers": 17, "random_cut": "def test_manualintegrate_sqrt_quadratic():\n assert_is_integral_of(1/sqrt((x - I)**2-1), log(2*x + 2*sqrt(x**2 - 2*I*x - 2) - 2*I))\n assert_is_integral_of(1/sqrt(3*x**2+4*x+5), sqrt(3)*asinh(3*sqrt(11)*(x + S(2)/3)/11)/3)\n assert_is_integral_of(1/sqrt(-3*x**2+4*x+5), sqrt(3)*asin(3*sqrt(19)*(x - S(2)/3)/19)/3)\n assert_is_integral_of(1/sqrt(3*x**2+4*x-5), sqrt(3)*log(6*x + 2*sqrt(3)*sqrt(3*x**2 + 4*x - 5) + 4)/3)\n assert manualintegrate(1/sqrt(a+b*x+c*x**2), x) == \\\n Piecewise((log(b + 2*sqrt(c)*sqrt(a + b*x + c*x**2) + 2*c*x)/sqrt(c), Ne(c, 0)),\n (2*sqrt(a + b*x)/b, Ne(b, 0)), (x/sqrt(a), True))\n\n assert_is_integral_of((7*x+6)/sqrt(3*x**2+4*x+5),\n 7*sqrt(3*x**2 + 4*x + 5)/3 + 4*sqrt(3)*asinh(3*sqrt(11)*(x + S(2)/3)/11)/9)\n assert_is_integral_of((7*x+6)/sqrt(-3*x**2+4*x+5),\n -7*sqrt(-3*x**2 + 4*x + 5)/3 + 32*sqrt(3)*asin(3*sqrt(19)*(x - S(2)/3)/19)/9)\n assert_is_integral_of((7*x+6)/sqrt(3*x**2+4*x-5),\n 7*sqrt(3*x**2 + 4*x - 5)/3 + 4*sqrt(3)*log(6*x + 2*sqrt(3)*sqrt(3*x**2 + 4*x - 5) + 4)/9)\n assert manualintegrate((d+e*x)/sqrt(a+b*x+c*x**2), x) == \\\n Piecewise((e*sqrt(a + b*x + c*x**2)/c +\n (-b*e/(2*c) + d)*log(b + 2*sqrt(c)*sqrt(a + b*x + c*x**2) + 2*c*x)/sqrt(c), Ne(c, 0)),\n ((2*d*sqrt(a + b*x) + 2*e*(-a*sqrt(a + b*x) + (a + b*x)**(S(3)/2)/3)/b)/b, Ne(b, 0)),\n ((d*x + e*x**2/2)/sqrt(a), True))\n\n assert manualintegrate((3*x**3-x**2+2*x-4)/sqrt(x**2-3*x+2), x)" }, { "id": 298678, "commit_id": "7b1d5fb10af9cf71fae27f9e1020e18bd1fc2510", "repo": "core", "path": "homeassistant/components/ecobee/climate.py", "file_name": "climate.py", "fun_name": "set_temperature", "commit_message": "Use climate enums in ecobee (#70632)", "code": "def set_temperature(self, **kwargs):\n \n low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)\n high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)\n temp = kwargs.get(ATTR_TEMPERATURE)\n\n if self.hvac_mode == HVACMode.HEAT_COOL and (\n low_temp is not None or high_temp is not None\n ):\n self.set_auto_temp_hold(low_temp, high_temp)\n elif temp is not None:\n self.set_temp_hold(temp)\n else:\n _LOGGER.error(\"Missing valid arguments for set_temperature in %s\", kwargs)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 145, "n_words": 45, "vocab_size": 35, "complexity": 5, "nloc": 12, "token_counts": 84, "n_ast_nodes": 136, "n_identifiers": 17, "random_cut": "def set_temperature(self, **kwargs):\n \n low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)\n high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)\n temp = kwargs.get(ATTR_TEMPERATURE)\n\n if self.hvac_mode == HVACMode.HEAT_COOL and (\n low_temp is not None or high_temp is no" }, { "id": 122323, "commit_id": "efd61b73f6a3c54a1043881f0670ae2b9dff4c51", "repo": "jax", "path": "jax/_src/lib/xla_bridge.py", "file_name": "xla_bridge.py", "fun_name": "_make_tpu_driver_client", "commit_message": "Migrate JAX internals to builtin Python logging\n\nThis commit changes the JAX codebase to use Python's builtin logging instead of ABSL logging. 
With the latter being used in JAX code as of now, the change to Python builtin logging is advised for the following reasons (among others):\n\n- absl-py can be removed as an external dependency of JAX.\n- Builtin logging brings the option of adding more log handlers, for example file handlers for log dumps or writers to different IO streams.\n\nLogging in JAX is ported over to take place at the module level. While previously, some Python namespaces within JAX already used module-scoped logging via absl.vlog, the following idiom was adopted to provide the same functionality in Python builtin logging:\n\n```py\nimport logging\nlogger = logging.getLogger(__name__)\n\nlogger.debug(...)\nlogger.info(...)\n```\n\n The builtin root logger is left untouched, which is beneficial for downstream users planning to customize the Python root logger. All JAX internal code promises to log to descendants of the top-level \"jax\" logger by virtue of log propagation.\n\nThe package `absl-py` was removed from JAX's install requirements, and added into its test requirements.", "code": "def _make_tpu_driver_client():\n if tpu_driver_client is None:\n logger.info(\"Remote TPU is not linked into jax; skipping remote TPU.\")\n return None\n if FLAGS.jax_backend_target is None:\n logger.info(\"No --jax_backend_target was provided; skipping remote TPU.\")\n return None\n return tpu_driver_client.TpuBackend.create(worker=FLAGS.jax_backend_target)\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 47, "n_words": 33, "vocab_size": 23, "complexity": 3, "nloc": 8, "token_counts": 45, "n_ast_nodes": 77, "n_identifiers": 9, "random_cut": "def _make_tpu_driver_client():\n if tpu_driver_client is None:\n logger.info(\"Remote TPU is not linked into j" }, { "id": 223535, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/_header_value_parser.py", "file_name": "_header_value_parser.py", "fun_name": "parse_mime_version", "commit_message": "add python 3.10.4 for windows", "code": "def parse_mime_version(value):\n \n # The [CFWS] is implicit in the RFC 2045 BNF.\n # XXX: This routine is a bit verbose, should factor out a get_int method.\n mime_version = MIMEVersion()\n if not value:\n mime_version.defects.append(errors.HeaderMissingRequiredValue(\n \"Missing MIME version number (eg: 1.0)\"))\n return mime_version\n if value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n mime_version.append(token)\n if not value:\n mime_version.defects.append(errors.HeaderMissingRequiredValue(\n \"Expected MIME version number but found only CFWS\"))\n digits = ''\n while value and value[0] != '.' 
and value[0] not in CFWS_LEADER:\n digits += value[0]\n value = value[1:]\n if not digits.isdigit():\n mime_version.defects.append(errors.InvalidHeaderDefect(\n \"Expected MIME major version number but found {!r}\".format(digits)))\n mime_version.append(ValueTerminal(digits, 'xtext'))\n else:\n mime_version.major = int(digits)\n mime_version.append(ValueTerminal(digits, 'digits'))\n if value and value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n mime_version.append(token)\n if not value or value[0] != '.':\n if mime_version.major is not None:\n mime_version.defects.append(errors.InvalidHeaderDefect(\n \"Incomplete MIME version; found only major number\"))\n if value:\n mime_version.append(ValueTerminal(value, 'xtext'))\n return mime_version\n mime_version.append(ValueTerminal('.', 'version-separator'))\n value = value[1:]\n if value and value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n mime_version.append(token)\n if not value:\n if mime_version.major is not None:\n mime_version.defects.append(errors.InvalidHeaderDefect(\n \"Incomplete MIME version; found only major number\"))\n return mime_version\n digits = ''\n while value and value[0] not in CFWS_LEADER:\n digits += value[0]\n value = value[1:]\n if not digits.isdigit():\n mime_version.defects.append(errors.InvalidHeaderDefect(\n \"Expected MIME minor version number but found {!r}\".format(digits)))\n mime_version.append(ValueTerminal(digits, 'xtext'))\n else:\n mime_version.minor = int(digits)\n mime_version.append(ValueTerminal(digits, 'digits'))\n if value and value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n mime_version.append(token)\n if value:\n mime_version.defects.append(errors.InvalidHeaderDefect(\n \"Excess non-CFWS text after MIME version\"))\n mime_version.append(ValueTerminal(value, 'xtext'))\n return mime_version\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 645, "n_words": 233, "vocab_size": 82, "complexity": 24, "nloc": 62, "token_counts": 440, "n_ast_nodes": 742, "n_identifiers": 19, "random_cut": "def parse_mime_version(value):\n \n # The [CFWS] is implicit in the RFC 2045 BNF.\n # XXX: This routine is a bit verbose, should factor out a get_int method.\n mime_version = MIMEVersion()\n if not value:\n mime_version.defects.append(errors.HeaderMissingRequiredValue(\n \"Missing MIME version number (eg: 1.0)\"))\n return mime_version\n if value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n mime_version.append(token)\n if not value:\n mime_version.defects.append(errors.HeaderMissingRequiredValue(\n \"Expected MIME version number but found only CFWS\"))\n digits = ''\n while value and value[0] != '.' 
and value[0] not in CFWS_LEADER:\n digits += value[0]\n value = value[1:]\n if not digits.isdigit():\n mime_version.defects.append(errors.InvalidHeaderDefect(\n \"Expected MIME major version number but found {!r}\".format(digits)))\n mime_version.append(ValueTerminal(digits, 'xtext'))\n else:\n mime_version.major = int(digits)\n mime_version.append(ValueTerminal(digits, 'digits'))\n if value and value[0] in CFWS_LEADER:\n token, value = get_cfws(value)\n mime_version.a" }, { "id": 215604, "commit_id": "e70b30ce4f3ac47695fca662ca9e353bf90dabc9", "repo": "salt", "path": "tests/pytests/unit/transport/test_tcp.py", "file_name": "test_tcp.py", "fun_name": "xtest_client_reconnect_backoff", "commit_message": "Fix pre-commit", "code": "def xtest_client_reconnect_backoff(client_socket):\n opts = {\"tcp_reconnect_backoff\": 5}\n\n client = salt.transport.tcp.MessageClient(\n opts, client_socket.listen_on, client_socket.port\n )\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 28, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 14, "token_counts": 82, "n_ast_nodes": 51, "n_identifiers": 10, "random_cut": "def xtest_client_reconnect_backoff(client_socket):\n opts = {\"tcp_reconnect_backoff\": 5}\n\n client = salt.transport.tcp.MessageClient(\n opts, client_socket.listen_on, client_socket.po" }, { "id": 197967, "commit_id": "1d3c89501c65e4b6cd3b635be60ba1d2bf003b4d", "repo": "sympy", "path": "sympy/printing/tests/test_latex.py", "file_name": "test_latex.py", "fun_name": "test_issue_15439", "commit_message": "Fixing bug in MatMul.could_extract_minus_sign", "code": "def test_issue_15439():\n x = MatrixSymbol('x', 2, 2)\n y = MatrixSymbol('y', 2, 2)\n assert latex((x * y).subs(y, -y)) == r\"x \\left(- y\\right)\"\n assert latex((x * y).subs(y, -2*y)) == r\"x \\left(- 2 y\\right)\"\n assert latex((x * y).subs(x, -x)) == r\"\\left(- x\\right) y\"\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 54, "n_words": 40, "vocab_size": 25, "complexity": 1, "nloc": 6, "token_counts": 86, "n_ast_nodes": 136, "n_identifiers": 6, "random_cut": "def test_issue_15439():\n x = MatrixSymbol('x', 2, 2)\n y = MatrixSymbol('y', 2, 2)\n" }, { "id": 147100, "commit_id": "16fd099b8b881c7e195fea7e52832d5784c2325e", "repo": "ray", "path": "python/ray/tests/test_runtime_env_validation.py", "file_name": "test_runtime_env_validation.py", "fun_name": "test_validate_ray", "commit_message": "[runtime env] Change `pip_check` default from `True` to `False` (#23306)\n\n@SongGuyang @Catch-Bull @edoakes I know we discussed this earlier, but after thinking about it some more I think a more reasonable default is for `pip check` to be `False` by default. My guess is that a lot of users (including myself) work inside an environment where `python -m pip check` fails, but the environment doesn't cause them any problems otherwise. So a lot of users will hit an error when trying a simple `runtime_env` `pip` example, and possibly give up. 
Another less important piece of evidence is that we had to set `pip_check = False` to make some CI tests pass in the original PR.\r\n\r\nThis also matches the default behavior of pip which allows this situation to occur in the first place: `pip install` doesn't error when there's a dependency conflict; rather the command succeeds, the package is installed and usable, and it prints a warning (which is confusingly titled \"ERROR\")", "code": "def test_validate_ray(self):\n result = parse_and_validate_pip([\"pkg1\", \"ray\", \"pkg2\"])\n assert result[\"packages\"] == [\"pkg1\", \"ray\", \"pkg2\"]\n assert not result[\"pip_check\"]\n assert \"pip_version\" not in result\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 48, "n_words": 21, "vocab_size": 16, "complexity": 1, "nloc": 5, "token_counts": 41, "n_ast_nodes": 77, "n_identifiers": 4, "random_cut": "def test_validate_ray(self):\n result = parse_and_validate_pip([\"pkg1\", \"ray\", \"pkg2\"])\n assert result[\"packages\"] == [\"pkg1\", \"ray\", \"pkg2\"]\n assert not result[\"pip_check\"]\n " }, { "id": 137999, "commit_id": "8e680c483ce326cefc62e44f68ab1a6948b1c3d2", "repo": "ray", "path": "rllib/utils/exploration/tests/test_curiosity.py", "file_name": "test_curiosity.py", "fun_name": "env_maker", "commit_message": "[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). (#28369)", "code": "def env_maker(config):\n name = config.get(\"name\", \"MiniGrid-Empty-5x5-v0\")\n framestack = config.get(\"framestack\", 4)\n env = gym.make(name)\n # Make it impossible to reach goal by chance.\n env = gym.wrappers.TimeLimit(env, max_episode_steps=15)\n # Only use image portion of observation (discard goal and direction).\n env = minigrid.wrappers.ImgObsWrapper(env)\n env = OneHotWrapper(\n env,\n config.vector_index if hasattr(config, \"vector_index\") else 0,\n framestack=framestack,\n )\n return env\n\n\nregister_env(\"mini-grid\", env_maker)\nCONV_FILTERS = [[16, [11, 11], 3], [32, [9, 9], 3], [64, [5, 5], 3]]\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 118, "n_words": 70, "vocab_size": 57, "complexity": 2, "nloc": 12, "token_counts": 83, "n_ast_nodes": 194, "n_identifiers": 18, "random_cut": "def env_maker(config):\n name = config.get(\"name\", \"MiniGrid-Empty-5x5-v0\")\n framestack = config.get(\"framestack\", 4)\n env = gym.make(name)\n # Make it impossible to reach goal by chance.\n env = gym.wrappers.TimeLimit(env, max_episode_step" }, { "id": 292335, "commit_id": "c582aecc10f82c2f528bd8ae630445a07bcfb615", "repo": "core", "path": "homeassistant/components/cast/media_player.py", "file_name": "media_player.py", "fun_name": "async_added_to_hass", "commit_message": "Deduplicate code in cast media_player (#66815)\n\nCo-authored-by: Paulus Schoutsen ", "code": "async def async_added_to_hass(self):\n \n self._async_setup(self.entity_id)\n\n self._cast_view_remove_handler = async_dispatcher_connect(\n self.hass, SIGNAL_HASS_CAST_SHOW_VIEW, self._handle_signal_show_view\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 50, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 30, "n_ast_nodes": 50, "n_identifiers": 9, "random_cut": "async def async_added_to_hass(self):\n \n 
self._async_setup(self.entity_id)\n\n self._cast_view_remove_handler = async_dispatcher_connect(\n self.hass, SIGNAL_HASS_CAST_SHOW_VIEW, self._handle_signa" }, { "id": 279885, "commit_id": "ead59b2c4c85284d8c2095e691800255068694ce", "repo": "keras", "path": "keras/saving/experimental/saving_lib_test.py", "file_name": "saving_lib_test.py", "fun_name": "test_saving_model_state", "commit_message": "Keras Saving: Make sure the optimizer weights are also built and restored upon loading.\n\nAlso allow the weights used in the test to have proper gradients, and make the input shape key in config consistent across Sequential and other models.\n\nPiperOrigin-RevId: 475455814", "code": "def test_saving_model_state(self, model_type):\n temp_filepath = os.path.join(self.get_temp_dir(), \"my_model.keras\")\n model = getattr(self, f\"_get_{model_type}_model\")()\n x = np.random.random((100, 32))\n y = np.random.random((100, 1))\n model.fit(x, y, epochs=1)\n\n # Assert that the archive has not been saved.\n self.assertFalse(os.path.exists(temp_filepath))\n\n # Mutate the `Dense` layer custom weights to ensure that list and\n # dict-contained weights get restored.\n model.layers[1].additional_weights[0].assign([[2]])\n model.layers[1].weights_in_dict[\"my_weight\"].assign([[2]])\n model.layers[1].nested_layer.kernel.assign([[1]])\n\n model._save_experimental(temp_filepath)\n\n # Assert that the archive has been saved.\n self.assertTrue(os.path.exists(temp_filepath))\n loaded_model = saving_lib.load_model(temp_filepath)\n self.assertEqual(model._is_compiled, loaded_model._is_compiled)\n\n # The weights are supposed to be the same (between original and loaded\n # models).\n for original_weights, loaded_weights in zip(\n model.get_weights(), loaded_model.get_weights()\n ):\n np.testing.assert_allclose(original_weights, loaded_weights)\n\n # The optimizer variables are supposed to be the same (between original\n # and loaded models).\n for original_weights, loaded_weights in zip(\n model.optimizer.variables(), loaded_model.optimizer.variables()\n ):\n np.testing.assert_allclose(original_weights, loaded_weights)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 336, "n_words": 118, "vocab_size": 72, "complexity": 3, "nloc": 22, "token_counts": 249, "n_ast_nodes": 399, "n_identifiers": 39, "random_cut": "def test_saving_model_state(self, model_type):\n temp_filepath = os.path.join(self.get_temp_dir(), \"my_model.keras\")\n model = getattr(self, f\"_get_{model_type}_model\")()\n x = np.random.random((100, 32))\n y = np.random.random((100, 1))\n model.fit(x, y, epochs=1)\n\n # Assert that the archive has not been saved.\n self.assertFalse(os.path.exists(temp_filepath))\n\n # Mutate the `Dense` layer custom weights to ensure that list and\n # dict-contained weights get restored.\n model.layers[1].additional_weights[0].assign([[2]])\n model.layers[1].weights_in_dict[\"my_weight\"].assign([[2]])\n model.layers[1].nested_layer.kernel.assign([[1]])\n\n model._save_experimental(temp_filepath)\n\n # Assert that the archive has been saved.\n self.assertTrue(os.path.exists(temp_filepath))\n loaded_model = saving_lib.load_model(temp_filepath)\n self.assertEqual(model._is_compiled, loaded_model._is_compiled)\n\n # The weights are supposed to be the same (between original and loaded\n # models).\n for original_weights, loaded_weights in zip(\n model.get_weights(), loaded_model.get_weights()\n ):\n np." 
}, { "id": 186334, "commit_id": "8007c612d45429c7b0cc4314f4aaedccb78334b9", "repo": "textual", "path": "src/textual/_cache.py", "file_name": "_cache.py", "fun_name": "__repr__", "commit_message": "simpler", "code": "def __repr__(self) -> str:\n return (\n f\" str:\n return (\n f\" FixedPrecisionTensor:\n res = FixedPrecisionTensor(base=self._base, precision=self._precision)\n res.child = self.child.transpose(*args, **kwargs)\n return res\n ", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 40, "n_words": 16, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 47, "n_ast_nodes": 72, "n_identifiers": 11, "random_cut": "def transpose(self, *args, **kwargs) -> FixedPrecisionTensor:\n res = FixedPrecisionTensor(base=self._base, precision=self._precision)\n res.child = self.child.transpose(*args, **kwargs)" }, { "id": 200390, "commit_id": "24f1e7730119fe958cc8e28411f790c9a5ec04eb", "repo": "sympy", "path": "sympy/core/tests/test_arit.py", "file_name": "test_arit.py", "fun_name": "test_denest_add_mul", "commit_message": "Fix various typos\n\nFound via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`", "code": "def test_denest_add_mul():\n # when working with evaluated expressions make sure they denest\n eq = x + 1\n eq = Add(eq, 2, evaluate=False)\n eq = Add(eq, 2, evaluate=False)\n assert Add(*eq.args) == x + 5\n eq = x*2\n eq = Mul(eq, 2, evaluate=False)\n eq = Mul(eq, 2, evaluate=False)\n assert Mul(*eq.args) == 8*x\n # but don't let them denest unnecessarily\n eq = Mul(-2, x - 2, evaluate=False)\n assert 2*eq == Mul(-4, x - 2, evaluate=False)\n assert -eq == Mul(2, x - 2, evaluate=False)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 118, "n_words": 80, "vocab_size": 39, "complexity": 1, "nloc": 12, "token_counts": 135, "n_ast_nodes": 203, "n_identifiers": 7, "random_cut": "def test_denest_add_mul():\n # when working with evaluated expressions make sure they denest\n eq = x + 1\n eq = Add(eq, 2, evaluate=False)\n eq = Ad" }, { "id": 260638, "commit_id": "610ada79c9fe7219ef7f2a283e9e0f02e122f948", "repo": "scikit-learn", "path": "sklearn/mixture/_bayesian_mixture.py", "file_name": "_bayesian_mixture.py", "fun_name": "_check_means_parameters", "commit_message": "MAINT (Bayesian)GaussianMixture use _validate_params (#24021)\n\nCo-authored-by: jeremiedbb ", "code": "def _check_means_parameters(self, X):\n \n _, n_features = X.shape\n\n if self.mean_precision_prior is None:\n self.mean_precision_prior_ = 1.0\n else:\n self.mean_precision_prior_ = self.mean_precision_prior\n\n if self.mean_prior is None:\n self.mean_prior_ = X.mean(axis=0)\n else:\n self.mean_prior_ = check_array(\n self.mean_prior, dtype=[np.float64, np.float32], ensure_2d=False\n )\n _check_shape(self.mean_prior_, (n_features,), \"means\")\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 160, "n_words": 37, "vocab_size": 26, "complexity": 3, "nloc": 13, "token_counts": 98, "n_ast_nodes": 151, "n_identifiers": 19, "random_cut": "def _check_means_parameters(self, X):\n \n _, n_features = 
X.shape\n\n if self.mean_precision_prior is None:\n self.mean_precision_prior_ = 1.0\n else:\n self.mean_precision_prior_ = self.mean_precision_prior\n\n if self.mean_prior is None:\n self.mean_prior_ = X.mean(axis=0)\n else:\n self.mean_prior_ = check_array(\n self.mean_prior, dtype=[np.float64, np.float32], ensure_2d=False\n )\n _check_shape(self.mean_prior_, (n_fea" }, { "id": 282900, "commit_id": "50cafd500ece43df98e3cf076d81084b2806ea03", "repo": "OpenBBTerminal", "path": "bots/helpers.py", "file_name": "helpers.py", "fun_name": "groupme", "commit_message": "Discord bot massive improvement (#1481)\n\n* allow logs feature flag\r\n\r\n* Adding log collection md\r\n\r\n* upload last log at startup\r\n\r\n* additions/refractor\r\n\r\n* refactor\r\n\r\n* lint/black ++\r\n\r\n* disc\r\n\r\n* TimeRotating Logger and upload to s3\r\n\r\n* corrected regex error\r\n\r\n* makeup for config\r\n\r\n* logging/disc/sia/etf/++\r\n\r\n* append .log before uploading\r\n\r\n* process to upload logs to s3\r\n\r\n* candle ta/etfmcds\r\n\r\n* fix\r\n\r\n* ta candles\r\n\r\n* implement presignedURL\r\n\r\n* fixed regex\r\n\r\n* ma's in 1 cmd, delete older files\r\n\r\n* refactor ta candle\r\n\r\n* updates\r\n\r\n* black\r\n\r\n* moon?\r\n\r\n* Logger uploader\r\n\r\n* rotate every hour\r\n\r\n* only archive if successful\r\n\r\n* chavis suggestions\r\n\r\n* windows\r\n\r\n* ta\r\n\r\n* commands_dict update\r\n\r\n* discord tacmds\r\n\r\n* log_collection error fix\r\n\r\n* fix\r\n\r\n* fix\r\n\r\n* pylint\r\n\r\n* bb fix\r\n\r\n* only log filesize\r\n\r\n* fixes\r\n\r\n* discord logs\r\n\r\n* Delete log_collection.md\r\n\r\n* fixes for other bots on images\r\n\r\n* bots image upload fix\r\n\r\n* updated helpers/load candle\r\n\r\n* more ta cc/housekeeping/refactors/slashcmds\r\n\r\n* update bots cmds_dict\r\n\r\n* adjustments to font size/fixes\r\n\r\n* test fixs/disc earnings\r\n\r\n* missed a spot\r\n\r\n* fixes had > revesred\r\n\r\n* reversed the >< again oops\r\n\r\n* remove logger branch code blocking tests\r\n\r\n* black fix\r\n\r\n* fix missing sources in docstr/daily candle dt tz\r\n\r\n* load_candle refactor with docstring\r\n\r\n* moved insiders to disc\r\n\r\n* Lucas logging changes\r\n\r\n* Fixing log_collection.md\r\n\r\n* testing scenario\r\n\r\n* more ta converted\r\n\r\n* more ta\r\n\r\n* Update config_terminal : remove print of verbosity\r\n\r\n* table cfg/fix matplt/ screener +\r\n\r\n* fix\r\n\r\n* what's sleep? 1 more line.. or 2. scr df2img\r\n\r\n* juan more. 
fix news 1m chart issue\r\n\r\n* ticker.upper() fixes\r\n\r\n* Update log collection\r\n\r\n* Updating log collection - change tmp folder\r\n\r\nCo-authored-by: LBolte29 \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: LBolte29 <97528701+LBolte29@users.noreply.github.com>\r\nCo-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com>\r\nCo-authored-by: didierlopes.eth ", "code": "def groupme(self, func, group_id, name, *args, **kwargs):\n data = func(*args, **kwargs)\n if \"imagefile\" in data:\n imagefile = cfg.IMG_DIR / data[\"imagefile\"]\n send_image(imagefile, group_id, data.get(\"description\", \"\"), True)\n elif \"embeds_img\" in data:\n imagefiles = data[\"images_list\"]\n for img in imagefiles:\n imagefile = cfg.IMG_DIR / img\n send_image(imagefile, group_id, data.get(\"description\", \"\"), True)\n elif \"description\" in data:\n title = data.get(\"title\", \"\")\n # TODO: Allow navigation through pages\n description = data.get(\"description\")\n if isinstance(description, List):\n clean_desc = description[0].replace(\"Page \", \"\")\n else:\n clean_desc = description.replace(\"Page \", \"\")\n message = f\"{title}\\n{clean_desc}\"\n send_message(message, group_id)\n os.remove(imagefile)\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 302, "n_words": 83, "vocab_size": 54, "complexity": 6, "nloc": 20, "token_counts": 171, "n_ast_nodes": 297, "n_identifiers": 25, "random_cut": "def groupme(self, func, group_id, name, *args, **kwargs):\n data = func(*args, **kwargs)\n if \"imagefile\" in data:\n imagefile = cfg.IMG_DIR / data[\"imagefile\"]\n " }, { "id": 209484, "commit_id": "e6eaa484b8fa3d10051e82f5a784fe8dedbd5592", "repo": "scapy", "path": "scapy/contrib/automotive/scanner/executor.py", "file_name": "executor.py", "fun_name": "execute_test_case", "commit_message": "Add assert to GMLAN Scanner to enforce fast fail on to many open TestSockets\n\nFix bugs in TestSocket\nFix bugs in the AutomotiveScanner execution_time handling\nSimplify test code for UDS_Scanner and reuse ObjectPipes to avoid mass\ncreation", "code": "def execute_test_case(self, test_case, kill_time=None):\n # type: (AutomotiveTestCaseABC, Optional[float]) -> None\n \n\n test_case.pre_execute(\n self.socket, self.target_state, self.configuration)\n\n try:\n test_case_kwargs = self.configuration[test_case.__class__.__name__]\n except KeyError:\n test_case_kwargs = dict()\n\n if kill_time:\n max_execution_time = max(int(kill_time - time.time()), 5)\n cur_execution_time = test_case_kwargs.get(\"execution_time\", 1200)\n test_case_kwargs[\"execution_time\"] = min(max_execution_time,\n cur_execution_time)\n\n log_interactive.debug(\"[i] Execute test_case %s with args %s\",\n test_case.__class__.__name__, test_case_kwargs)\n\n test_case.execute(self.socket, self.target_state, **test_case_kwargs)\n test_case.post_execute(\n self.socket, self.target_state, self.configuration)\n\n self.check_new_states(test_case)\n self.check_new_testcases(test_case)\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 292, "n_words": 57, "vocab_size": 48, "complexity": 3, "nloc": 19, "token_counts": 148, "n_ast_nodes": 234, "n_identifiers": 26, "random_cut": "def execute_test_case(self, test_case, kill_time=None):\n # type: (AutomotiveTestCaseABC, Optional[float]) -> None\n \n\n test_case.pre_execute(\n 
self.socket, self.target_state, self.configuration)\n\n try:\n test_case_kwargs = self.configuration[test_case.__class__.__name__]\n except KeyError:\n test_case_kwargs = dict()\n\n if kill_time:\n max_execution_time = max(int(kill_time - time.time()), 5)\n cur_execution_time = test_case_kwargs.get(\"execution_time\", 1200)\n test_case_kwargs[\"execution_time\"] = min(max_execution_time,\n cur_execution_time)\n\n log_interactive.debug(\"[i] Execute test_case %s with args %s\",\n test_case.__class__.__name__, test_case_kwargs)\n\n test_case.execute(self.socket, self.target_state, **test_case_kwargs)\n test_case.po" }, { "id": 313481, "commit_id": "0505c596a563c92def54ea8108be09a338a0dd53", "repo": "core", "path": "tests/components/history_stats/test_sensor.py", "file_name": "test_sensor.py", "fun_name": "test_invalid_entity_in_template", "commit_message": "Fix dropouts in history_stats graphs on restart (#73110)", "code": "async def test_invalid_entity_in_template(hass, recorder_mock):\n \n await async_setup_component(\n hass,\n \"sensor\",\n {\n \"sensor\": {\n \"platform\": \"history_stats\",\n \"entity_id\": \"binary_sensor.test_id\",\n \"name\": \"test\",\n \"state\": \"on\",\n \"end\": \"{{ states('binary_sensor.invalid').attributes.time }}\",\n \"duration\": \"01:00\",\n },\n },\n )\n await hass.async_block_till_done()\n assert hass.states.get(\"sensor.test\") is None\n next_update_time = dt_util.utcnow() + timedelta(minutes=1)\n with freeze_time(next_update_time):\n async_fire_time_changed(hass, next_update_time)\n await hass.async_block_till_done()\n assert hass.states.get(\"sensor.test\") is None\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 227, "n_words": 49, "vocab_size": 40, "complexity": 1, "nloc": 22, "token_counts": 108, "n_ast_nodes": 201, "n_identifiers": 14, "random_cut": "async def test_invalid_entity_in_template(hass, recorder_mock):\n \n await async_setup_component(\n hass,\n \"sensor\",\n {\n \"sensor\": {\n \"platform\": \"history_stats\",\n \"entity_id\": \"binary_sensor.test_id\",\n \"name\": \"test\",\n \"state\": \"on\",\n \"end\": \"{{ states('binary_sensor.invalid').attributes.time }}\",\n \"duration\": \"01:00\",\n },\n },\n )\n await hass.async_block_till_done()\n assert hass.states.get(\"sensor.test\") is None\n next_update_time = dt_util.utcnow() + timedelta(minutes=1)\n with freeze_time(next_update_time):\n async_fire_time_changed(hass, next_update_time)\n await hass.async_blo" }, { "id": 130058, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "doc/source/ray-core/_examples/dask_xgboost/dask_xgboost.py", "file_name": "dask_xgboost.py", "fun_name": "tune_xgboost", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def tune_xgboost(train_df, test_df, target_column):\n # Set XGBoost config.\n config = {\n \"tree_method\": \"approx\",\n \"objective\": \"binary:logistic\",\n \"eval_metric\": [\"logloss\", \"error\"],\n \"eta\": tune.loguniform(1e-4, 1e-1),\n \"subsample\": tune.uniform(0.5, 1.0),\n \"max_depth\": tune.randint(1, 9),\n }\n\n ray_params = RayParams(\n max_actor_restarts=1, cpus_per_actor=cpus_per_actor, num_actors=num_actors\n )\n\n tune_start_time = time.time()\n\n analysis = tune.run(\n tune.with_parameters(\n train_xgboost,\n train_df=train_df,\n test_df=test_df,\n target_column=target_column,\n ray_params=ray_params,\n 
),\n # Use the `get_tune_resources` helper function to set the resources.\n resources_per_trial=ray_params.get_tune_resources(),\n config=config,\n num_samples=10,\n metric=\"eval-error\",\n mode=\"min\",\n )\n\n tune_end_time = time.time()\n tune_duration = tune_end_time - tune_start_time\n print(f\"Total time taken: {tune_duration} seconds.\")\n\n accuracy = 1.0 - analysis.best_result[\"eval-error\"]\n print(f\"Best model parameters: {analysis.best_config}\")\n print(f\"Best model total accuracy: {accuracy:.4f}\")\n\n return analysis.best_config\n\n\n###############################################################################\n# Hyperparameter optimization may take some time to complete.\n\ntune_xgboost(train_df, eval_df, LABEL_COLUMN)\n\n###############################################################################\n# Prediction\n# ----------\n# With the model trained, we can now predict on unseen data. For the\n# purposes of this example, we will use the same dataset for prediction as\n# for training.\n#\n# Since prediction is naively parallelizable, distributing it over multiple\n# actors can measurably reduce the amount of time needed.\n\ninference_df = RayDMatrix(data, ignore=[LABEL_COLUMN, \"partition\"])\nresults = predict(\n bst,\n inference_df,\n ray_params=RayParams(\n cpus_per_actor=cpus_per_actor_inference, num_actors=num_actors_inference\n ),\n)\n\nprint(results)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 386, "n_words": 179, "vocab_size": 136, "complexity": 1, "nloc": 34, "token_counts": 191, "n_ast_nodes": 390, "n_identifiers": 42, "random_cut": "def tune_xgboost(train_df, test_df, target_column):\n # Set XGBoost config.\n config = {\n \"tree_method\": \"approx\",\n \"objective\": \"binary:logistic" }, { "id": 155524, "commit_id": "3c46e89aea2af010e69049cd638094fea2ddd576", "repo": "dask", "path": "dask/array/percentile.py", "file_name": "percentile.py", "fun_name": "_percentile", "commit_message": "Replace `interpolation` with `method` and `method` with `internal_method` (#8525)\n\nFollowing the change in numpy 1.22.0\r\n\r\nCo-authored-by: James Bourbeau ", "code": "def _percentile(a, q, method=\"linear\"):\n n = len(a)\n if not len(a):\n return None, n\n if isinstance(q, Iterator):\n q = list(q)\n if a.dtype.name == \"category\":\n result = np_percentile(a.cat.codes, q, method=method)\n import pandas as pd\n\n return pd.Categorical.from_codes(result, a.dtype.categories, a.dtype.ordered), n\n if type(a.dtype).__name__ == \"DatetimeTZDtype\":\n import pandas as pd\n\n if isinstance(a, (pd.Series, pd.Index)):\n a = a.values\n\n if np.issubdtype(a.dtype, np.datetime64):\n values = a\n a2 = values.view(\"i8\")\n result = np_percentile(a2, q, method=method).astype(values.dtype)\n if q[0] == 0:\n # https://github.com/dask/dask/issues/6864\n result[0] = min(result[0], values.min())\n return result, n\n if not np.issubdtype(a.dtype, np.number):\n method = \"nearest\"\n return np_percentile(a, q, method=method), n\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 238, "n_words": 91, "vocab_size": 56, "complexity": 9, "nloc": 24, "token_counts": 236, "n_ast_nodes": 369, "n_identifiers": 34, "random_cut": "def _percentile(a, q, method=\"linear\"):\n n = len(a)\n if not len(a):\n return None, n\n if isinstance(q, Iterator):\n q = list(q)\n if a.dtype.name == \"category\":\n " }, { 
"id": 107979, "commit_id": "5495fd220f4e2df0eb801ed9dfcfd6b557377ca2", "repo": "matplotlib", "path": "lib/matplotlib/streamplot.py", "file_name": "streamplot.py", "fun_name": "start_trajectory", "commit_message": "Implement proposed enhancement from https://github.com/matplotlib/matplotlib/issues/8388.", "code": "def start_trajectory(self, xg, yg, broken_streamlines=True):\n xm, ym = self.grid2mask(xg, yg)\n self.mask._start_trajectory(xm, ym, broken_streamlines)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 26, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 37, "n_ast_nodes": 53, "n_identifiers": 10, "random_cut": "def start_trajectory(self, xg, yg, broken_streamlines=True):\n xm, ym = self.grid2mask(xg, yg)\n sel" }, { "id": 249691, "commit_id": "2d0ba3f89aaf9545d81c4027500e543ec70b68a6", "repo": "synapse", "path": "tests/push/test_push_rule_evaluator.py", "file_name": "test_push_rule_evaluator.py", "fun_name": "test_related_event_match_with_fallback", "commit_message": "Implementation for MSC3664: Pushrules for relations (#11804)", "code": "def test_related_event_match_with_fallback(self):\n evaluator = self._get_evaluator(\n {\n \"m.relates_to\": {\n \"event_id\": \"$parent_event_id\",\n \"key\": \"😀\",\n \"rel_type\": \"m.thread\",\n \"is_falling_back\": True,\n \"m.in_reply_to\": {\n \"event_id\": \"$parent_event_id\",\n },\n }\n },\n {\n \"m.in_reply_to\": {\n \"event_id\": \"$parent_event_id\",\n \"type\": \"m.room.message\",\n \"sender\": \"@other_user:test\",\n \"room_id\": \"!room:test\",\n \"content.msgtype\": \"m.text\",\n \"content.body\": \"Original message\",\n \"im.vector.is_falling_back\": \"\",\n },\n \"m.thread\": {\n \"event_id\": \"$parent_event_id\",\n \"type\": \"m.room.message\",\n \"sender\": \"@other_user:test\",\n \"room_id\": \"!room:test\",\n \"content.msgtype\": \"m.text\",\n \"content.body\": \"Original message\",\n },\n },\n )\n self.assertTrue(\n evaluator.matches(\n {\n \"kind\": \"im.nheko.msc3664.related_event_match\",\n \"key\": \"sender\",\n \"rel_type\": \"m.in_reply_to\",\n \"pattern\": \"@other_user:test\",\n \"include_fallbacks\": True,\n },\n \"@user:test\",\n \"display_name\",\n )\n )\n self.assertFalse(\n evaluator.matches(\n {\n \"kind\": \"im.nheko.msc3664.related_event_match\",\n \"key\": \"sender\",\n \"rel_type\": \"m.in_reply_to\",\n \"pattern\": \"@other_user:test\",\n \"include_fallbacks\": False,\n },\n \"@user:test\",\n \"display_name\",\n )\n )\n self.assertFalse(\n evaluator.matches(\n {\n \"kind\": \"im.nheko.msc3664.related_event_match\",\n \"key\": \"sender\",\n \"rel_type\": \"m.in_reply_to\",\n \"pattern\": \"@other_user:test\",\n },\n \"@user:test\",\n \"display_name\",\n )\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 1197, "n_words": 112, "vocab_size": 45, "complexity": 1, "nloc": 71, "token_counts": 216, "n_ast_nodes": 433, "n_identifiers": 7, "random_cut": "def test_related_event_match_with_fallback(self):\n evaluator = self._get_evaluator(\n {\n \"m.relates_to\": {\n \"event_id\": \"$parent_event_id\",\n \"key\": \"😀\",\n \"rel_type\": \"m.thread\",\n \"is_falling_back\": True,\n \"m.in_reply_to\": {\n \"event_id\": \"$parent_event_id\",\n },\n }\n },\n {\n \"m.in_reply_to\": {\n \"event_id\": \"$parent_event_id\",\n \"type\": \"m.room.message\",\n \"sender\": 
\"@other_user:test\",\n \"room_id\": \"!room:test\",\n \"content.msgtype\": \"m.text\",\n \"content.body\": \"Original message\",\n \"im.vector.is_falling_back\": \"\",\n },\n \"m.thread\": {\n \"event_id\": \"$parent_event_id\",\n \"type\": \"m.room.message\",\n \"sender\": \"@other_user:test\",\n \"room_id\": \"!room:test\",\n \"content.msgtype\": \"m.text\",\n \"content.body\": \"Original message\",\n },\n },\n )\n self.assertTrue(\n evaluator.matches(\n {\n \"kind\": \"im.nheko.msc3664.related_event_match\",\n \"key\": \"sender\",\n \"rel_type\": \"m.in_reply_to\",\n \"pattern\": \"@other_user:test\",\n \"include_fallbacks\": True,\n },\n \"@user:test\",\n \"display_name\",\n )\n )\n self.assertFalse(\n evaluator.matches(\n {\n \"kind\": \"im.nheko.msc3664.related_event_match\",\n \"key\": \"sender\",\n " }, { "id": 184745, "commit_id": "210214260d6272ed8af52608bbbd1de4cff91f12", "repo": "textual", "path": "tests/css/test_parse.py", "file_name": "test_parse.py", "fun_name": "test_opacity_to_styles", "commit_message": "Renaming opacity to text-opacity in code", "code": "def test_opacity_to_styles(self, css_value, styles_value):\n css = f\"#some-widget {{ text-opacity: {css_value} }}\"\n stylesheet = Stylesheet()\n stylesheet.add_source(css)\n\n assert stylesheet.rules[0].styles.text_opacity == styles_value\n assert not stylesheet.rules[0].errors\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 56, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 6, "token_counts": 47, "n_ast_nodes": 79, "n_identifiers": 12, "random_cut": "def test_opacity_to_styles(self, css_value, styles_value):\n css = f\"#some-widget {{ text-opacity: {css_value} }}\"\n stylesheet = Stylesheet()\n stylesheet.add_source(css)\n\n assert stylesheet.rules[0].styles.text_opacity == styles_value\n assert not stylesheet.rules[0].errors\n" }, { "id": 58581, "commit_id": "8abc1c25727c0236d52b025dc2e2062f3e67e94b", "repo": "prefect", "path": "tests/cli/test_work_queues.py", "file_name": "test_work_queues.py", "fun_name": "test_inspect_by_id", "commit_message": "Update work-queue CLI to accept both IDs and names", "code": "def test_inspect_by_id(work_queue):\n invoke_and_assert(\n command=f\"work-queue inspect {work_queue.id}\",\n expected_output_contains=[f\"id='{work_queue.id}'\", f\"name={work_queue.name!r}\"],\n expected_code=0,\n )\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 36, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 6, "token_counts": 27, "n_ast_nodes": 63, "n_identifiers": 8, "random_cut": "def test_inspect_by_id(work_queue):\n invoke_and_assert(\n " }, { "id": 212840, "commit_id": "b3680477c755277192715b343e9cd4254de7c45e", "repo": "PySimpleGUI", "path": "PySimpleGUI.py", "file_name": "PySimpleGUI.py", "fun_name": "bind", "commit_message": "Added propagate parameter to the Element.bind and Window.bind methods. 
Indicates whether tkinter should propagate the event to the corresponding element/window or stop with the user callback", "code": "def bind(self, bind_string, key, propagate=True):\n \n if not self._is_window_created('tried Window.bind'):\n return\n self.TKroot.bind(bind_string, lambda evt: self._user_bind_callback(bind_string, evt, propagate))\n self.user_bind_dict[bind_string] = key\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 58, "n_words": 19, "vocab_size": 19, "complexity": 2, "nloc": 5, "token_counts": 54, "n_ast_nodes": 85, "n_identifiers": 10, "random_cut": "def bind(self, bind_string, key, propagate=True):\n \n " }, { "id": 11577, "commit_id": "51403a57d03f0b1ddfd7fc533ccee78e23f5faa1", "repo": "jina", "path": "tests/integration/high_order_matches/test_document.py", "file_name": "test_document.py", "fun_name": "test_multi_executor", "commit_message": "refactor: unify port args (#4382)", "code": "def test_multi_executor():\n\n f = (\n Flow(port=exposed_port)\n .add(uses={'jtype': 'MatchAdder', 'with': {'traversal_paths': 'r'}})\n .add(uses={'jtype': 'MatchAdder', 'with': {'traversal_paths': 'm'}})\n )\n\n with f:\n results = Client(port=exposed_port, return_responses=True).post(\n on='index',\n inputs=Document(),\n )\n validate_results(results)\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 95, "n_words": 27, "vocab_size": 21, "complexity": 1, "nloc": 12, "token_counts": 85, "n_ast_nodes": 155, "n_identifiers": 15, "random_cut": "def test_multi_executor():\n\n f = (\n Flow(port=exposed_port)\n .add(uses={'jtype': 'MatchAdder', 'with': {'traversal_paths': 'r'}})\n .add(uses={'jtype': 'MatchAdder', 'with': {'traversal_paths': 'm'}})\n )\n\n with f:\n results = Client(port=exposed_port, return_responses=True).post(\n on='index',\n " }, { "id": 223057, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/tests/test_bdist_rpm.py", "file_name": "test_bdist_rpm.py", "fun_name": "setUp", "commit_message": "add python 3.10.4 for windows", "code": "def setUp(self):\n try:\n sys.executable.encode(\"UTF-8\")\n except UnicodeEncodeError:\n raise unittest.SkipTest(\"sys.executable is not encodable to UTF-8\")\n\n super(BuildRpmTestCase, self).setUp()\n self.old_location = os.getcwd()\n self.old_sys_argv = sys.argv, sys.argv[:]\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 78, "n_words": 22, "vocab_size": 21, "complexity": 2, "nloc": 8, "token_counts": 58, "n_ast_nodes": 99, "n_identifiers": 15, "random_cut": "def setUp(self):\n try:\n sys.executable.encode(\"UTF-8\")\n except UnicodeEncodeError:\n raise " }, { "id": 192182, "commit_id": "93c85bbcc31f8d5a052daf06f2f91f39697af1a4", "repo": "vision", "path": "torchvision/ops/deform_conv.py", "file_name": "deform_conv.py", "fun_name": "__repr__", "commit_message": "Consolidate repr (#5392)\n\n* Consolidating __repr__ strings\r\n\r\nCo-authored-by: Vasilis Vryniotis ", "code": "def __repr__(self) -> str:\n s = (\n f\"{self.__class__.__name__}(\"\n f\"{self.in_channels}\"\n f\", {self.out_channels}\"\n f\", kernel_size={self.kernel_size}\"\n f\", stride={self.stride}\"\n )\n s += f\", padding={self.padding}\" if self.padding != (0, 0) else \"\"\n s += f\", dilation={self.dilation}\" if 
self.dilation != (1, 1) else \"\"\n s += f\", groups={self.groups}\" if self.groups != 1 else \"\"\n s += \", bias=False\" if self.bias is None else \"\"\n s += \")\"\n\n return s\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 173, "n_words": 63, "vocab_size": 37, "complexity": 5, "nloc": 14, "token_counts": 81, "n_ast_nodes": 194, "n_identifiers": 14, "random_cut": "def __repr__(self) -> str:\n s = (\n f\"{self.__class__.__name__}(\"\n f\"{self.in_channels}\"\n f\", {self.out_channels}\"\n f\", kernel_size={self.kernel_size}\"\n f\", stride={self.stride}\"\n )\n s += f\", padding={self.padding}\" if self.padding != (0, 0) else \"\"\n s += f\", dilation={self.dilation}\" if self.dilation != (1, 1) else \"\"\n s += f\", groups={self.groups}\" if self.groups != 1 else \"\"\n s += \", bias=False\" if self.bias is None else \"\"\n " }, { "id": 280476, "commit_id": "9fd2946909b1b26d05593c7249f2381c3d93d382", "repo": "keras", "path": "keras/utils/feature_space.py", "file_name": "feature_space.py", "fun_name": "_cross_features", "commit_message": "Add FeatureSpace utility.\n\nPiperOrigin-RevId: 487344904", "code": "def _cross_features(self, features):\n all_outputs = {}\n for cross in self.crosses:\n inputs = [features[name] for name in cross.feature_names]\n outputs = self.crossers[cross.name](inputs)\n all_outputs[cross.name] = outputs\n return all_outputs\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 78, "n_words": 25, "vocab_size": 18, "complexity": 3, "nloc": 7, "token_counts": 55, "n_ast_nodes": 83, "n_identifiers": 11, "random_cut": "def _cross_features(self, features):\n all_outputs = {}\n for cross in self.crosses:\n inputs = [features[name] fo" }, { "id": 107026, "commit_id": "b1737e0ec9b274a979dc6c13d328cf494a657214", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/axes3d.py", "file_name": "axes3d.py", "fun_name": "set_top_view", "commit_message": "Clean up 3d plot box_aspect zooming\n\nlinting\n\nCleanup\n\nMake zoom and dist private attrs\n\nDeprecate Axes3D.dist\n\nDeprecate Axes3D.dist", "code": "def set_top_view(self):\n # this happens to be the right view for the viewing coordinates\n # moved up and to the left slightly to fit labels and axes\n xdwl = 0.95 / self._dist\n xdw = 0.9 / self._dist\n ydwl = 0.95 / self._dist\n ydw = 0.9 / self._dist\n # This is purposely using the 2D Axes's set_xlim and set_ylim,\n # because we are trying to place our viewing pane.\n super().set_xlim(-xdwl, xdw, auto=None)\n super().set_ylim(-ydwl, ydw, auto=None)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 143, "n_words": 74, "vocab_size": 50, "complexity": 1, "nloc": 7, "token_counts": 71, "n_ast_nodes": 103, "n_identifiers": 11, "random_cut": "def set_top_view(self):\n # this happens to be the right view for the viewing coordinates\n # moved up and to the left slightly to fit labels and axes\n xdwl = 0.95 / self._dist\n xdw = 0.9 / self._dist\n ydwl = 0.95 / self._dist\n ydw = 0.9 / sel" }, { "id": 72060, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_page_chooser.py", "file_name": "test_page_chooser.py", "fun_name": "test_locale_selector_present_in_root_view", "commit_message": "Reformat with black", "code": "def 
test_locale_selector_present_in_root_view(self):\n response = self.client.get(reverse(\"wagtailadmin_choose_page\"))\n html = response.json().get(\"html\")\n\n self.assertIn(self.LOCALE_SELECTOR_HTML, html)\n\n switch_to_french_url = self.get_choose_page_url(locale=self.fr_locale)\n fr_selector = f''\n self.assertIn(fr_selector, html)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 63, "n_words": 22, "vocab_size": 18, "complexity": 1, "nloc": 7, "token_counts": 64, "n_ast_nodes": 111, "n_identifiers": 15, "random_cut": "def test_locale_selector_present_in_root_view(self):\n response = self.client.get(reverse(\"wagtailadmin_choose_page\"))\n html = response.json().get(\"html\")\n\n self.assertIn(self.LOCALE_SELECTOR_HTML, html)\n\n switch_to_french_url = self.get_choose_page_url(locale=self.fr_locale)\n fr_selector = f''\n self.assertIn(fr_selector, html)\n" }, { "id": 191528, "commit_id": "315b0c09c614fa44daa61529d1f1da2fe827b16c", "repo": "langchain", "path": "tests/unit_tests/docstore/test_inmemory.py", "file_name": "test_inmemory.py", "fun_name": "test_adding_document_already_exists", "commit_message": "wip: add method for both docstore and embeddings (#119)\n\nthis will break atm but wanted to get thoughts on implementation.\r\n\r\n1. should add() be on docstore interface?\r\n2. should InMemoryDocstore change to take a list of documents as init?\r\n(makes this slightly easier to implement in FAISS -- if we think it is\r\nless clean then could expose a method to get the number of documents\r\ncurrently in the dict, and perform the logic of creating the necessary\r\ndictionary in the FAISS.add_texts method.\r\n\r\nCo-authored-by: Harrison Chase ", "code": "def test_adding_document_already_exists() -> None:\n \n _dict = {\"foo\": Document(page_content=\"bar\")}\n docstore = InMemoryDocstore(_dict)\n new_dict = {\"foo\": Document(page_content=\"foo\")}\n\n # Test that error is raised.\n with pytest.raises(ValueError):\n docstore.add(new_dict)\n\n # Test that old document is the same.\n bar_output = docstore.search(\"foo\")\n assert isinstance(bar_output, Document)\n assert bar_output.page_content == \"bar\"\n", "url": "https://github.com/hwchase17/langchain.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 79, "n_words": 42, "vocab_size": 33, "complexity": 1, "nloc": 10, "token_counts": 72, "n_ast_nodes": 134, "n_identifiers": 14, "random_cut": "def test_adding_document_already_exists() -> None:\n \n _dict = {\"foo\": Document(page_content=\"bar\")}\n docstore = InMemoryDocstore(_dict)\n new_dict = {\"foo\": Document(page_content=\"foo\")}\n\n # T" }, { "id": 103191, "commit_id": "397fbe7ad32cb455a17a44e3868f2d3582e7c998", "repo": "kitty", "path": "shell-integration/ssh/bootstrap.py", "file_name": "bootstrap.py", "fun_name": "compile_terminfo", "commit_message": "Avoid unnecessary which and fix typos", "code": "def compile_terminfo(base):\n tic = shutil.which('tic')\n if not tic:\n return\n tname = '.terminfo'\n if os.path.exists('/usr/share/misc/terminfo.cdb'):\n tname += '.cdb'\n os.environ['TERMINFO'] = os.path.join(HOME, tname)\n cp = subprocess.run(\n [tic, '-x', '-o', os.path.join(base, tname), os.path.join(base, '.terminfo', 'kitty.terminfo')],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n )\n if cp.returncode != 0:\n sys.stderr.buffer.write(cp.stdout)\n raise SystemExit('Failed to compile the terminfo database')\n\n", "url": 
"https://github.com/kovidgoyal/kitty.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 112, "n_words": 47, "vocab_size": 40, "complexity": 4, "nloc": 15, "token_counts": 125, "n_ast_nodes": 208, "n_identifiers": 24, "random_cut": "def compile_terminfo(base):\n tic = shutil.which('tic')\n if not tic:\n return\n tname = '.terminfo'\n if os.path.exists('/usr/share/misc/terminfo.cdb'):\n tname += '.cdb'\n os.environ['TERMINFO'] = os.path.join(HOME, tname)\n cp = subprocess.run(\n [tic, '-x', '-o', os.path.join(base, tname), os.path.join(base, '.terminfo', 'kitty.terminfo')],\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n )\n if cp.returncode != 0:\n sys.stderr.buffer.write(cp.stdout)\n raise SystemExit('Failed to c" }, { "id": 294683, "commit_id": "c6ba987995e4c726614ffbde2b31ce01a034aab3", "repo": "core", "path": "homeassistant/components/wemo/sensor.py", "file_name": "sensor.py", "fun_name": "unique_id_suffix", "commit_message": "Use device properties for WeMo Insight sensors (#63525)", "code": "def unique_id_suffix(self) -> str | None:\n \n return self.entity_description.unique_id_suffix\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 16, "n_ast_nodes": 28, "n_identifiers": 4, "random_cut": "def unique_id_suffix(self) -> str | None:\n \n return self.entity_description.unique_id_suf" }, { "id": 45297, "commit_id": "cb24ee9414afcdc1a2b0fe1ec0b9f0ba5e1bd7b7", "repo": "airflow", "path": "tests/providers/amazon/aws/operators/test_sagemaker_model.py", "file_name": "test_sagemaker_model.py", "fun_name": "test_execute", "commit_message": "Add SageMakerDeleteModelOperator (#21673)\n\n* Implement SagemakerDeleteModelOperator", "code": "def test_execute(self, delete_model, mock_client):\n delete_model.return_value = None\n self.sagemaker.execute(None)\n delete_model.assert_called_once_with(model_name='test')\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 29, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 49, "n_identifiers": 9, "random_cut": "def test_execute(self, delete_model, mock_client):\n delete_model.return_value = None\n self.sagemaker.execute(None)\n delete_model.assert_called_once_with(model_name='test')\n" }, { "id": 69446, "commit_id": "27df455b9862396a192ce381f2e34f0d3cb94e5e", "repo": "erpnext", "path": "erpnext/accounts/dashboard_chart_source/account_balance_timeline/account_balance_timeline.py", "file_name": "account_balance_timeline.py", "fun_name": "build_result", "commit_message": "perf: use `get_cached_value` instead of `db.get_value` in accounts module", "code": "def build_result(account, dates, gl_entries):\n\tresult = [[getdate(date), 0.0] for date in dates]\n\troot_type = frappe.get_cached_value(\"Account\", account, \"root_type\")\n\n\t# start with the first date\n\tdate_index = 0\n\n\t# get balances in debit\n\tfor entry in gl_entries:\n\n\t\t# entry date is after the current pointer, so move the pointer forward\n\t\twhile getdate(entry.posting_date) > result[date_index][0]:\n\t\t\tdate_index += 1\n\n\t\tresult[date_index][1] += entry.debit - entry.credit\n\n\t# if account type is credit, switch balances\n\tif root_type not in (\"Asset\", \"Expense\"):\n\t\tfor r in result:\n\t\t\tr[1] = -1 * 
r[1]\n\n\t# for balance sheet accounts, the totals are cumulative\n\tif root_type in (\"Asset\", \"Liability\", \"Equity\"):\n\t\tfor i, r in enumerate(result):\n\t\t\tif i > 0:\n\t\t\t\tr[1] = r[1] + result[i - 1][1]\n\n\treturn result\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 95, "n_words": 116, "vocab_size": 75, "complexity": 9, "nloc": 16, "token_counts": 155, "n_ast_nodes": 242, "n_identifiers": 18, "random_cut": "def build_result(account, dates, gl_entries):\n\tresult = [[getdate(date), 0.0] for date in dates]\n\troot_type = frappe.get_cached_value(\"Account\", account, \"root_type\")\n\n\t# start with the first date\n\tdate_index = 0\n\n\t# get balances in debit\n\tfor entry in gl_entries:\n\n\t\t# entry date is after the current pointer, so move the pointer forward\n\t\twhile getdate(entry.posting_date) > result[date_index][0]:\n\t\t\tdate_index += 1\n\n\t\tresult[date_index][1] += entry.debit - entry.cre" }, { "id": 129795, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "ci/travis/bazel.py", "file_name": "bazel.py", "fun_name": "textproto_split", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def textproto_split(input_lines, json_encoder):\n \n outputs = []\n re_flags = re.M\n pat_open = re.compile(b\"^(\\\\s*)([-\\\\w:]+)(\\\\s*){$\", flags=re_flags)\n pat_line = re.compile(b\"^(\\\\s*)([-\\\\w]+): (.*)$\", flags=re_flags)\n pat_close = re.compile(b\"}$\", flags=re_flags)\n prev_comma = False\n prev_tail = b\"\"\n for full_line in input_lines:\n pieces = re.split(b\"(\\\\r|\\\\n)\", full_line, 1)\n pieces[1:] = [b\"\".join(pieces[1:])]\n [line, tail] = pieces\n next_line = pat_open.sub(b'\\\\1[\"\\\\2\",\\\\3[', line)\n outputs.append(\n b\"\" if not prev_comma else b\"]\" if next_line.endswith(b\"}\") else b\",\"\n )\n next_line = pat_close.sub(b\"]\", next_line)\n next_line = pat_line.sub(\n lambda m: textproto_format(*(m.groups() + (json_encoder,))), next_line\n )\n outputs.append(prev_tail + next_line)\n if line == b\"}\":\n yield b\"\".join(outputs)\n del outputs[:]\n prev_comma = line != b\"}\" and (\n next_line.endswith(b\"]\") or next_line.endswith(b'\"')\n )\n prev_tail = tail\n if len(outputs) > 0:\n yield b\"\".join(outputs)\n del outputs[:]\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 304, "n_words": 107, "vocab_size": 70, "complexity": 8, "nloc": 31, "token_counts": 256, "n_ast_nodes": 418, "n_identifiers": 28, "random_cut": "def textproto_split(input_lines, json_encoder):\n \n outputs = []\n re_flags = re.M\n pat_open = re.compile(b\"^(\\\\s*)([-\\\\w:]+)(\\\\s*){$\", flags=re_flags)\n pat_line = re.compile(b\"^(\\\\s*)([-\\\\w]+): (.*)$\", flags=re_flags)\n pat_close = re.compile(b\"}$\", flags=re_flags)\n prev_comma = False\n prev_tail = b\"\"\n for full_line in input_lines:\n pieces = re.split(b\"(\\\\r|\\\\n)\", full_line, 1)\n pieces[1:] = [b\"\".join(pieces[1:])]\n [line, tail] = pieces\n next_line = pat_open.sub(b'\\\\1[\"\\\\2\",\\\\3[', line)\n outputs.append(\n b\"\" if not prev_comma else b\"]\" if next_line.endswith(b\"}\") else b\",\"\n )\n next_line = pat_close.sub(b\"]\", next_line)\n next_line = pat_line.sub(\n lambda m: textproto_format(*(m.groups() + (json_encoder,))), next_line\n )\n outputs.append(prev_tail + 
next_line)\n if line == b\"}\":\n yield b\"\".join(outputs)\n " }, { "id": 91148, "commit_id": "c1aa08e0f83144dd42b8e9dfef660b7a35f1e803", "repo": "sentry", "path": "tests/sentry/snuba/test_tasks.py", "file_name": "test_tasks.py", "fun_name": "test_user_query_transactions", "commit_message": "feat(mep): Add `build_snql_query` method to replace `build_snuba_filter` (#35537)\n\nThis adds in `build_snql_query`, which is intended to replace `build_snuba_filter` everywhere.\r\nInitially, I'm just implementing this in the error and transaction entities. I'll follow up with\r\nsessions and metrics in a later pr.\r\n\r\nThis function uses `QueryBuilder` to build the snql queries that we'll use to create alert rules.\r\nCurrently, `QueryBuilder` requires that all queries have a start/end passed since those are required\r\nfor all on demand queries that users make. Alert rules operate differently - we explicitly can't\r\npass a start/end, since these alerts operate on a time window, and snuba adds in the time component\r\nevery time these subscriptions run. To support this, I've added the ability to skip start/end checks\r\nin `QueryBuilder`.\r\n\r\nFor testing, I just copied `BuildSnubaFilterTest` and converted it to check snql queries instead.\r\nSince `build_snuba_filter` will go away soon I'm not concerned about duplication here. A bunch of\r\ntests here are still commented out - these are all session/metric related. I'll fix those tests in\r\na follow up pr as well.", "code": "def test_user_query_transactions(self):\n expected_conditions = [\n Condition(Column(\"user\"), Op.EQ, \"anengineer@work.io\"),\n Condition(Column(\"project_id\"), Op.IN, (self.project.id,)),\n ]\n self.run_test(\n QueryDatasets.TRANSACTIONS,\n \"p95()\",\n \"user:anengineer@work.io\",\n expected_conditions,\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 111, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 11, "token_counts": 59, "n_ast_nodes": 94, "n_identifiers": 13, "random_cut": "def test_user_query_transactions(self):\n expected_conditions = [\n Condition(Column(\"user\"), Op.EQ, \"anengineer@work.io\"),\n Condition(Column(\"project_id\"), Op.IN, (self.project.id,)),\n ]\n self.run_test(\n QueryDatasets.TRANSACTIONS,\n \"p95()\",\n \"user:anengineer@work.io\",\n " }, { "id": 47588, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/models/test_dag.py", "file_name": "test_dag.py", "fun_name": "_make_test_subdag", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def _make_test_subdag(self, session):\n dag_id = 'test_subdag'\n self._clean_up(dag_id)\n task_id = 't1'\n dag = DAG(dag_id, start_date=DEFAULT_DATE, max_active_runs=1)\n t_1 = EmptyOperator(task_id=task_id, dag=dag)\n subdag = DAG(dag_id + '.test', start_date=DEFAULT_DATE, max_active_runs=1)\n SubDagOperator(task_id='test', subdag=subdag, dag=dag)\n t_2 = EmptyOperator(task_id='task', dag=subdag)\n subdag.parent_dag = dag\n\n dag.sync_to_db()\n\n session = settings.Session()\n dag.create_dagrun(\n run_type=DagRunType.MANUAL,\n state=State.FAILED,\n start_date=DEFAULT_DATE,\n execution_date=DEFAULT_DATE,\n session=session,\n )\n subdag.create_dagrun(\n run_type=DagRunType.MANUAL,\n state=State.FAILED,\n start_date=DEFAULT_DATE,\n execution_date=DEFAULT_DATE,\n session=session,\n )\n task_instance_1 = TI(t_1, 
execution_date=DEFAULT_DATE, state=State.RUNNING)\n task_instance_2 = TI(t_2, execution_date=DEFAULT_DATE, state=State.RUNNING)\n session.merge(task_instance_1)\n session.merge(task_instance_2)\n\n return dag, subdag\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 318, "n_words": 69, "vocab_size": 45, "complexity": 1, "nloc": 31, "token_counts": 210, "n_ast_nodes": 321, "n_identifiers": 33, "random_cut": "def _make_test_subdag(self, session):\n dag_id = 'test_subdag'\n self._clean_up(dag_id)\n task_id = 't1'\n dag = DAG(dag_id, start_date=DEFA" }, { "id": 185846, "commit_id": "df37a9b90a52de91643ea4dc01b21f32dbeca718", "repo": "textual", "path": "tests/test_widget.py", "file_name": "test_widget.py", "fun_name": "compose", "commit_message": "Add get_child_by_id and get_widget_by_id (#1146)\n\n* Add get_child_by_id and get_widget_by_id\r\n\r\n* Remove redundant code\r\n\r\n* Add unit tests for app-level get_child_by_id and get_widget_by_id\r\n\r\n* Remove redundant test fixture injection\r\n\r\n* Update CHANGELOG\r\n\r\n* Enforce uniqueness of ID amongst widget children\r\n\r\n* Enforce unique widget IDs amongst widgets mounted together\r\n\r\n* Update CHANGELOG.md\r\n\r\n* Ensuring unique IDs in a more logical place\r\n\r\n* Add docstring to NodeList._get_by_id\r\n\r\n* Dont use duplicate IDs in tests, dont mount 2000 widgets\r\n\r\n* Mounting less widgets in a unit test\r\n\r\n* Reword error message\r\n\r\n* Use lower-level depth first search in get_widget_by_id to break out early", "code": "def compose(self) -> ComposeResult:\n grandchild1 = Widget(id=\"grandchild1\")\n child1 = Widget(grandchild1, id=\"child1\")\n child2 = Widget(id=\"child2\")\n\n yield Widget(\n child1,\n child2,\n id=\"parent\",\n )\n\n\n@pytest.fixture", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 87, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 9, "token_counts": 45, "n_ast_nodes": 85, "n_identifiers": 10, "random_cut": "def compose(self) -> ComposeResult:\n grandchild1 = Widget(id=\"grandchild1\")\n child1 = Widget(grandchild1, id=\"child1\")\n child2 = Widget(id=\"child2\")\n\n yield Widget(" }, { "id": 195659, "commit_id": "415a6c8615125c6128155633d4527e332a68347b", "repo": "vnpy", "path": "setup.py", "file_name": "setup.py", "fun_name": "get_install_requires", "commit_message": "[Mod] update requirements.txt for Python 3.10", "code": "def get_install_requires():\n install_requires = [\n \"tzlocal>=2.0.0\",\n \"PyQt5>=5.15.6\",\n \"pyqtgraph>=0.12.3\",\n \"qdarkstyle>=3.0.3\",\n \"numpy>=1.22.1\",\n \"pandas>=1.4.0\",\n \"matplotlib>=3.5.1\",\n \"seaborn>=0.11.2\",\n \"ta-lib>=0.4.24\",\n \"deap>=1.3.1\",\n \"pyzmq>=22.3.0\",\n \"QScintilla>=2.13.1\",\n \"plotly>=5.5.0\",\n ]\n\n return install_requires\n\n", "url": "https://github.com/vnpy/vnpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 120, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 17, "token_counts": 36, "n_ast_nodes": 70, "n_identifiers": 2, "random_cut": "def get_install_requires():\n install_requires = [\n \"tzlocal>=2.0.0\",\n \"PyQt5>=5.15.6\",\n \"pyqtgraph>=0.12.3\",\n \"qdarkstyle>=3.0.3\",\n \"numpy>=1.22.1\",\n \"pandas>=1.4.0\",\n \"matplotlib>=3.5.1\",\n \"seaborn>=0.11.2\",\n \"ta-lib>=0.4.24\",\n \"deap>=1.3.1\",\n \"pyzmq>=22.3.0\",\n 
\"QScintilla>=2.13.1\",\n \"plotly>=5.5.0\",\n ]\n\n return install_requi" }, { "id": 218485, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ipaddress.py", "file_name": "ipaddress.py", "fun_name": "_parse_octet", "commit_message": "add python 3.10.4 for windows", "code": "def _parse_octet(cls, octet_str):\n \n if not octet_str:\n raise ValueError(\"Empty octet not permitted\")\n # Reject non-ASCII digits.\n if not (octet_str.isascii() and octet_str.isdigit()):\n msg = \"Only decimal digits permitted in %r\"\n raise ValueError(msg % octet_str)\n # We do the length check second, since the invalid character error\n # is likely to be more informative for the user\n if len(octet_str) > 3:\n msg = \"At most 3 characters permitted in %r\"\n raise ValueError(msg % octet_str)\n # Handle leading zeros as strict as glibc's inet_pton()\n # See security bug bpo-36384\n if octet_str != '0' and octet_str[0] == '0':\n msg = \"Leading zeros are not permitted in %r\"\n raise ValueError(msg % octet_str)\n # Convert to integer (we know digits are legal)\n octet_int = int(octet_str, 10)\n if octet_int > 255:\n raise ValueError(\"Octet %d (> 255) not permitted\" % octet_int)\n return octet_int\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 320, "n_words": 134, "vocab_size": 88, "complexity": 8, "nloc": 16, "token_counts": 105, "n_ast_nodes": 190, "n_identifiers": 10, "random_cut": "def _parse_octet(cls, octet_str):\n \n if not octet_str:\n raise ValueError(\"Empty octet not permitted\")\n # Reject non-ASCII digits.\n if not (octet_str.isascii() and octet_str.isdigit()):\n msg = \"Only decimal digits permitted in %r\"\n raise ValueError(msg % octet_str)\n # We do the length check second, since the invalid character error\n # is likely to be more informative for the user\n if len(octet_str) > 3:\n msg = \"At most 3 characters permitted in %r\"\n raise ValueError(msg % octet_str)\n # Handle leading zeros as strict as glibc's inet_pton()\n # See security bug bpo-36384\n if octet_str != '0' and octet_str[0] == '0':\n msg = \"Leading zeros are not permitted in %r\"\n raise ValueError(msg % o" }, { "id": 212517, "commit_id": "528d85e642340ef30ec91f30b65c7c43370f648d", "repo": "bokeh", "path": "bokeh/server/django/routing.py", "file_name": "routing.py", "fun_name": "get_http_urlpatterns", "commit_message": "Normalize built-in types and remove `Unknown` (#12252)\n\n* Use lower case names for built-in types\r\n\r\nAlso incidentally apply TypeAlias marker.\r\n\r\n* Drop `Unknown` in favour of consistent usage of `Any`\r\n\r\n* Enable lazy annotations in conftest.py", "code": "def get_http_urlpatterns(self) -> list[URLPattern]:\n return self._http_urlpatterns + [url(r\"\", AsgiHandler)]\n", "url": "https://github.com/bokeh/bokeh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 15, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 24, "n_ast_nodes": 37, "n_identifiers": 7, "random_cut": "def get_http_urlpatterns(self) -> list[URLPattern]:\n return self." 
}, { "id": 168632, "commit_id": "252ae0555abf488522f947107dcdee684be6ac8a", "repo": "pandas", "path": "pandas/tests/indexes/interval/test_setops.py", "file_name": "test_setops.py", "fun_name": "test_symmetric_difference", "commit_message": "Revert Interval/IntervalIndex/interval_range.inclusive deprecation (#48116)\n\n* Revert \"Cln tests interval wrt inclusive (#47775)\"\r\n\r\nThis reverts commit 2d6e0b251955d3a2c0c88f7e6ddb57b335ed09b7.\r\n\r\n* Revert \"CLN: Rename private variables to inclusive (#47655)\"\r\n\r\nThis reverts commit 102b3ca2119df822e2b0f346fa936d0fe9f17501.\r\n\r\n* Revert \"TYP: Improve typing interval inclusive (#47646)\"\r\n\r\nThis reverts commit 55064763e8ba55f6ff5370a8dd083767a189d7a4.\r\n\r\n* Revert \"DEPR: Deprecate set_closed and add set_incluive (#47636)\"\r\n\r\nThis reverts commit bd4ff395cbbf4cbde1fc8f1f746cae064a401638.\r\n\r\n* Revert \"DEPR: Remove deprecation from private class IntervalTree (#47637)\"\r\n\r\nThis reverts commit f6658ef9fdef5972214fdc338e2c6b5ee308dbf4.\r\n\r\n* Revert \"Revert inclusive default change of IntervalDtype (#47367)\"\r\n\r\nThis reverts commit d9dd1289e07d86928d144e53beb3d5b8ab3c2215.\r\n\r\n* Revert \"ENH: consistency of input args for boundaries - Interval (#46522)\"\r\n\r\nThis reverts commit 7e23a37e1c5bda81234801a6584563e2880769eb.\r\n\r\n* Revert \"ENH: consistency of input args for boundaries - pd.interval_range (#46355)\"\r\n\r\nThis reverts commit 073b3535d7a5171102e5915c38b57c21d13795ae.\r\n\r\n* Fix ArrowIntervalType manually\r\n\r\n* Remove unused import\r\n\r\n* Fix doctest and leftover usage\r\n\r\n* Fix remaining tests\r\n\r\n* Fix wording in doctoring\r\n\r\nCo-authored-by: Patrick Hoefler <61934744+phofl@users.noreply.github.com>", "code": "def test_symmetric_difference(self, closed, sort):\n index = monotonic_index(0, 11, closed=closed)\n result = index[1:].symmetric_difference(index[:-1], sort=sort)\n expected = IntervalIndex([index[0], index[-1]])\n if sort is None:\n tm.assert_index_equal(result, expected)\n assert tm.equalContents(result, expected)\n\n # GH 19101: empty result, same dtype\n result = index.symmetric_difference(index, sort=sort)\n expected = empty_index(dtype=\"int64\", closed=closed)\n if sort is None:\n tm.assert_index_equal(result, expected)\n assert tm.equalContents(result, expected)\n\n # GH 19101: empty result, different dtypes\n other = IntervalIndex.from_arrays(\n index.left.astype(\"float64\"), index.right, closed=closed\n )\n result = index.symmetric_difference(other, sort=sort)\n expected = empty_index(dtype=\"float64\", closed=closed)\n tm.assert_index_equal(result, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 218, "n_words": 74, "vocab_size": 42, "complexity": 3, "nloc": 18, "token_counts": 182, "n_ast_nodes": 279, "n_identifiers": 20, "random_cut": "def test_symmetric_difference(self, closed, sort):\n index = monotonic_index(0, 11, closed=closed)\n result = index[1:].symmetric_difference(index[:-1], sort=sort)\n expected = IntervalIndex([index[0], index[-1]])\n if sort is None:\n tm.assert_index" }, { "id": 225368, "commit_id": "ff8552a57abf2c32f2d0344ef12707b88e008493", "repo": "mkdocs", "path": "mkdocs/tests/config/config_options_tests.py", "file_name": "config_options_tests.py", "fun_name": "test_provided_empty", "commit_message": "Add tests for new class-based configs\n\nThe old-style tests are intentionally kept at config_options_legacy_tests.py", "code": 
"def test_provided_empty(self) -> None:\n conf = self.get_config(self.Schema, {'option': []})\n self.assertEqual(conf.option, None)\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 24, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 34, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def test_provided_empty(self) -> None:\n conf = self.get_config(self.Schema, {'option': []})\n self.assertEqual(conf.option, None)\n" }, { "id": 46505, "commit_id": "85871eba420f3324432f55f74fe57005ff47a21c", "repo": "airflow", "path": "airflow/models/dag.py", "file_name": "dag.py", "fun_name": "_time_restriction", "commit_message": "Fix entire DAG stops when one task has end_date (#20920)\n\nrelated #19917 , #20471", "code": "def _time_restriction(self) -> TimeRestriction:\n start_dates = [t.start_date for t in self.tasks if t.start_date]\n if self.start_date is not None:\n start_dates.append(self.start_date)\n earliest = None\n if start_dates:\n earliest = timezone.coerce_datetime(min(start_dates))\n latest = self.end_date\n end_dates = [t.end_date for t in self.tasks if t.end_date]\n if len(end_dates) == len(self.tasks): # not exists null end_date\n if self.end_date is not None:\n end_dates.append(self.end_date)\n if end_dates:\n latest = timezone.coerce_datetime(max(end_dates))\n return TimeRestriction(earliest, latest, self.catchup)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 193, "n_words": 63, "vocab_size": 41, "complexity": 10, "nloc": 15, "token_counts": 133, "n_ast_nodes": 208, "n_identifiers": 18, "random_cut": "def _time_restriction(self) -> TimeRestriction:\n start_dates = [t.start_date for t in self.tasks if t.start_date]\n if self.start_date is not None:\n start_dates.append(self.start_date)\n earliest = None\n if start_dates:\n earliest = timezone.coerce_datetime(min(start_dates))\n latest = self.end_date\n end_dates = [t.end_date for t in self.tasks if t.end_date]\n if len(end_dates) == len(self.tasks): # not exists null end_date\n if self.end_d" }, { "id": 245471, "commit_id": "5620fef4ad50ec4f82d6e553e7a4851495893c4b", "repo": "mmdetection", "path": "tests/test_visualization/test_local_visualizer.py", "file_name": "test_local_visualizer.py", "fun_name": "test_add_datasample", "commit_message": "Support panoptic_seg visualization (#8399)\n\n* Support panoptic_seg visualization\r\n\r\n* add comment\r\n\r\n* support obtain image\r\n\r\n* update", "code": "def test_add_datasample(self):\n h = 12\n w = 10\n num_class = 3\n num_bboxes = 5\n out_file = 'out_file.jpg'\n\n image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')\n\n # test gt_instances\n gt_instances = InstanceData()\n gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)\n gt_instances.labels = torch.randint(0, num_class, (num_bboxes, ))\n gt_det_data_sample = DetDataSample()\n gt_det_data_sample.gt_instances = gt_instances\n #\n det_local_visualizer = DetLocalVisualizer()\n det_local_visualizer.add_datasample('image', image, gt_det_data_sample)\n\n # test out_file\n det_local_visualizer.add_datasample(\n 'image', image, gt_det_data_sample, out_file=out_file)\n assert os.path.exists(out_file)\n drawn_img = cv2.imread(out_file)\n assert drawn_img.shape == (h, w, 3)\n os.remove(out_file)\n\n # test gt_instances and pred_instances\n pred_instances = InstanceData()\n pred_instances.bboxes = 
_rand_bboxes(num_bboxes, h, w)\n pred_instances.labels = torch.randint(0, num_class, (num_bboxes, ))\n pred_instances.scores = torch.rand((num_bboxes, ))\n pred_det_data_sample = DetDataSample()\n pred_det_data_sample.pred_instances = pred_instances\n\n det_local_visualizer.add_datasample(\n 'image',\n image,\n gt_det_data_sample,\n pred_det_data_sample,\n out_file=out_file)\n self._assert_image_and_shape(out_file, (h, w * 2, 3))\n\n det_local_visualizer.add_datasample(\n 'image',\n image,\n gt_det_data_sample,\n pred_det_data_sample,\n draw_gt=False,\n out_file=out_file)\n self._assert_image_and_shape(out_file, (h, w, 3))\n\n det_local_visualizer.add_datasample(\n 'image',\n image,\n gt_det_data_sample,\n pred_det_data_sample,\n draw_pred=False,\n out_file=out_file)\n self._assert_image_and_shape(out_file, (h, w, 3))\n\n # test gt_panoptic_seg and pred_panoptic_seg\n det_local_visualizer.dataset_meta = dict(CLASSES=('1', '2'))\n gt_sem_seg = _create_panoptic_data(num_bboxes, h, w)\n panoptic_seg = PixelData(sem_seg=gt_sem_seg)\n gt_det_data_sample = DetDataSample()\n gt_det_data_sample.gt_panoptic_seg = panoptic_seg\n\n pred_sem_seg = _create_panoptic_data(num_bboxes, h, w)\n panoptic_seg = PixelData(sem_seg=pred_sem_seg)\n pred_det_data_sample = DetDataSample()\n pred_det_data_sample.pred_panoptic_seg = panoptic_seg\n det_local_visualizer.add_datasample(\n 'image',\n image,\n gt_det_data_sample,\n pred_det_data_sample,\n out_file=out_file)\n self._assert_image_and_shape(out_file, (h, w * 2, 3))\n\n # class information must be provided\n det_local_visualizer.dataset_meta = {}\n with self.assertRaises(AssertionError):\n det_local_visualizer.add_datasample(\n 'image',\n image,\n gt_det_data_sample,\n pred_det_data_sample,\n out_file=out_file)\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 884, "n_words": 203, "vocab_size": 89, "complexity": 1, "nloc": 73, "token_counts": 444, "n_ast_nodes": 680, "n_identifiers": 52, "random_cut": "def test_add_datasample(self):\n h = 12\n w = 10\n num_class = 3\n num_bboxes = 5\n out_file = 'out_file.jpg'\n\n image = np.random.randint(0, 256, size=(h, w, 3)).astype('uint8')\n\n # test gt_instances\n gt_instances = InstanceData()\n gt_instances.bboxes = _rand_bboxes(num_bboxes, h, w)\n gt_instances.labels = torch.randint(0, num_class, (num_bboxes, ))\n gt_det_data_sample = DetDataSample()\n gt_det_data_sample.gt_instances = gt_instances\n #\n det_local_visualizer = DetLocalVisualizer()\n det_local_visualizer.add_datasample('image', image, gt_det_data_sample)\n\n # test out_file\n det_local_visualizer.add_datasample(\n 'image', image, gt_det_data_sample, out_file=out_file)\n assert os.path.exists(out_file)\n drawn_img = cv2.imread(out_file)\n assert drawn_img.shape == (h, w, 3)\n os.remove(out_file)\n\n # test gt_instances and pred_instances\n pred_instances = InstanceData()\n pred_instances.bboxes = _rand_bboxes(num_bboxes, h, w)\n pred_instances.labels = torch.randint(0, num_class, (num_bboxes, ))\n pred_instances.scores = torch.rand((num_bboxes, ))\n pred_det_data_sample = DetDataSample()\n pred_det_data_sample.pred_instances = pred_instances\n\n det_local_visualizer.add_datasample(\n " }, { "id": 139920, "commit_id": "f215c8c9887c5b0aa09ba36107391f706c8ddca8", "repo": "ray", "path": "python/ray/tune/tests/test_integration_wandb.py", "file_name": "test_integration_wandb.py", "fun_name": "testWandbDecoratorConfig", "commit_message": "[tune] Move wandb 
logging directory into trial logdir (#25020)\n\nWeights and biases creates a wandb directory to collect intermediate logs and artifacts before uploading them. This directory should be in the respective trial directories. This also means we can re-enable auto resuming.", "code": "def testWandbDecoratorConfig(self):\n config = {\"par1\": 4, \"par2\": 9.12345678}\n trial = Trial(\n config,\n 0,\n \"trial_0\",\n \"trainable\",\n PlacementGroupFactory([{\"CPU\": 1}]),\n \"/tmp\",\n )\n trial_info = TrialInfo(trial)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 115, "n_words": 22, "vocab_size": 20, "complexity": 2, "nloc": 42, "token_counts": 336, "n_ast_nodes": 82, "n_identifiers": 8, "random_cut": "def testWandbDecoratorConfig(self):\n config = {\"par1\": 4, \"par2\": 9.12345678}\n trial = Trial(\n config,\n 0,\n \"trial_0\",\n \"trainable\",\n P" }, { "id": 188440, "commit_id": "03afa4f9743fb8e6892be62a44b19dc48e0ed7f0", "repo": "jumpserver", "path": "apps/authentication/backends/base.py", "file_name": "base.py", "fun_name": "user_can_authenticate", "commit_message": "Fix rbac (#7713)\n\n* fix: token 系统用户增加 protocol\r\n\r\n* fix: 修复清除orphan session时同时清除对应的 session_task\r\n\r\n* perf: 修改 connection token api\r\n\r\n* fix: 修复无法获取系统角色绑定的问题\r\n\r\n* perf: 增加 db terminal 及 magnus 组件\r\n\r\n* perf: 修改 migrations\r\n\r\n* fix: 修复AUTHENTICATION_BACKENDS相关的逻辑\r\n\r\n* fix: 修改判断backend认证逻辑\r\n\r\n* fix: 修复资产账号查看密码跳过mfa\r\n\r\n* fix: 修复用户组授权权限错误\r\n\r\n* feat: 支持COS对象存储\r\n\r\n* feat: 升级依赖 jms_storage==0.0.42\r\n\r\n* fix: 修复 koko api 问题\r\n\r\n* feat: 修改存储翻译信息\r\n\r\n* perf: 修改 ticket 权限\r\n\r\n* fix: 修复获取资产授权系统用户 get_queryset\r\n\r\n* perf: 抽取 ticket\r\n\r\n* perf: 修改 cmd filter 的权限\r\n\r\n* fix: 修改 ticket perm\r\n\r\n* fix: 修复oidc依赖问题\r\n\r\nCo-authored-by: Eric \r\nCo-authored-by: ibuler \r\nCo-authored-by: 小冯 \r\nCo-authored-by: feng626 <1304903146@qq.com>", "code": "def user_can_authenticate(self, user):\n \n is_valid = getattr(user, 'is_valid', None)\n return is_valid or is_valid is None\n\n # allow user to authenticate", "url": "https://github.com/jumpserver/jumpserver.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 43, "n_words": 19, "vocab_size": 17, "complexity": 2, "nloc": 3, "token_counts": 24, "n_ast_nodes": 41, "n_identifiers": 5, "random_cut": "def user_can_authenticate(self, user):\n \n is_valid = getattr(user, 'is_valid', None)\n return is_v" }, { "id": 199391, "commit_id": "5b90f7d36b8291b61391d457bfad94648af8afe3", "repo": "sympy", "path": "sympy/physics/mechanics/tests/test_jointsmethod.py", "file_name": "test_jointsmethod.py", "fun_name": "test_chaos_pendulum", "commit_message": "Deprecate parent_joint_pos and child_joint_pos", "code": "def test_chaos_pendulum():\n #https://www.pydy.org/examples/chaos_pendulum.html\n mA, mB, lA, lB, IAxx, IBxx, IByy, IBzz, g = symbols('mA, mB, lA, lB, IAxx, IBxx, IByy, IBzz, g')\n theta, phi, omega, alpha = dynamicsymbols('theta phi omega alpha')\n\n A = ReferenceFrame('A')\n B = ReferenceFrame('B')\n\n rod = Body('rod', mass=mA, frame=A, central_inertia=inertia(A, IAxx, IAxx, 0))\n plate = Body('plate', mass=mB, frame=B, central_inertia=inertia(B, IBxx, IByy, IBzz))\n C = Body('C')\n with ignore_warnings(SymPyDeprecationWarning):\n J1 = PinJoint('J1', C, rod, coordinates=theta, speeds=omega,\n child_point=-lA * rod.z, parent_axis=C.y,\n child_axis=rod.y)\n J2 = PinJoint('J2', rod, plate, 
coordinates=phi, speeds=alpha,\n parent_point=(lB - lA) * rod.z, parent_axis=rod.z,\n child_axis=plate.z)\n\n rod.apply_force(mA*g*C.z)\n plate.apply_force(mB*g*C.z)\n\n method = JointsMethod(C, J1, J2)\n method.form_eoms()\n\n MM = method.mass_matrix\n forcing = method.forcing\n rhs = MM.LUsolve(forcing)\n xd = (-2 * IBxx * alpha * omega * sin(phi) * cos(phi) + 2 * IByy * alpha * omega * sin(phi) *\n cos(phi) - g * lA * mA * sin(theta) - g * lB * mB * sin(theta)) / (IAxx + IBxx *\n sin(phi)**2 + IByy * cos(phi)**2 + lA**2 * mA + lB**2 * mB)\n assert (rhs[0] - xd).simplify() == 0\n xd = (IBxx - IByy) * omega**2 * sin(phi) * cos(phi) / IBzz\n assert (rhs[1] - xd).simplify() == 0\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 370, "n_words": 187, "vocab_size": 108, "complexity": 1, "nloc": 28, "token_counts": 403, "n_ast_nodes": 616, "n_identifiers": 53, "random_cut": "def test_chaos_pendulum():\n #https://www.pydy.org/examples/chaos_pendulum.html\n mA, mB, lA, lB, IAxx, IBxx, " }, { "id": 216519, "commit_id": "00ee5eed1d75417faaaa185e27947b268239698e", "repo": "salt", "path": "tests/integration/states/test_pkgrepo.py", "file_name": "test_pkgrepo.py", "fun_name": "test_pkgrepo_05_copr_with_comments", "commit_message": "various changes and fixes needed to add PhotonOS into CICD.", "code": "def test_pkgrepo_05_copr_with_comments(self, grains):\n \n kwargs = {}\n if grains[\"os_family\"] == \"RedHat\":\n if (\n grains[\"osfinger\"] == \"CentOS Linux-7\"\n or grains[\"osfinger\"] == \"Amazon Linux-2\"\n or grains[\"os\"] == \"VMware Photon OS\"\n ):\n self.skipTest(\"copr plugin not installed on Centos 7 CI\")\n kwargs = {\n \"name\": \"hello-copr\",\n \"copr\": \"mymindstorm/hello\",\n \"enabled\": False,\n \"comments\": [\"This is a comment\"],\n }\n else:\n self.skipTest(\n \"{}/{} test case needed\".format(grains[\"os_family\"], grains[\"os\"])\n )\n\n try:\n # Run the state to add the repo\n ret = self.run_state(\"pkgrepo.managed\", **kwargs)\n self.assertSaltTrueReturn(ret)\n\n # Run again with modified comments\n kwargs[\"comments\"].append(\"This is another comment\")\n ret = self.run_state(\"pkgrepo.managed\", **kwargs)\n self.assertSaltTrueReturn(ret)\n ret = ret[next(iter(ret))]\n self.assertEqual(\n ret[\"changes\"],\n {\n \"comments\": {\n \"old\": [\"This is a comment\"],\n \"new\": [\"This is a comment\", \"This is another comment\"],\n }\n },\n )\n\n # Run a third time, no changes should be made\n ret = self.run_state(\"pkgrepo.managed\", **kwargs)\n self.assertSaltTrueReturn(ret)\n ret = ret[next(iter(ret))]\n self.assertFalse(ret[\"changes\"])\n self.assertEqual(\n ret[\"comment\"],\n \"Package repo '{}' already configured\".format(kwargs[\"name\"]),\n )\n finally:\n # Clean up\n self.run_state(\"pkgrepo.absent\", copr=kwargs[\"copr\"])\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 757, "n_words": 146, "vocab_size": 96, "complexity": 6, "nloc": 45, "token_counts": 254, "n_ast_nodes": 465, "n_identifiers": 15, "random_cut": "def test_pkgrepo_05_copr_with_comments(self, grains):\n \n kwargs = {}\n if grains[\"os_family\"] == \"RedHat\":\n if (\n grains[\"osfinger\"] == \"CentOS Linux-7\"\n or grains[\"osfinger\"] == \"Amazon Linux-2\"\n or grains[\"os\"] == \"VMware Photon OS\"\n ):\n self.skipTest(\"copr plugin not installed on Centos 7 CI\")\n " }, { "id": 132299, "commit_id": 
"7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/suggest/__init__.py", "file_name": "__init__.py", "fun_name": "_import_hebo_search", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _import_hebo_search():\n from ray.tune.suggest.hebo import HEBOSearch\n\n return HEBOSearch\n\n\nSEARCH_ALG_IMPORT = {\n \"variant_generator\": _import_variant_generator,\n \"random\": _import_variant_generator,\n \"ax\": _import_ax_search,\n \"dragonfly\": _import_dragonfly_search,\n \"skopt\": _import_skopt_search,\n \"hyperopt\": _import_hyperopt_search,\n \"bayesopt\": _import_bayesopt_search,\n \"bohb\": _import_bohb_search,\n \"nevergrad\": _import_nevergrad_search,\n \"optuna\": _import_optuna_search,\n \"zoopt\": _import_zoopt_search,\n \"sigopt\": _import_sigopt_search,\n \"hebo\": _import_hebo_search,\n \"blendsearch\": _import_blendsearch_search,\n \"cfo\": _import_cfo_search,\n}\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 90, "n_words": 42, "vocab_size": 40, "complexity": 1, "nloc": 3, "token_counts": 16, "n_ast_nodes": 136, "n_identifiers": 20, "random_cut": "def _import_hebo_search():\n from ray.tune.suggest.hebo import HEBOSearch\n\n return HEBOSearch\n\n\nSEARCH_ALG_IMPORT = {\n \"variant_generator\": _import_variant_generator,\n \"random\": _import_variant_generator,\n \"ax\": _import_ax_search,\n \"dragonfly\": _import_dragonfly_search,\n \"skopt\": _import_skopt_search,\n \"hyperopt\": _import_hyperopt_search,\n \"bayesopt\": _import_bayesopt_search,\n \"bohb\": _import_bohb_search,\n \"nevergrad\": _import_nevergrad_search,\n \"optuna\": _import_optuna_search,\n \"zoopt\": _import_zoopt_search,\n \"sigopt\": _import_sigopt_search,\n \"hebo\": _import_hebo_search,\n \"blendsearch\": _import_blendsearch_search,\n \"cfo\": _import_cfo_s" }, { "id": 213714, "commit_id": "d743336b1f3654cd0315f380f43eed4116997c1d", "repo": "ivy", "path": "ivy_tests/test_core/test_container.py", "file_name": "test_container.py", "fun_name": "test_container_find_sub_structure", "commit_message": "renamed dev_str arg to dev for all methods.", "code": "def test_container_find_sub_structure(dev, call):\n dict_in = {'a': ivy.array([1], dev=dev),\n 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}\n top_cont = Container(dict_in)\n\n # full\n sub_cont = Container({'c': ivy.array([4], dev=dev), 'd': ivy.array([5], dev=dev)})\n assert not top_cont.find_sub_container(sub_cont)\n found_kc = top_cont.find_sub_structure(sub_cont)\n assert found_kc == 'b'\n found_kc = top_cont.find_sub_structure(top_cont)\n assert found_kc == ''\n\n # partial\n partial_sub_cont = Container({'d': ivy.array([5], dev=dev)})\n found_kc = top_cont.find_sub_structure(partial_sub_cont, partial=True)\n assert found_kc == 'b'\n partial_sub_cont = Container({'b': {'d': ivy.array([5], dev=dev)}})\n found_kc = top_cont.find_sub_structure(partial_sub_cont, partial=True)\n assert found_kc == ''\n\n", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 135, "n_words": 74, "vocab_size": 40, "complexity": 1, "nloc": 16, "token_counts": 211, "n_ast_nodes": 346, "n_identifiers": 14, "random_cut": "def test_container_find_sub_structure(dev, call):\n dict_in = {'a': ivy.array([1], dev=dev),\n 'b': {'c': ivy.array([2], dev=dev), 'd': 
ivy.array([3], dev=dev)}}\n top_cont = Container(dict_in)\n\n # full\n sub_cont = Container({'c': ivy.array([4], dev=dev), 'd': ivy.array([5], dev=dev)})\n assert not top_cont.find_sub_container(sub_cont)\n found_kc = top_cont.find_sub_structure(sub_cont)\n assert found_kc == 'b'\n found_kc = top_cont.find_sub_structure(top_cont)\n assert found_kc == ''\n\n # partial\n partial_sub_cont = Container({'d': ivy.array([5], dev=dev)})\n found_kc = top_cont.find_sub_structure(partial_sub_cont, partial=True)\n assert found_kc == 'b'\n partial_sub_cont = Container({'b" }, { "id": 85952, "commit_id": "0e19363d7def84ab448bc45755d9fe4d7d336fd2", "repo": "sentry", "path": "tests/acceptance/test_replay_detail.py", "file_name": "test_replay_detail.py", "fun_name": "test_simple", "commit_message": "test(replay): Create Acceptance tests for Replay Details and List pages (#38724)\n\nCan run tests individually with ` pytest tests/acceptance/test_replay_detail.py --no-headless=true`\r\n\r\nMore Testing Tips: https://develop.sentry.dev/testing/#acceptance-tests", "code": "def test_simple(self):\n with self.feature(FEATURE_NAME):\n self.browser.get(self.path)\n self.browser.wait_until_not('[data-test-id=\"loading-indicator\"]')\n self.browser.wait_until_not('[data-test-id=\"loading-placeholder\"]')\n self.browser.snapshot(\"replay detail\")\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 59, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 6, "token_counts": 47, "n_ast_nodes": 85, "n_identifiers": 9, "random_cut": "def test_simple(self):\n with self.feature(FEATURE_NAME):\n self.brows" }, { "id": 88823, "commit_id": "08e022578ad68856f3ae820c68d2b0f4d6dc4f74", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_group_events.py", "file_name": "test_group_events.py", "fun_name": "test_perf_issue", "commit_message": "feat(querybuilder): Use query builder in group events endpoint (#41276)\n\n- This adds a feature flag where the query buildere will be used so it\r\ncan be gradually rolled out\r\n- this is the last place that get_filter is being used, if this change\r\nis successful the entire get_filter function and all it depends on can\r\nbe deleted", "code": "def test_perf_issue(self):\n event_data = load_data(\n \"transaction\",\n fingerprint=[f\"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1\"],\n )\n event_1 = self.store_event(data=event_data, project_id=self.project.id)\n event_2 = self.store_event(data=event_data, project_id=self.project.id)\n\n self.login_as(user=self.user)\n\n url = f\"/api/0/issues/{event_1.groups[0].id}/events/\"\n response = self.do_request(url)\n\n assert response.status_code == 200, response.content\n assert sorted(map(lambda x: x[\"eventID\"], response.data)) == sorted(\n [str(event_1.event_id), str(event_2.event_id)]\n )\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 140, "n_words": 38, "vocab_size": 29, "complexity": 1, "nloc": 14, "token_counts": 124, "n_ast_nodes": 218, "n_identifiers": 28, "random_cut": "def test_perf_issue(self):\n event_data = load_data(\n \"transaction\",\n fingerprint=[f\"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1\"],\n )\n event_1 = self.store_event(data=event_data, project_id=self.project.id)\n event_2 = self.store_event(data=event_data, project_id=self.project.id)\n\n self.login_as(user=self.user)\n\n " }, { "id": 
137349, "commit_id": "ca3d89139afb887a01948106c2bceb7f02a944c0", "repo": "ray", "path": "rllib/core/optim/tests/test_rl_optimizer.py", "file_name": "test_rl_optimizer.py", "fun_name": "input_specs_train", "commit_message": "[RLlib] New `RLOptimizer` API for local (torch+tf) optimizers and losses. Used in combination with RLModules. Initial PR. (#29737)", "code": "def input_specs_train(self) -> ModelSpec:\n return ModelSpec(\n dict(self._default_inputs(), **{\"actions\": TorchTensorSpec(\"b\")}),\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 34, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 52, "n_identifiers": 6, "random_cut": "def input_specs_train(self) -> ModelSpec:\n return ModelSpec(\n dict(self._default_inputs(), *" }, { "id": 32960, "commit_id": "a5ca56ff158075351149220319c14dde555a86f5", "repo": "transformers", "path": "tests/mixed_int8/test_mixed_int8.py", "file_name": "test_mixed_int8.py", "fun_name": "tearDown", "commit_message": "Supporting seq2seq models for `bitsandbytes` integration (#18579)\n\n* Supporting seq2seq models for `bitsandbytes` integration\r\n\r\n- `bitsandbytes` integration supports now seq2seq models\r\n- check if a model has tied weights as an additional check\r\n\r\n* small modification\r\n\r\n- tie the weights before looking at tied weights!", "code": "def tearDown(self):\n r\n del self.base_model\n del self.sequence_model\n del self.model_8bit\n del self.seq_to_seq_model\n\n gc.collect()\n torch.cuda.empty_cache()\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 61, "n_words": 13, "vocab_size": 10, "complexity": 1, "nloc": 11, "token_counts": 35, "n_ast_nodes": 58, "n_identifiers": 11, "random_cut": "def tearDown(self):\n r\n del self.base_model\n del self.sequence_model\n del self.model_8bit\n del self.se" }, { "id": 295405, "commit_id": "ea148a1b8ea611b07b606a4bfef44f66db8b2582", "repo": "core", "path": "homeassistant/components/light/__init__.py", "file_name": "__init__.py", "fun_name": "state_attributes", "commit_message": "Add EntityFeature enum to Light (#69103)\n\nCo-authored-by: Paulus Schoutsen ", "code": "def state_attributes(self):\n \n if not self.is_on:\n return None\n\n data = {}\n supported_features = self.supported_features\n color_mode = self._light_internal_color_mode\n\n if color_mode not in self._light_internal_supported_color_modes:\n # Increase severity to warning in 2021.6, reject in 2021.10\n _LOGGER.debug(\n \"%s: set to unsupported color_mode: %s, supported_color_modes: %s\",\n self.entity_id,\n color_mode,\n self._light_internal_supported_color_modes,\n )\n\n data[ATTR_COLOR_MODE] = color_mode\n\n if color_mode in COLOR_MODES_BRIGHTNESS:\n data[ATTR_BRIGHTNESS] = self.brightness\n elif supported_features & SUPPORT_BRIGHTNESS:\n # Backwards compatibility for ambiguous / incomplete states\n # Add warning in 2021.6, remove in 2021.10\n data[ATTR_BRIGHTNESS] = self.brightness\n\n if color_mode == COLOR_MODE_COLOR_TEMP:\n data[ATTR_COLOR_TEMP] = self.color_temp\n\n if color_mode in COLOR_MODES_COLOR or color_mode == COLOR_MODE_COLOR_TEMP:\n data.update(self._light_internal_convert_color(color_mode))\n\n if supported_features & SUPPORT_COLOR_TEMP and not self.supported_color_modes:\n # Backwards compatibility\n # Add warning in 2021.6, remove in 2021.10\n data[ATTR_COLOR_TEMP] = 
self.color_temp\n\n if supported_features & SUPPORT_WHITE_VALUE and not self.supported_color_modes:\n # Backwards compatibility\n # Add warning in 2021.6, remove in 2021.10\n data[ATTR_WHITE_VALUE] = self.white_value\n if self.hs_color is not None:\n data.update(self._light_internal_convert_color(COLOR_MODE_HS))\n\n if supported_features & LightEntityFeature.EFFECT:\n data[ATTR_EFFECT] = self.effect\n\n return {key: val for key, val in data.items() if val is not None}\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 539, "n_words": 161, "vocab_size": 77, "complexity": 16, "nloc": 31, "token_counts": 209, "n_ast_nodes": 334, "n_identifiers": 36, "random_cut": "def state_attributes(self):\n \n if not self.is_on:\n return None\n\n data = {}\n supported_features = self.supported_features\n color_mode = self._light_internal_color_mode\n\n if color_mode not in self._light_internal_supported_color_modes:\n # Increase severity to warning in 2021.6, reject in 2021.10\n _LOGGER.debug(\n \"%s: set to unsupported color_mode: %s, supported_color_modes: %s\",\n self.entity_id,\n color_mode,\n self._ligh" }, { "id": 262261, "commit_id": "00c7600103ee34ac50506af88f1b34b713f849e7", "repo": "TTS", "path": "TTS/tts/models/vits.py", "file_name": "vits.py", "fun_name": "spec_to_mel", "commit_message": "Update Vits model API", "code": "def spec_to_mel(spec, n_fft, num_mels, sample_rate, fmin, fmax):\n \n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sample_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)\n mel = torch.matmul(mel_basis[fmax_dtype_device], spec)\n mel = amp_to_db(mel)\n return mel\n\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 86, "n_words": 48, "vocab_size": 31, "complexity": 2, "nloc": 10, "token_counts": 112, "n_ast_nodes": 171, "n_identifiers": 20, "random_cut": "def spec_to_mel(spec, n_fft, num_mels, sample_rate, fmin, fmax):\n \n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sample_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)\n mel = torch.matmul(mel_basis[fmax_dtype_device], spec)\n mel = amp_to_db(mel)\n return mel\n\n" }, { "id": 166298, "commit_id": "4a072fa0d0d34e83a0d80b1080846bf708bd7177", "repo": "pandas", "path": "pandas/core/groupby/groupby.py", "file_name": "groupby.py", "fun_name": "_python_agg_general", "commit_message": "ENH: Add numeric_only to certain groupby ops (#46728)", "code": "def _python_agg_general(self, func, *args, raise_on_typeerror=False, **kwargs):\n func = com.is_builtin_func(func)\n f = lambda x: func(x, *args, **kwargs)\n\n # iterate through \"columns\" ex exclusions to populate output dict\n output: dict[base.OutputKey, ArrayLike] = {}\n\n if self.ngroups == 0:\n # agg_series below assumes ngroups > 0\n return self._python_apply_general(f, self._selected_obj, is_agg=True)\n\n for idx, obj in enumerate(self._iterate_slices()):\n name = obj.name\n\n try:\n # if this function is invalid for this dtype, we 
will ignore it.\n result = self.grouper.agg_series(obj, f)\n except TypeError:\n if raise_on_typeerror:\n raise\n warn_dropping_nuisance_columns_deprecated(type(self), \"agg\")\n continue\n\n key = base.OutputKey(label=name, position=idx)\n output[key] = result\n\n if not output:\n return self._python_apply_general(f, self._selected_obj)\n\n return self._wrap_aggregated_output(output)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 332, "n_words": 95, "vocab_size": 76, "complexity": 6, "nloc": 20, "token_counts": 167, "n_ast_nodes": 260, "n_identifiers": 34, "random_cut": "def _python_agg_general(self, func, *args, raise_on_typeerror=False, **kwargs):\n func = com.is_builtin_func(func)\n f = lambda x: func(x, *args, **kwargs)\n\n # iterate through \"columns\" ex exclusions to populate output dict\n output: dict[base.OutputKey, ArrayLike] = {}\n\n if self.ngroups == 0:\n # agg_series below assumes ngroups > 0\n return self._python_apply_general(f, self._selected_obj, is_agg=True)\n\n for idx, obj in enumerate(self._iterate_slices()):\n name = obj.name\n\n try:\n # if this function is invalid for this dtype, we will ignore it.\n result = self.grouper.agg_series(obj, f)\n except TypeError:\n if raise_on_typeerror:\n raise\n warn_dropping_nuisance_columns_deprecated(type(self), \"agg\")\n continue\n\n key = base.OutputKey(label=name, position=idx)\n " }, { "id": 268096, "commit_id": "85acf4d1e55e95c266a35c49f74af3c0f251de08", "repo": "ansible", "path": "test/lib/ansible_test/_internal/ci/local.py", "file_name": "local.py", "fun_name": "prepare_core_ci_auth", "commit_message": "ansible-test - Avoid use of deprecated type hints. (#78456)\n\n* ansible-test - Avoid use of deprecated type hints.\r\n\r\nPEP 585 deprecated many container types in the `typing` module in favor of the actual types, which support subscripting as of Python 3.9.\r\n\r\nConversion of `t.Type` was skipped since PyCharm does not currently recognize it.\r\n\r\n* ansible-test - Fix `t` and `c` imports/shadowing.", "code": "def prepare_core_ci_auth(self) -> dict[str, t.Any]:\n \n path = self._get_aci_key_path()\n auth_key = read_text_file(path).strip()\n\n request = dict(\n key=auth_key,\n nonce=None,\n )\n\n auth = dict(\n remote=request,\n )\n\n return auth\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 113, "n_words": 24, "vocab_size": 18, "complexity": 1, "nloc": 12, "token_counts": 56, "n_ast_nodes": 90, "n_identifiers": 16, "random_cut": "def prepare_core_ci_auth(self) -> dict[str, t.Any]:\n \n path =" }, { "id": 138787, "commit_id": "5c06e3f14900e3812061416759c25ff2b88c8a23", "repo": "ray", "path": "python/ray/experimental/dag/dag_node.py", "file_name": "dag_node.py", "fun_name": "_get_all_child_nodes", "commit_message": "[DAG] add basic plotting on Ray DAGs (#24223)\n\nTo add basic plotting feature for Ray DAGs. \r\n\r\n`ray.experimental.dag.plot(dag: DAGNode, to_file=None)`\r\n\r\n### Behavior\r\n1. dump the dag plot (Dot) to file.\r\n2. also render the image whenever possible. E.g. if running in Jupyter notebook, the image will not only be saved, but also rendered in the notebook.\r\n3. when to_file is not set (i.e. None), it will be saved to a tempfile for rendering purpose only. 
This is common when users plot DAGs in notebook env to explore the DAG structure without wanting to save it to a file.", "code": "def _get_all_child_nodes(self) -> List[\"DAGNode\"]:\n \n\n scanner = _PyObjScanner()\n # we use List instead of Set here, reason explained\n # in `_get_toplevel_child_nodes`.\n children = []\n for n in scanner.find_nodes(\n [\n self._bound_args,\n self._bound_kwargs,\n self._bound_other_args_to_resolve,\n ]\n ):\n if n not in children:\n children.append(n)\n return children\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 190, "n_words": 41, "vocab_size": 35, "complexity": 3, "nloc": 20, "token_counts": 57, "n_ast_nodes": 94, "n_identifiers": 12, "random_cut": "def _get_all_child_nodes(self) -> List[\"DAGNode\"]:\n \n\n scanner = _PyObjScanner()\n # we use List instead of Set here, reason explained\n # in `_get_toplevel_child_nodes`.\n children = []\n for n in scanner.find_nodes(\n [\n self._bound_args,\n self._bound_kwargs,\n self._bound_other_arg" }, { "id": 3523, "commit_id": "e05dfd1bcdf59f03992b8ff5ce938fdeb9403959", "repo": "airbyte", "path": "octavia-cli/octavia_cli/list/listings.py", "file_name": "listings.py", "fun_name": "__repr__", "commit_message": "🐙 octavia-cli: add command to list existing sources, destinations and connections (#9642)", "code": "def __repr__(self):\n items = [formatting.format_column_names(self.fields_to_display)] + self.get_listing()\n return formatting.display_as_table(items)\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 22, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 30, "n_ast_nodes": 49, "n_identifiers": 8, "random_cut": "def __repr__(self):\n items = [formatting.format_column_names(self.fields_to_display)] + self.get_listing()\n return formatting.display_as_table(items)\n\n" }, { "id": 215839, "commit_id": "3c8a341d60559867e72ef944ba15d3531e6de383", "repo": "salt", "path": "tests/pytests/functional/modules/file/test_replace.py", "file_name": "test_replace.py", "fun_name": "test_no_backup", "commit_message": "Use str on pathlib paths", "code": "def test_no_backup(file, multiline_file):\n # Backup file should NOT be created\n bak_file = \"{}.bak\".format(multiline_file)\n assert \"Salticus\" not in multiline_file.read_text()\n file.replace(str(multiline_file), \"Etiam\", \"Salticus\", backup=False)\n assert \"Salticus\" in multiline_file.read_text()\n assert not os.path.exists(bak_file)\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 46, "n_words": 29, "vocab_size": 23, "complexity": 1, "nloc": 6, "token_counts": 59, "n_ast_nodes": 102, "n_identifiers": 12, "random_cut": "def test_no_backup(file, multiline_file):\n # Backup file shoul" }, { "id": 158027, "commit_id": "777611a5157e4f2d28598eb8d062f6c1bfd906bf", "repo": "d2l-zh", "path": "d2l/paddle.py", "file_name": "paddle.py", "fun_name": "__getitem__", "commit_message": "[Paddle]Add chapter_natural-language-processing-pretraining (#1177)\n\n* [PAddle]\r\n\r\n* add a fuction for chap14\r\n\r\n* add show_list_len_pair_hist function for every framework\r\n\r\n* fix the bug\r\n\r\n* change torch to paddle\r\n\r\n* Fix import issue and repeat_interleave issue\r\n\r\n* Update paddle.py\r\n\r\n* remove bert pretraining unused comments\r\n\r\n* restore 
train ch11 missing code\r\n\r\n* rerun subward embeddings\r\n\r\n* Update subword-embedding.md\r\n\r\n* Update bert.md\r\n\r\n* Update word2vec-pretraining.md\r\n\r\n* Update build.yml\r\n\r\nCo-authored-by: w5688414 ", "code": "def __getitem__(self, tokens):\n indices = [self.token_to_idx.get(token, self.unknown_idx)\n for token in tokens]\n vecs = self.idx_to_vec[d2l.tensor(indices)]\n return vecs\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 54, "n_words": 16, "vocab_size": 14, "complexity": 2, "nloc": 5, "token_counts": 42, "n_ast_nodes": 63, "n_identifiers": 12, "random_cut": "def __getitem__(self, tokens):\n indices = [self.token_to_idx.get(token, self.unknown_idx)\n for token in tokens]\n vecs = self.idx_to_vec" }, { "id": 137476, "commit_id": "64d744b4750b749cede563b04c5d32396470a236", "repo": "ray", "path": "rllib/algorithms/registry.py", "file_name": "registry.py", "fun_name": "_import_a3c", "commit_message": "[RLlib] Deprecate (delete) `contrib` folder. (#30992)", "code": "def _import_a3c():\n import ray.rllib.algorithms.a3c as a3c\n\n return a3c.A3C, a3c.A3C.get_default_config()\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 14, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 40, "n_identifiers": 7, "random_cut": "def _import_a3c():\n import ray.rllib.algorithms.a3c as a3c\n\n return a3c.A3C, a3c.A" }, { "id": 313294, "commit_id": "ffcac67d9950f569573a76c6431243c6eb5f1671", "repo": "core", "path": "tests/util/test_network.py", "file_name": "test_network.py", "fun_name": "test_is_ipv6_address", "commit_message": "Add is_ipv4_address and is_ipv6_address utils (#66472)", "code": "def test_is_ipv6_address():\n \n assert network_util.is_ipv6_address(\"::1\") is True\n assert network_util.is_ipv6_address(\"8.8.8.8\") is False\n assert network_util.is_ipv6_address(\"8.8.8.8\") is False\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 26, "n_words": 14, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 32, "n_ast_nodes": 60, "n_identifiers": 3, "random_cut": "def test_is_ipv6_address():\n \n assert network_util.is_ipv6_ad" }, { "id": 216978, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/encodings/hz.py", "file_name": "hz.py", "fun_name": "getregentry", "commit_message": "add python 3.10.4 for windows", "code": "def getregentry():\n return codecs.CodecInfo(\n name='hz',\n encode=Codec().encode,\n decode=Codec().decode,\n incrementalencoder=IncrementalEncoder,\n incrementaldecoder=IncrementalDecoder,\n streamreader=StreamReader,\n streamwriter=StreamWriter,\n )\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 66, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 10, "token_counts": 46, "n_ast_nodes": 69, "n_identifiers": 15, "random_cut": "def getregentry():\n " }, { "id": 159763, "commit_id": "66a61b03658f3c9f312505dcf7eab07e4cf91ac6", "repo": "numpy", "path": "numpy/lib/tests/test_io.py", "file_name": "test_io.py", "fun_name": "test_loadtxt_maxrows_no_blank_lines", "commit_message": "Port over tests from npreadtext test suite\n\n- Add test for parsing scientific notation.\n- 
Add multiple-char comment test.\n- Port over tests for structured dtypes.\n- Add tests for exceptions on skiprows/max_rows.\n- port over ndmin tests.\n- Make structured data reusable, add unpack tests.\n- Port over delimiter tests.\n- Port over maxrows test w/ various dtypes.\n- Port over test of exception msg on parse failure.\n- Port over test for converters w/neg indices.\n- Port over usecols tests\n- Port over unicode tests.\n- Port over more converter tests.\n- Port over test for large rows.\n- Port over test for string-len discovery.\n- Port over float conversion accuracy test.\n- Port over bool test.\n- Add test for implicit float->int conversion.\n- Port over complex parsing tests.\n- Port over tests for reading from generator.\n- Port over object cleanup test.\n- Port over bytes incompat test.\n- Port over converters tests.\n\nCo-authored-by: Warren Weckesser \nCo-authored-by: Sebastian Berg ", "code": "def test_loadtxt_maxrows_no_blank_lines(dtype):\n txt = TextIO(\"1.5,2.5\\n3.0,4.0\\n5.5,6.0\")\n res = np.loadtxt(txt, dtype=dtype, delimiter=\",\", max_rows=2)\n assert_equal(res.dtype, dtype)\n assert_equal(res, np.array([[\"1.5\", \"2.5\"], [\"3.0\", \"4.0\"]], dtype=dtype))\n\n\n@pytest.mark.parametrize(\"dtype\", (np.dtype(\"f8\"), np.dtype(\"i2\")))", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"dtype\", (np.dtype(\"f8\"), np.dtype(\"i2\")))", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 32, "n_words": 22, "vocab_size": 21, "complexity": 1, "nloc": 5, "token_counts": 66, "n_ast_nodes": 154, "n_identifiers": 14, "random_cut": "def test_loadtxt_maxrows_no_blank_lines(dtype):\n txt = TextIO(\"1.5,2.5\\n3.0,4.0\\n5." }, { "id": 187102, "commit_id": "120c10302381600abb4044083ce0a106b31df8f0", "repo": "streamlink", "path": "src/streamlink/plugin/api/validate/_validators.py", "file_name": "_validators.py", "fun_name": "validator_url", "commit_message": "plugin.api.validate: turn module into package\n\nTurn module into package with multiple logical sub-modules:\n- Define a public interface in the package's `__init__` module\n- Split validation schemas, validators and validate logic\n - schemas: classes which register attributes used by their\n respective `validate` implementations\n - validators: functions which can internally call `validate`\n and which return something that can be validated\n - validate: singledispatch functions which implement the validation\n logic for schemas and various other types\n- Rename validation schemas for better internal references\n- Rename singledispatch methods\n\nOther clean-up work:\n- Update comments and fix grammar\n- Add type annotations\n- Use f-strings\n- Use `str` instead of the `text` alias\n- Simplify some code blocks\n- Rearrange classes and functions\n- Rephrase certain error messages\n- Add a few more tests for better code coverage", "code": "def validator_url(**attributes) -> Callable[[str], bool]:\n \n\n # Convert \"http\" to AnySchema(\"http\", \"https\") for convenience\n if attributes.get(\"scheme\") == \"http\":\n attributes[\"scheme\"] = AnySchema(\"http\", \"https\")\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 37, "n_words": 21, "vocab_size": 19, "complexity": 2, "nloc": 8, "token_counts": 41, "n_ast_nodes": 70, "n_identifiers": 7, "random_cut": "def validator_url(**attributes) -> Callable[[str], bool]:\n \n\n # Convert \"http\" to 
AnySchema(\"http\", \"https\") for convenience\n if attributes.get(\"scheme\") == \"http\":\n attributes[\"scheme\"] = Any" }, { "id": 148908, "commit_id": "60d1e7fc6578e57ebd27ad05b37e4de63e1ed20f", "repo": "freqtrade", "path": "tests/rpc/test_rpc_telegram.py", "file_name": "test_rpc_telegram.py", "fun_name": "test_send_msg_buy_notification", "commit_message": "fix stake amt", "code": "def test_send_msg_buy_notification(default_conf, mocker, caplog) -> None:\n\n msg = {\n 'type': RPCMessageType.BUY,\n 'trade_id': 1,\n 'buy_tag': 'buy_signal_01',\n 'exchange': 'Binance',\n 'pair': 'ETH/BTC',\n 'limit': 1.099e-05,\n 'order_type': 'limit',\n 'stake_amount': 0.01465333,\n 'stake_amount_fiat': 0.0,\n 'stake_currency': 'BTC',\n 'fiat_currency': 'USD',\n 'current_rate': 1.099e-05,\n 'amount': 1333.3333333333335,\n 'open_date': arrow.utcnow().shift(hours=-1)\n }\n telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf)\n\n telegram.send_msg(msg)\n assert msg_mock.call_args[0][0] \\\n == '\\N{LARGE BLUE CIRCLE} *Binance:* Buying ETH/BTC (#1)\\n' \\\n '*Buy Tag:* `buy_signal_01`\\n' \\\n '*Amount:* `1333.33333333`\\n' \\\n '*Open Rate:* `0.00001099`\\n' \\\n '*Current Rate:* `0.00001099`\\n' \\\n '*Total:* `(0.01465333 BTC, 180.895 USD)`'\n\n freqtradebot.config['telegram']['notification_settings'] = {'buy': 'off'}\n caplog.clear()\n msg_mock.reset_mock()\n telegram.send_msg(msg)\n msg_mock.call_count == 0\n log_has(\"Notification 'buy' not sent.\", caplog)\n\n freqtradebot.config['telegram']['notification_settings'] = {'buy': 'silent'}\n caplog.clear()\n msg_mock.reset_mock()\n\n telegram.send_msg(msg)\n msg_mock.call_count == 1\n msg_mock.call_args_list[0][1]['disable_notification'] is True\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 310, "n_words": 105, "vocab_size": 84, "complexity": 1, "nloc": 38, "token_counts": 231, "n_ast_nodes": 387, "n_identifiers": 23, "random_cut": "def test_send_msg_buy_notification(default_conf, mocker, caplog) -> None:\n\n msg = {\n 'type': RPCMessageType.BUY,\n 'trade_id': 1,\n 'buy_tag': 'buy_signal_01',\n 'exchange': 'Binance',\n 'pair': 'ETH/BTC',\n 'limit': 1.099e-05,\n 'order_type': 'limit',\n 'stake_amount': 0.01465333,\n 'stake_amount_fiat': 0.0,\n 'stake_currency': 'BTC',\n 'fiat_currency': 'USD',\n 'current_rate': 1.099e-05,\n 'amount': 1333.3333333333335,\n 'open_date': arrow.utcnow().shift(hours=-1)\n }\n telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf)\n\n telegram.send_msg(msg)\n assert msg_mock.call_args[0][0] \\\n == '\\N{LARGE BLUE CIRCLE} *Binance:* Buying ETH/BTC (#1)\\n' \\\n '*Buy Tag:* `buy_signal_01`\\n' \\\n '*Amount:* `1333.33333333`\\n' \\\n '*Open Rate:* `0.00001099`\\n' \\\n '*Current Rate:* `0.00001099`\\n' \\\n '*Total:* `(0.01465333 BTC, 180.895 USD)`'\n\n freqtradebot.config['telegram']['notification_settings'] = {'buy': 'off'}\n caplog.clear()\n msg_mock.reset_mock()\n telegram.send_msg(msg)\n msg_mock.call_count == 0\n log_has(\"Notification 'buy' not sent.\", caplog)\n\n freqtradebot.config['telegram']['notification_settings'] = {'buy': 'silent'}\n ca" }, { "id": 35657, "commit_id": "df5a4094a6e3f98f2cb2058cdb688fcc3f453220", "repo": "transformers", "path": "src/transformers/models/data2vec/convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.py", "file_name": "convert_data2vec_audio_original_pytorch_checkpoint_to_pytorch.py", "fun_name": "access_by_string", 
"commit_message": "Add Data2Vec (#15507)\n\n* Add data2vec model cloned from roberta\r\n\r\n* Add checkpoint conversion script\r\n\r\n* Fix copies\r\n\r\n* Update docs\r\n\r\n* Add checkpoint conversion script\r\n\r\n* Remove fairseq data2vec_text script and fix format\r\n\r\n* Add comment on where to get data2vec_text.py\r\n\r\n* Remove mock implementation cheat.py and fix style\r\n\r\n* Fix copies\r\n\r\n* Remove TF and Flax classes from init\r\n\r\n* Add back copy from fairseq data2vec_text.py and fix style\r\n\r\n* Update model name in docs/source/index.mdx to be CamelCase\r\n\r\n* Revert model name in table to lower-case to get check_table test to pass\r\n\r\n* Update src/transformers/models/data2vec/__init__.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/data2vec/convert_data2vec_original_pytorch_checkpoint_to_pytorch.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update docs/source/model_doc/data2vec.mdx\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update docs/source/model_doc/data2vec.mdx\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/auto/configuration_auto.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/data2vec/configuration_data2vec.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update tests/test_modeling_data2vec.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/data2vec/configuration_data2vec.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update documentation\r\n\r\n* Copy-paste Data2VecConfig from BertConfig\r\n\r\n* Update config checkpoint to point to edugp/data2vec-nlp-base. Fix style and repo-consistency\r\n\r\n* Update config special tokens to match RoBERTa\r\n\r\n* Split multiple assertions and add individual error messages\r\n\r\n* Rename Data2VecModel to Data2VecForTextModel\r\n\r\n* Add Data2Vec to _toctree.yml\r\n\r\n* Rename Data2VecEmbeddings to Data2VecForTextEmbeddings\r\n\r\n* Add initial Data2VecForAudio model (unfinished). 
Only matching fairseq's implementation up to the feature encoder (before positional encoding).\r\n\r\n* finish audio model\r\n\r\n* finish audio file\r\n\r\n* Update names and fix style, quality and repo consistency\r\n\r\n* Remove Data2VecAudioForPretraining. Add tests for Data2VecAudio, mimicking the Wav2Vec2 test suite. Fix bias initilization in positional conv layers. Move back configurations for audio and text to separate files.\r\n\r\n* add inputs to logits to data2vec'\r\n\r\n* correct autio models\r\n\r\n* correct config auto\r\n\r\n* correct tok auto\r\n\r\n* Update utils/tests_fetcher.py\r\n\r\n* delete unnecessary files\r\n\r\n* delete unnecessary files\r\n\r\n* further renaming\r\n\r\n* make all tests pass\r\n\r\n* finish\r\n\r\n* remove useless test file\r\n\r\n* Update tests/test_modeling_common.py\r\n\r\n* Update utils/check_repo.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec_text.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Fix copies\r\n\r\n* Update docs\r\n\r\n* Remove fairseq data2vec_text script and fix format\r\n\r\n* Add comment on where to get data2vec_text.py\r\n\r\n* Remove mock implementation cheat.py and fix style\r\n\r\n* Fix copies\r\n\r\n* Remove TF and Flax classes from init\r\n\r\n* Add back copy from fairseq data2vec_text.py and fix style\r\n\r\n* Update model name in docs/source/index.mdx to be CamelCase\r\n\r\n* Revert model name in table to lower-case to get check_table test to pass\r\n\r\n* Update documentation\r\n\r\n* Update src/transformers/models/data2vec/__init__.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/data2vec/convert_data2vec_original_pytorch_checkpoint_to_pytorch.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/auto/configuration_auto.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/data2vec/configuration_data2vec.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update tests/test_modeling_data2vec.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/data2vec/configuration_data2vec.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Copy-paste Data2VecConfig from BertConfig\r\n\r\n* Update config checkpoint to point to 
edugp/data2vec-nlp-base. Fix style and repo-consistency\r\n\r\n* Update config special tokens to match RoBERTa\r\n\r\n* Split multiple assertions and add individual error messages\r\n\r\n* Rename Data2VecModel to Data2VecForTextModel\r\n\r\n* Add Data2Vec to _toctree.yml\r\n\r\n* Rename Data2VecEmbeddings to Data2VecForTextEmbeddings\r\n\r\n* Add initial Data2VecForAudio model (unfinished). Only matching fairseq's implementation up to the feature encoder (before positional encoding).\r\n\r\n* finish audio model\r\n\r\n* finish audio file\r\n\r\n* add inputs to logits to data2vec'\r\n\r\n* Update names and fix style, quality and repo consistency\r\n\r\n* Remove Data2VecAudioForPretraining. Add tests for Data2VecAudio, mimicking the Wav2Vec2 test suite. Fix bias initilization in positional conv layers. Move back configurations for audio and text to separate files.\r\n\r\n* correct autio models\r\n\r\n* correct config auto\r\n\r\n* correct tok auto\r\n\r\n* delete unnecessary files\r\n\r\n* delete unnecessary files\r\n\r\n* Update utils/tests_fetcher.py\r\n\r\n* further renaming\r\n\r\n* make all tests pass\r\n\r\n* finish\r\n\r\n* remove useless test file\r\n\r\n* Update tests/test_modeling_common.py\r\n\r\n* Update utils/check_repo.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec_text.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Move data2vec tests to new structure\r\n\r\n* Fix test imports for text tests\r\n\r\n* Remove fairseq files\r\n\r\n* Change paper link to arxiv\r\n\r\n* Modify Data2Vec documentation to reflect that the encoder is not shared across the audio and text models in the current implementation.\r\n\r\n* Update text model checkpoint to be facebook/data2vec-text-base\r\n\r\n* Add 'Copy from' statements and update paper links and docs\r\n\r\n* fix copy from statements\r\n\r\n* improve copied from\r\n\r\n* correct more copied from statements\r\n\r\n* finish copied from stuff\r\n\r\n* make style\r\n\r\n* add model to README\r\n\r\n* add to master\r\n\r\nCo-authored-by: Eduardo Gonzalez Ponferrada \r\nCo-authored-by: Patrick von Platen \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def access_by_string(module, path):\n names = path.split(\".\")\n return reduce(getattr, names, module)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 15, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 38, "n_identifiers": 7, "random_cut": "def access_by_string(module, path):\n names = path.split(\".\")\n return reduce(getattr, names, module)\n\n" }, { "id": 201589, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/auth_tests/test_views.py", "file_name": "test_views.py", "fun_name": "test_redirect_param", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_redirect_param(self):\n \n self.login()\n url = self.do_redirect_url + \"?next=/custom_next/\"\n response = self.client.get(url)\n self.assertRedirects(response, \"/custom_next/\", fetch_redirect_response=False)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 49, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 40, "n_ast_nodes": 70, "n_identifiers": 10, "random_cut": "def 
test_redirect_param(self):\n \n self.login()\n url = self.do_redirect_url + \"?next=/custom_next/\"\n response = self.client.get(url)\n self.assertRedirects(response, \"/custo" }, { "id": 295360, "commit_id": "d7375f1a9c4a69858a65a56bd524f5a78ecab23c", "repo": "core", "path": "homeassistant/components/telegram_bot/polling.py", "file_name": "polling.py", "fun_name": "start_polling", "commit_message": "Refactor telegram_bot polling/webhooks platforms and add tests (#66433)\n\nCo-authored-by: Pär Berge ", "code": "def start_polling(self, event=None):\n \n _LOGGER.debug(\"Starting polling\")\n self.updater.start_polling()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 23, "n_ast_nodes": 42, "n_identifiers": 6, "random_cut": "def start_polling(self, event=None):\n " }, { "id": 201733, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/backends/postgresql/test_creation.py", "file_name": "test_creation.py", "fun_name": "test_sql_table_creation_suffix_with_encoding", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_sql_table_creation_suffix_with_encoding(self):\n settings = {\"CHARSET\": \"UTF8\"}\n self.check_sql_table_creation_suffix(settings, \"WITH ENCODING 'UTF8'\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 23, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 38, "n_identifiers": 4, "random_cut": "def test_sql_table_creation_suffix_with_encoding(self):\n settings = {\"CHARSET\": \"UTF8\"}\n self.check_sql_table_creation_suffix(settings, \"WITH ENCODING" }, { "id": 4176, "commit_id": "63af98e3b999d4b223237b51472a819915c5a558", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-recurly/unit_tests/test_streams.py", "file_name": "test_streams.py", "fun_name": "test_billing_infos_client_method_name", "commit_message": "🎉 Recurly Schema Revamp (#9866)\n\n* Cleanup Recurly connector schemas\r\n\r\n* Add more Recurly schemas to the connector\r\n\r\n- `billing_infos`\r\n- `shipping_addresses`\r\n- `shipping_methods`\r\n- `subscription_changes`\r\n\r\n* Add Recurly `add-on` resouce\r\n\r\n* Add Recurly's account notes resource schema\r\n\r\n* Add unique coupons to Recurly source\r\n\r\n* Add credit payments to Recurly connector\r\n\r\n* Add Recurly resources to integration tests configurations\r\n\r\n* Bump Recurly source version to `0.4.0`\r\n\r\n* Add `line_items` Recurly resource\r\n\r\n* Add `line_items` to Recurly documentation\r\n\r\n* Add missing `line_items` JSON schema\r\n\r\n* Replace Subscription Change Recurly API call with Subscription `pending_changes` field\r\n\r\n* Replace Recurly unique coupon codes API call with coupons `unique_coupon` field\r\n\r\nTo avoid the extra API call to import unique coupon calls\r\n\r\n* Revert \"Replace Recurly unique coupon codes API call with coupons `unique_coupon` field\"\r\n\r\nThis reverts commit 1c4592d82da3c5e5e0026dda8eb2ed7a896ac5b8.\r\n\r\n* Add `end_time` parameter to Recurly connector\r\n\r\n* Order Recurly specs\r\n\r\n* Set the Recurly `begin_time` and `end_time` to be optional\r\n\r\n* Add `order` to Recurly `source_spec.yaml`\r\n\r\n* Add `maxLength` to Recurly source schemas\r\n\r\n* Set `maxLength` for Recurly Subscription and 
Transaction `uuid`\r\n\r\n* Fix Recurly `export_dates` acceptance tests", "code": "def test_billing_infos_client_method_name(self):\n stream = BillingInfos(client=self.client_mock)\n\n assert stream.client_method_name == \"list_billing_infos\"\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 22, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 21, "n_ast_nodes": 36, "n_identifiers": 7, "random_cut": "def test_billing_infos_client_method_name(self):\n stream = BillingInfos(client=self.client_mock)\n\n assert stream.client_meth" }, { "id": 155889, "commit_id": "0e4ddc4772d5e48858d6627979f1fddefc7f1cb1", "repo": "dask", "path": "dask/dataframe/tests/test_shuffle.py", "file_name": "test_shuffle.py", "fun_name": "test_shuffle_values_raises", "commit_message": "Add some extra test coverage (#8302)\n\nAdds a test for an option to `sort_values` that wasn't previously tested, as well as a few other tests for lines in `numeric.py` and `shuffle.py` that weren't tested.", "code": "def test_shuffle_values_raises():\n df = pd.DataFrame({\"a\": [1, 3, 2]})\n ddf = dd.from_pandas(df, npartitions=3)\n with pytest.raises(\n ValueError, match=\"na_position must be either 'first' or 'last'\"\n ):\n ddf.sort_values(by=\"a\", na_position=\"invalid\")\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 50, "n_words": 25, "vocab_size": 24, "complexity": 1, "nloc": 7, "token_counts": 58, "n_ast_nodes": 98, "n_identifiers": 15, "random_cut": "def test_shuffle_values_raises():\n df = pd.DataFrame({\"a\": [1, 3, 2]})\n ddf = dd.from_pandas(df, npartitions=3)\n with pytest.raises(\n ValueError, match=\"na_position must be either 'first' or 'last'\"\n ):\n ddf.sort_values(b" }, { "id": 64047, "commit_id": "f6dda738dc99060090e703b21f7a77692887605b", "repo": "erpnext", "path": "erpnext/accounts/doctype/pricing_rule/pricing_rule.py", "file_name": "pricing_rule.py", "fun_name": "remove_pricing_rule_for_item", "commit_message": "fix: ignore pricing rule in all transactions", "code": "def remove_pricing_rule_for_item(pricing_rules, item_details, item_code=None):\n\tfrom erpnext.accounts.doctype.pricing_rule.utils import (\n\t\tget_applied_pricing_rules,\n\t\tget_pricing_rule_items,\n\t)\n\tfor d in get_applied_pricing_rules(pricing_rules):\n\t\tif not d or not frappe.db.exists(\"Pricing Rule\", d): continue\n\t\tpricing_rule = frappe.get_cached_doc('Pricing Rule', d)\n\n\t\tif pricing_rule.price_or_product_discount == 'Price':\n\t\t\tif pricing_rule.rate_or_discount == 'Discount Percentage':\n\t\t\t\titem_details.discount_percentage = 0.0\n\t\t\t\titem_details.discount_amount = 0.0\n\t\t\t\titem_details.rate = item_details.get('price_list_rate', 0)\n\n\t\t\tif pricing_rule.rate_or_discount == 'Discount Amount':\n\t\t\t\titem_details.discount_amount = 0.0\n\n\t\t\tif pricing_rule.margin_type in ['Percentage', 'Amount']:\n\t\t\t\titem_details.margin_rate_or_amount = 0.0\n\t\t\t\titem_details.margin_type = None\n\t\telif pricing_rule.get('free_item'):\n\t\t\titem_details.remove_free_item = (item_code if pricing_rule.get('same_item')\n\t\t\t\telse pricing_rule.get('free_item'))\n\n\t\tif pricing_rule.get(\"mixed_conditions\") or pricing_rule.get(\"apply_rule_on_other\"):\n\t\t\titems = get_pricing_rule_items(pricing_rule)\n\t\t\titem_details.apply_on = (frappe.scrub(pricing_rule.apply_rule_on_other)\n\t\t\t\tif 
pricing_rule.apply_rule_on_other else frappe.scrub(pricing_rule.get('apply_on')))\n\t\t\titem_details.applied_on_items = ','.join(items)\n\n\titem_details.pricing_rules = ''\n\n\treturn item_details\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 70, "n_words": 99, "vocab_size": 68, "complexity": 13, "nloc": 28, "token_counts": 237, "n_ast_nodes": 393, "n_identifiers": 32, "random_cut": "def remove_pricing_rule_for_item(pricing_rules, item_details, item_code=None):\n\tfrom erpnext.accounts.doctype.pricing_rule.utils import (\n\t\tget_applied_pricing_rules,\n\t\tget_pricing_rule_items,\n\t)\n\tfor d in get_applied_pricing_rules(pricing_rules):\n\t\tif not d or not frappe.db.exists(\"Pricing Ru" }, { "id": 206724, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/regex_helper.py", "file_name": "regex_helper.py", "fun_name": "walk_to_end", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def walk_to_end(ch, input_iter):\n \n if ch == \"(\":\n nesting = 1\n else:\n nesting = 0\n for ch, escaped in input_iter:\n if escaped:\n continue\n elif ch == \"(\":\n nesting += 1\n elif ch == \")\":\n if not nesting:\n return\n nesting -= 1\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 146, "n_words": 40, "vocab_size": 26, "complexity": 7, "nloc": 14, "token_counts": 53, "n_ast_nodes": 95, "n_identifiers": 5, "random_cut": "def walk_to_end(ch, input_iter):\n \n if ch == \"(\":\n nesting = 1\n else:\n nesting = 0\n for ch, escaped in input_iter:\n if escaped:\n continue\n e" }, { "id": 152851, "commit_id": "1d5330b1559fe8033556d6b27970e4b14fa3b253", "repo": "pyecharts", "path": "test/test_bar.py", "file_name": "test_bar.py", "fun_name": "test_bar_add_dataset", "commit_message": "Update many unit test codes.(Coverage Up to 99%)", "code": "def test_bar_add_dataset(fake_writer):\r\n c = (\r\n Bar()\r\n .add_dataset(\r\n source=[\r\n [\"product\", \"2015\", \"2016\", \"2017\"],\r\n [\"Matcha Latte\", 43.3, 85.8, 93.7],\r\n [\"Milk Tea\", 83.1, 73.4, 55.1],\r\n [\"Cheese Cocoa\", 86.4, 65.2, 82.5],\r\n [\"Walnut Brownie\", 72.4, 53.9, 39.1],\r\n ]\r\n )\r\n .add_yaxis(series_name=\"2015\", y_axis=[])\r\n .add_yaxis(series_name=\"2016\", y_axis=[])\r\n .add_yaxis(series_name=\"2017\", y_axis=[])\r\n .set_global_opts(\r\n title_opts=opts.TitleOpts(title=\"Dataset simple bar example\"),\r\n xaxis_opts=opts.AxisOpts(type_=\"category\"),\r\n )\r\n )\r\n c.render()\r\n _, content = fake_writer.call_args[0]\r\n assert_in(\"dataset\", content)\r\n", "url": "https://github.com/pyecharts/pyecharts.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 244, "n_words": 55, "vocab_size": 50, "complexity": 1, "nloc": 23, "token_counts": 177, "n_ast_nodes": 243, "n_identifiers": 22, "random_cut": "def test_bar_add_dataset(fake_writer):\r\n c = (\r\n Bar()\r\n .add_dataset(\r\n source=[\r\n [\"product\", \"2015\", \"2016\", \"2017\"],\r\n " }, { "id": 100813, "commit_id": "ff6b0209dd5ad57b81b0aca570df7f39a7119bfb", "repo": "faceswap", "path": "plugins/train/model/_base/model.py", "file_name": "model.py", "fun_name": "config", "commit_message": "Refactoring and TravisCI to Github Actions (#1239)\n\n* refactor training\r\n\r\n* travis to actions", "code": "def config(self) -> 
dict:\n \n global _CONFIG # pylint: disable=global-statement\n if not _CONFIG:\n model_name = self._config_section\n logger.debug(\"Loading config for: %s\", model_name)\n _CONFIG = Config(model_name, configfile=self._configfile).config_dict\n return _CONFIG\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 88, "n_words": 26, "vocab_size": 23, "complexity": 2, "nloc": 9, "token_counts": 43, "n_ast_nodes": 73, "n_identifiers": 12, "random_cut": "def config(self) -> dict:\n \n global _CONFIG # pylint: disable=global-statement\n if not _CONFIG:\n model_name = self._config_section\n logger.debug(" }, { "id": 109743, "commit_id": "4896ec1a2cfb8c454e385632d8df213c915ced52", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/axes3d.py", "file_name": "axes3d.py", "fun_name": "_calc_view_axes", "commit_message": "Add pan and zoom toolbar handling to 3D Axes (Replaces PR#22614) (#23449)\n\n* ENH: Add pan and zoom toolbar handling to 3D Axes\r\n\r\n1) This moves the pan logic that was already in the mouse move handler\r\ninto the \"drag_pan\" method to make it available from the toolbar.\r\n\r\n2) This expands upon the panning logic to enable a zoom-to-box feature.\r\nThe zoom-to-box is done relative to the Axes, so it shrinks/expands\r\nthe box as a fraction of each delta, from lower-left Axes to lower-left\r\nzoom-box. Thus, it tries to handle non-centered zooms, which adds more\r\ncases to handle versus the current right-click zoom only scaling from\r\nthe center of the projection.\r\n\r\n* Rewrite zooming with bounding box\r\n\r\n* Rewrite 3d panning to work with a roll angle\r\n\r\n* Whats new for zoom and pan buttons\r\n\r\n* Make pan button configurable\r\n\r\n* Do not jump when zooming and mouse goes over other subplot\r\n\r\n* Rework zooming for 3d plots\r\n\r\n* Handle x/y lock when zooming and panning\r\n\r\n* Update tests\r\n\r\n* Docstrings\r\n\r\n* Dont assume a scale_z\r\n\r\n* Limit zoom box\r\n\r\n* Test zoom pan key modifiers\r\n\r\n* Save some calculation by saving view axes\r\n\r\n* Deprecation warnings for Axes3D.eye, .vvec\r\n\r\n* Remove Axes3D._prepare_view_from_bbox for now\r\n\r\n* Comments and docstrings\r\n\r\n* Switch from uvn to uvw\r\n\r\n* Save aspect to axes\r\n\r\n* Constrain zooming with mouse when one of the equal aspect ratios is set\r\n\r\n* Cleanup\r\n\r\n* Cleanup\r\n\r\n* Consolidate finding equal aspect axis indices\r\n\r\n* linting\r\n\r\n* More intuitive scaling\r\n\r\n* Box zoom keeps existing aspect ratios\r\n\r\n* Linting\r\n\r\n* Code review comments\r\n\r\n* Revert parameters for view_transformation\r\n\r\n* Fix new 3d pan/zoom view going on view stack twice\r\n\r\n* Better clipping\r\n\r\n* Test 3d toolbar navigation\r\n\r\n* Privatize helper functions\r\n\r\n* Deprecations\r\n\r\n* Code review changes\r\n\r\n* Deprecation note\r\n\r\n* Undeprecate proj3d.view_transformation\r\n\r\n* Undeprecate proj3d.view_transformation\r\n\r\n* Update doc/api/next_api_changes/deprecations/23449-SS.rst\r\n\r\n\r\nCo-authored-by: Greg Lucas \r\nCo-authored-by: Scott Shambaugh \r\nCo-authored-by: Oscar Gustafsson ", "code": "def _calc_view_axes(self, eye):\n \n elev_rad = np.deg2rad(art3d._norm_angle(self.elev))\n roll_rad = np.deg2rad(art3d._norm_angle(self.roll))\n\n # Look into the middle of the world coordinates\n R = 0.5 * self._roll_to_vertical(self._box_aspect)\n\n # Define which axis should be vertical. 
A negative value\n # indicates the plot is upside down and therefore the values\n # have been reversed:\n V = np.zeros(3)\n V[self._vertical_axis] = -1 if abs(elev_rad) > np.pi/2 else 1\n\n u, v, w = proj3d._view_axes(eye, R, V, roll_rad)\n return u, v, w\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 156, "n_words": 72, "vocab_size": 58, "complexity": 2, "nloc": 8, "token_counts": 106, "n_ast_nodes": 164, "n_identifiers": 24, "random_cut": "def _calc_view_axes(self, eye):\n \n elev_rad = " }, { "id": 224298, "commit_id": "dca7cbb43fcd6ea7c677c98ba585395b070d387b", "repo": "mkdocs", "path": "mkdocs/tests/cli_tests.py", "file_name": "cli_tests.py", "fun_name": "test_serve_dirtyreload", "commit_message": "Format code with `black -l100 --skip-string-normalization`", "code": "def test_serve_dirtyreload(self, mock_serve):\n\n result = self.runner.invoke(cli.cli, [\"serve\", '--dirtyreload'], catch_exceptions=False)\n\n self.assertEqual(result.exit_code, 0)\n mock_serve.assert_called_once_with(\n dev_addr=None,\n livereload='dirty',\n config_file=None,\n strict=None,\n theme=None,\n use_directory_urls=None,\n watch_theme=False,\n watch=(),\n )\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 136, "n_words": 21, "vocab_size": 21, "complexity": 1, "nloc": 13, "token_counts": 77, "n_ast_nodes": 114, "n_identifiers": 19, "random_cut": "def test_serve_dirtyreload(self, mock_serve):\n\n result = self.runner.invoke(cli.cli, [\"serve\", '--dirtyreload'], catch_exceptions=False)\n\n self.assertEqual(result.exit_code, 0)\n mock_serve.assert_called_once_with(\n dev_addr=None,\n livereload='dirty'," }, { "id": 162942, "commit_id": "3f174fcef6b614ee58716b7ec1b2744e137069ae", "repo": "inter", "path": "misc/tools/postprocess-vf2.py", "file_name": "postprocess-vf2.py", "fun_name": "build_opsz_axis_values", "commit_message": "Remove slnt/ital VF axis\n\nThis removes the slant/italic variable axis and breaks up the font in two: roman and italic. This change will allow diverging designs for italic (for example single-storey a). 
It also addresses the fact that most software, including web browsers, doesn't handle VFs with slnt or ital well.", "code": "def build_opsz_axis_values(ttfont):\n nametable = ttfont['name']\n instances = ttfont['fvar'].instances\n\n val_min = 0.0\n val_max = 0.0\n for instance in instances:\n opsz_val = instance.coordinates[\"opsz\"]\n if val_min == 0.0 or opsz_val < val_min:\n val_min = opsz_val\n if val_max == 0.0 or opsz_val > val_max:\n val_max = opsz_val\n\n return [\n {\n \"name\": \"Regular\",\n \"value\": val_min,\n \"linkedValue\": val_max,\n \"flags\": 2,\n },\n {\n \"name\": \"Display\",\n \"value\": val_max,\n },\n ]\n\n # results = []\n # for instance in instances:\n # opsz_val = instance.coordinates[\"opsz\"]\n # name = nametable.getName(instance.subfamilyNameID, 3, 1, 1033).toUnicode()\n # name = name.replace(\"Italic\", \"\").strip()\n # if name == \"\":\n # name = \"Regular\"\n # inst = {\n # \"name\": name,\n # \"value\": opsz_val,\n # }\n # if int(opsz_val) == val_min:\n # inst[\"flags\"] = 0\n # inst[\"linkedValue\"] = val_max\n # else:\n # inst[\"linkedValue\"] = val_min\n # results.append(inst)\n\n # return results\n\n", "url": "https://github.com/rsms/inter.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 259, "n_words": 132, "vocab_size": 59, "complexity": 6, "nloc": 23, "token_counts": 103, "n_ast_nodes": 180, "n_identifiers": 9, "random_cut": "def build_opsz_axis_values(ttfont):\n nametable = ttfont['name']\n instances = ttfont['fvar'].instances\n\n val_min = 0.0\n val_max = 0.0\n for instance in instances:\n opsz_val = instance.coordinates[\"opsz\"]\n if val_min == 0.0 or opsz_val < val_min:\n val_min = opsz_val\n if val_max == 0.0 or opsz_val > val_max:\n val_max = opsz_val\n\n return [\n {\n \"name\": \"Regular\",\n " }, { "id": 283405, "commit_id": "b71abcfbf4d7e8ac1855522aff0378e13c8b5362", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/cryptocurrency/test_cryptocurrency_helpers.py", "file_name": "test_cryptocurrency_helpers.py", "fun_name": "test_coin_api_load_df_for_ta", "commit_message": "Updating some names (#1575)\n\n* quick econ fix\r\n\r\n* black\r\n\r\n* keys and feature flags\r\n\r\n* terminal name :eyes:\r\n\r\n* some more replacements\r\n\r\n* some more replacements\r\n\r\n* edit pyproject\r\n\r\n* gst -> openbb\r\n\r\n* add example portfolios back to git\r\n\r\n* Update api from gst\r\n\r\n* sorry. 
skipping some tests\r\n\r\n* another round of names\r\n\r\n* another round of test edits\r\n\r\n* Missed some .gst refs and update timezone\r\n\r\n* water mark stuff\r\n\r\n* Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS\r\n\r\n* fix more GST to OpenBB Terminal\r\n\r\n* Logging : merge conflicts with main\r\n\r\n* Revert wrong files\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA ", "code": "def test_coin_api_load_df_for_ta(self, mock_load):\n \n\n with open(\n \"tests/openbb_terminal/cryptocurrency/json/test_cryptocurrency_helpers/btc_usd_test_data.json\",\n encoding=\"utf8\",\n ) as f:\n sample_return = json.load(f)\n\n mock_load.return_value = sample_return\n mock_return, vs = load_ta_data(\n coin_map_df=self.coin_map_df,\n source=\"cg\",\n currency=\"usd\",\n days=30,\n )\n self.assertTrue(mock_return.shape == (31, 4))\n self.assertTrue(vs == \"usd\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 165, "n_words": 32, "vocab_size": 27, "complexity": 1, "nloc": 15, "token_counts": 81, "n_ast_nodes": 137, "n_identifiers": 19, "random_cut": "def test_coin_api_load_df_for_ta(self, mock_load):\n \n\n with open(\n \"tests/openbb_terminal/cryptocurrency/json/test_cryptocurrency_helpers/btc_usd_test_data.json\",\n encoding=\"utf8\",\n ) as f:\n sample_return = json.load(f)\n\n mock_load.return_va" }, { "id": 246653, "commit_id": "54e74cc15f30585f5874780437614c0df6f639d9", "repo": "synapse", "path": "tests/rest/client/test_keys.py", "file_name": "test_keys.py", "fun_name": "test_rejects_device_key_given_as_map_to_bool", "commit_message": "Add type hints to `tests/rest/client` (#12072)", "code": "def test_rejects_device_key_given_as_map_to_bool(self) -> None:\n self.register_user(\"alice\", \"wonderland\")\n alice_token = self.login(\"alice\", \"wonderland\")\n bob = self.register_user(\"bob\", \"uncle\")\n channel = self.make_request(\n \"POST\",\n \"/_matrix/client/r0/keys/query\",\n {\n \"device_keys\": {\n bob: {\n \"device_id1\": True,\n },\n },\n },\n alice_token,\n )\n\n self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result)\n self.assertEqual(\n channel.json_body[\"errcode\"],\n Codes.BAD_JSON,\n channel.result,\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 273, "n_words": 39, "vocab_size": 31, "complexity": 1, "nloc": 22, "token_counts": 101, "n_ast_nodes": 168, "n_identifiers": 16, "random_cut": "def test_rejects_device_key_given_as_map_to_bool(self) -> None:\n self.register_user(\"alice\", \"wonderland\")\n alice_token = self.login(\"alice\", \"wonderland\")\n bob = self.register_user(\"bob\", \"uncle\")\n channel = self.make_request(\n \"POST\",\n \"/_matrix/client/r0/keys/query\",\n {\n \"device_keys\": {\n bob: {\n \"device_id1\": True,\n },\n },\n },\n alice_token,\n )\n\n self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.resu" }, { "id": 264382, "commit_id": "a2981870ce6911d577dc2af3d6cd2cf5c952aa14", "repo": "netbox", "path": "netbox/dcim/forms/models.py", "file_name": "models.py", "fun_name": "save", "commit_message": "#7844: Allow installing modules via UI without replicating components", "code": "def save(self, *args, **kwargs):\n\n # If replicate_components is False, disable automatic component replication on the instance\n if self.instance.pk or not 
self.cleaned_data['replicate_components']:\n self.instance._disable_replication = True\n\n return super().save(*args, **kwargs)\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 58, "n_words": 27, "vocab_size": 27, "complexity": 3, "nloc": 4, "token_counts": 46, "n_ast_nodes": 75, "n_identifiers": 9, "random_cut": "def save(self, *args, **kwargs):\n\n # If replicate_components is False, disable automatic component replication on the instance\n if self.instance.pk or not self.cleaned_data['replicate_components']:\n self.instance._disable_replication = True\n\n " }, { "id": 294646, "commit_id": "c1a2be72fc8b76b55cfde1823c5688100e397369", "repo": "core", "path": "tests/components/generic/test_config_flow.py", "file_name": "test_config_flow.py", "fun_name": "test_form_stream_unauthorised", "commit_message": "Generic IP Camera configflow 2 (#52360)\n\nCo-authored-by: J. Nick Koston ", "code": "async def test_form_stream_unauthorised(hass, fakeimg_png, user_flow):\n \n with patch(\n \"homeassistant.components.generic.config_flow.av.open\",\n side_effect=av.error.HTTPUnauthorizedError(0, 0),\n ):\n result2 = await hass.config_entries.flow.async_configure(\n user_flow[\"flow_id\"],\n TESTDATA,\n )\n assert result2[\"type\"] == \"form\"\n assert result2[\"errors\"] == {\"stream_source\": \"stream_unauthorised\"}\n\n\n@respx.mock", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@respx.mock", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 92, "n_words": 28, "vocab_size": 26, "complexity": 1, "nloc": 11, "token_counts": 67, "n_ast_nodes": 124, "n_identifiers": 16, "random_cut": "async def test_form_stream_unauthorised(hass, fakeimg_png, user_flow):\n \n with patch(\n \"homeassistant.components.generic.config_flow.av.open\",\n " }, { "id": 64323, "commit_id": "a64228741d065f7ac33b3208d3a704616250f925", "repo": "erpnext", "path": "erpnext/e_commerce/variant_selector/item_variants_cache.py", "file_name": "item_variants_cache.py", "fun_name": "enqueue_build_cache", "commit_message": "fix: Trim spaces from attributes (multi-variant creation) & explicit method for building cache\n\n- Multiple Item Variants creation fails due to extra spaces in attributes from popup. 
Clean them before passing to server side\n- Mention explicit method to build variants cache to avoid ambiguity between old method path (pre-refactor)", "code": "def enqueue_build_cache(item_code):\n\tif frappe.cache().hget('item_cache_build_in_progress', item_code):\n\t\treturn\n\tfrappe.enqueue(\n\t\t\"erpnext.e_commerce.variant_selector.item_variants_cache.build_cache\",\n\t\titem_code=item_code, queue='long'\n\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 4, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 7, "token_counts": 34, "n_ast_nodes": 59, "n_identifiers": 7, "random_cut": "def enqueue_build_cache(item_code):\n\tif frappe.cache().hget('item_cache_build_in_progress', item_code):\n\t\treturn\n\tfrappe.enqueue(\n\t\t\"erpnext.e_commerce.variant_selector.item_variants_cache.build_cache\",\n\t\titem_code=item_cod" }, { "id": 182090, "commit_id": "d86ec1889e259c969a035cef413fee347bb76414", "repo": "textual", "path": "tests/css/test_tokenize.py", "file_name": "test_tokenize.py", "fun_name": "test_variable_declaration_comment_ignored", "commit_message": "Variable references", "code": "def test_variable_declaration_comment_ignored():\n css = \"$x: red; /* comment */\"\n assert list(tokenize(css, \"\")) == [\n Token(name='variable_declaration_start', value='$x:', path='', code=css, location=(0, 0)),\n Token(name='whitespace', value=' ', path='', code=css, location=(0, 3)),\n Token(name='token', value='red', path='', code=css, location=(0, 4)),\n Token(name='variable_declaration_end', value=';', path='', code=css, location=(0, 7)),\n Token(name='whitespace', value=' ', path='', code=css, location=(0, 8)),\n ]\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 90, "n_words": 47, "vocab_size": 32, "complexity": 1, "nloc": 9, "token_counts": 155, "n_ast_nodes": 242, "n_identifiers": 10, "random_cut": "def test_variable_declaration_comment_ignored():\n css = \"$x: red; /* comment */\"\n assert list(tokenize(css, \"\")) == [\n Token(name='variable_declaration_start', value='$x:', path='', code=css, location=(0, 0))," }, { "id": 268811, "commit_id": "75b60b17ee1ff18ded04cf07b71e4ee32e673a0b", "repo": "ansible", "path": "test/lib/ansible_test/_internal/host_profiles.py", "file_name": "host_profiles.py", "fun_name": "get_controller_target_connections", "commit_message": "ansible-test - Support RSA SHA-1 for SSH clients.", "code": "def get_controller_target_connections(self) -> list[SshConnection]:\n \n containers = get_container_database(self.args)\n access = containers.data[HostType.control]['__test_hosts__'][self.container_name]\n\n host = access.host_ip\n port = dict(access.port_map())[22]\n\n settings = SshConnectionDetail(\n name=self.config.name,\n user='root',\n host=host,\n port=port,\n identity_file=SshKey(self.args).key,\n python_interpreter=self.python.path,\n # CentOS 6 uses OpenSSH 5.3, making it incompatible with the default configuration of OpenSSH 8.8 and later clients.\n # Since only CentOS 6 is affected, and it is only supported by ansible-core 2.12, support for RSA SHA-1 is simply hard-coded here.\n # A substring is used to allow custom containers to work, not just the one provided with ansible-test.\n enable_rsa_sha1='centos6' in self.config.image,\n )\n\n return [SshConnection(self.args, settings)]\n", "url": "https://github.com/ansible/ansible.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 258, "n_words": 92, "vocab_size": 73, "complexity": 1, "nloc": 16, "token_counts": 120, "n_ast_nodes": 189, "n_identifiers": 30, "random_cut": "def get_controller_target_connections(self) -> list[SshConnection]:\n \n containers = get_container_database(self.args)\n access = containers.data[HostType.control]['__test_hosts__'][self.container_name]\n\n host = access.host_ip\n port = dict(access.port_map())[22]\n\n settings = SshConnectionDetail(\n name=self.config.name,\n user='root',\n host=ho" }, { "id": 280930, "commit_id": "000d1e93d7187299dce5653f781345031a9ad96f", "repo": "OpenBBTerminal", "path": "tests/gamestonk_terminal/stocks/options/test_payoff_controller.py", "file_name": "test_payoff_controller.py", "fun_name": "test_menu_with_queue", "commit_message": "Tests : Stocks > Options (#1125)\n\n* Update tests : conftest\r\n\r\n* Updating tests : stocks/options\r\n\r\n* Updating tests : fix typing\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : pyupgrade\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : mock dates in cassettes\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : force single threading\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : conftest\r\n\r\n* Update tests : skip stocks/options/controller\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : fixing issue\r\n\r\n* Updating tests : add init\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip", "code": "def test_menu_with_queue(expected, mocker, queue):\n path_controller = \"gamestonk_terminal.stocks.options.payoff_controller\"\n\n # MOCK CHAIN + PRICE\n mocker.patch(\n target=f\"{path_controller}.get_option_chain\",\n return_value=CHAIN,\n )\n mocker.patch(\n target=f\"{path_controller}.get_price\",\n return_value=95.0,\n )\n\n # MOCK SWITCH\n mocker.patch(\n target=f\"{path_controller}.PayoffController.switch\",\n return_value=[\"quit\"],\n )\n result_menu = payoff_controller.menu(\n ticker=\"MOCK_TICKER\",\n expiration=\"2022-01-07\",\n queue=queue,\n )\n\n assert result_menu == expected\n\n\n@pytest.mark.vcr(record_mode=\"none\")", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr(record_mode=\"none\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 136, "n_words": 39, "vocab_size": 30, "complexity": 1, "nloc": 20, "token_counts": 81, "n_ast_nodes": 159, "n_identifiers": 18, "random_cut": "def test_menu_with_queue(expected, mocker, queue):\n path_controller = \"gamestonk_terminal.stocks.options.payoff_controller\"\n\n # MOCK CHAIN + PRICE\n mocker.patch(\n target=f\"{path_controller}.get_option_chain\",\n 
return_value=CHAIN,\n )\n mocker.patch(\n target=f\"{path_controller}.get_price\",\n return_va" }, { "id": 92817, "commit_id": "d435bb742d0ea89ae5e40e81ed198773262c1607", "repo": "sentry", "path": "tests/sentry/integrations/msteams/test_message_builder.py", "file_name": "test_message_builder.py", "fun_name": "test_personal_installation_message", "commit_message": "ref(msteams): Use message builder for help, installation and identity messages (#36495)\n\nMove the JSON blobs for help, installation and identity related messages to a message builder hierarchy like the one we use for Slack. Currently, all the cards that we use for the Microsoft Teams notifications are in the form of JSON blobs in sentry/integrations/msteams/card_builder.py. This is not good for maintainability and there is lot of code duplication.", "code": "def test_personal_installation_message(self):\n personal_installation_card = build_personal_installation_message()\n\n assert 2 == len(personal_installation_card[\"body\"])\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 22, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 35, "n_identifiers": 5, "random_cut": "def test_personal_installation_message(self):\n personal_installation_card = build_personal_i" }, { "id": 193693, "commit_id": "29b0831c1f3469b972ad8ad6521d81fc950980c4", "repo": "vision", "path": "test/prototype_transforms_kernel_infos.py", "file_name": "prototype_transforms_kernel_infos.py", "fun_name": "sample_inputs_affine_image_mask", "commit_message": "diversify parameter types for a couple of prototype kernels (#6635)\n\n* add more size types for prototype resize sample inputs\r\n\r\n* add skip for dispatcher\r\n\r\n* add more sizes to resize kernel info\r\n\r\n* add more skips\r\n\r\n* add more diversity to gaussian_blur parameters\r\n\r\n* diversify affine parameters and fix bounding box kernel\r\n\r\n* fix center_crop dispatcher info\r\n\r\n* revert kernel fixes\r\n\r\n* add skips for scalar shears in affine_bounding_box", "code": "def sample_inputs_affine_image_mask():\n for mask_loader, center in itertools.product(\n make_mask_loaders(sizes=[\"random\"], dtypes=[torch.uint8]),\n [None, (0, 0)],\n ):\n yield ArgsKwargs(mask_loader, center=center, **_AFFINE_KWARGS[0])\n\n for mask_loader, affine_kwargs in itertools.product(\n make_mask_loaders(sizes=[\"random\"], dtypes=[torch.uint8]), _diversify_affine_kwargs_types(_AFFINE_KWARGS[0])\n ):\n yield ArgsKwargs(mask_loader, **affine_kwargs)\n\n\n@pil_reference_wrapper", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "@pil_reference_wrapper", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 75, "n_words": 30, "vocab_size": 21, "complexity": 3, "nloc": 10, "token_counts": 100, "n_ast_nodes": 155, "n_identifiers": 15, "random_cut": "def sample_inputs_affine_image_mask():\n for mask_loader, center in itertools.product(\n make_mask_loaders(sizes=[\"random\"], dtypes=[torch.uint8]),\n [None, (0, 0)],\n ):\n yield ArgsKwargs(mask_loader, center=center, **_AFFINE_KWARG" }, { "id": 27620, "commit_id": "3673e7e11f22e5a695c708b7a594c11857a93898", "repo": "saleor", "path": "saleor/graphql/checkout/mutations/checkout_lines_delete.py", "file_name": "checkout_lines_delete.py", "fun_name": "perform_mutation", "commit_message": "Unify checkout mutations/resolvers to use id field. 
(#9862)\n\n* Unify checkout mutations/resolvers to use id field.\r\n\r\n* Update changelog\r\n\r\n* Remove uneeded \" \" in mutation's field description", "code": "def perform_mutation(cls, _root, info, lines_ids, token=None, id=None):\n checkout = get_checkout(\n cls,\n info,\n checkout_id=None,\n token=token,\n id=id,\n error_class=CheckoutErrorCode,\n )\n\n _, lines_to_delete = resolve_global_ids_to_primary_keys(\n lines_ids, graphene_type=\"CheckoutLine\", raise_error=True\n )\n cls.validate_lines(checkout, lines_to_delete)\n checkout.lines.filter(id__in=lines_to_delete).delete()\n\n lines, _ = fetch_checkout_lines(checkout)\n\n manager = info.context.plugins\n checkout_info = fetch_checkout_info(\n checkout, lines, info.context.discounts, manager\n )\n update_checkout_shipping_method_if_invalid(checkout_info, lines)\n recalculate_checkout_discount(\n manager, checkout_info, lines, info.context.discounts\n )\n manager.checkout_updated(checkout)\n return CheckoutLinesDelete(checkout=checkout)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 257, "n_words": 54, "vocab_size": 42, "complexity": 1, "nloc": 25, "token_counts": 146, "n_ast_nodes": 213, "n_identifiers": 33, "random_cut": "def perform_mutation(cls, _root, info, lines_ids, token=None, id=None):\n checkout = get_checkout(\n cls,\n info,\n checkout_id=None,\n token=token,\n id=id,\n error_class=CheckoutErrorCode,\n )\n\n _, lines_to_delete = resolve_global_ids_to_primary_keys(\n lines_ids, graphene_type=\"CheckoutLine\", raise_error=True\n )\n cls.validate_lines(checkout, lines_to_delete)\n checkout.lines.filter(id__in=lines_to_delete).delete()\n\n lines, _ = fetch_checkout_lines(checkout)\n\n manager = info.context.plugins\n checkout_info = fetch_checkout_info(\n checkout, lines, info.context.discounts, manager\n )\n upd" }, { "id": 30778, "commit_id": "7822a9b7a7b93b5dbf04eee7db3d2423ced1f9b6", "repo": "transformers", "path": "src/transformers/models/opt/modeling_tf_opt.py", "file_name": "modeling_tf_opt.py", "fun_name": "dummy_inputs", "commit_message": "Opt in flax and tf (#17388)\n\n* initial commit\r\n\r\n* add init file\r\n\r\n* update globakl init\r\n\r\n* update index and dummy objects\r\n\r\n* style\r\n\r\n* update modelling auto\r\n\r\n* fix initi typo in src/transformers\r\n\r\n* fix typo in modeling tf auto, opt was in wrong mapping name\r\n\r\n* fixed a slow test : saved_model\r\n\r\n* style\r\n\r\n* fix positionnal embedding if no position id is provided\r\n\r\n* update tf test\r\n\r\n* update test flax requirements\r\n\r\n* fixed serialization\r\n\r\n* update\r\n\r\n* update tf name to allow smooth convertion\r\n\r\n* update flax tests\r\n\r\n* style\r\n\r\n* fix test typo\r\n\r\n* fix tf typo test\r\n\r\n* add xla for generate support in causal LM\r\n\r\n* fixed bug\r\n\r\n* cleaned tf tests\r\n\r\n* style\r\n\r\n* removed from PT for slow tests\r\n\r\n* fix typp\r\n\r\n* opt test as slow\r\n\r\n* trying to fix GPT2 undefined\r\n\r\n* correct documentation and add to test doc\r\n\r\n* update tf doc\r\n\r\n* fix doc\r\n\r\n* fake commit\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Joao Gante \r\n\r\n* update test based on review\r\n\r\n* merged main layer for functionning test\r\n\r\n* fixup + quality\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* update long comment\r\n\r\n* make fix 
copies\r\n\r\nCo-authored-by: Arthur \r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def dummy_inputs(self):\n pad_token = 1\n input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)\n dummy_inputs = {\n \"attention_mask\": tf.math.not_equal(input_ids, pad_token),\n \"input_ids\": input_ids,\n }\n return dummy_inputs\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 76, "n_words": 20, "vocab_size": 17, "complexity": 1, "nloc": 8, "token_counts": 48, "n_ast_nodes": 77, "n_identifiers": 11, "random_cut": "def dummy_inputs(self):\n pad_token = 1\n input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)\n dummy_inputs = {\n \"attention_mask\": tf.mat" }, { "id": 29783, "commit_id": "decd505f55d02c616ce5b804c06a71e120d15c15", "repo": "saleor", "path": "saleor/graphql/discount/mutations/sale_add_catalogues.py", "file_name": "sale_add_catalogues.py", "fun_name": "perform_mutation", "commit_message": "Add plugin manager promise (#11414)", "code": "def perform_mutation(cls, _root, info, **data):\n sale = cls.get_node_or_error(\n info, data.get(\"id\"), only_type=Sale, field=\"sale_id\"\n )\n previous_catalogue = fetch_catalogue_info(sale)\n manager = get_plugin_manager_promise(info.context).get()\n with traced_atomic_transaction():\n cls.add_catalogues_to_node(sale, data.get(\"input\"))\n current_catalogue = fetch_catalogue_info(sale)\n previous_cat_converted = convert_catalogue_info_to_global_ids(\n previous_catalogue\n )\n current_cat_converted = convert_catalogue_info_to_global_ids(\n current_catalogue\n )\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 177, "n_words": 36, "vocab_size": 24, "complexity": 1, "nloc": 18, "token_counts": 113, "n_ast_nodes": 147, "n_identifiers": 22, "random_cut": "def perform_mutation(cls, _root, info, **data):\n sale = cls.get_node_or_error(\n info, data.get(\"id\"), only_type=Sale, field=\"sale_id\"\n )\n previous_catalogue = fetch_catalogue_info(sale)\n manager = get_plugin_manager_promise(info.context).get()\n with traced_atomic_transaction():\n cls.add_catalogues_to_node(sale, data.get(\"input\"))\n current_catalogue = fetch_catalogue_info(sale)\n previous_cat_converted = convert_catalogue_info_to_global_ids(\n previous_catalogue\n )\n current_cat_converted = convert_catalogue_info_to_global_ids(" }, { "id": 26886, "commit_id": "df31433d96cde352e4d62181e39bb8efcf7c9f2a", "repo": "saleor", "path": "saleor/graphql/checkout/tests/test_checkout.py", "file_name": "test_checkout.py", "fun_name": "test_checkout_transactions_missing_permission", "commit_message": "Transaction mutations for new checkout flow (#9564)\n\n* Add new mutation for creating order from checkout\r\n\r\n* Add implementaion of mutation CheckoutFromOrderCreate\r\n\r\n* Add preview label\r\n\r\n* Add mutations for manage payments\r\n\r\n* Clean up with new migration files\r\n\r\n* Add clean up after adding PoC changes\r\n\r\n* Clean up around payment mutations\r\n\r\n* Add test for new fields in payment type\r\n\r\n* Add tests for payment mutations\r\n\r\n* Add changes after self review\r\n\r\n* Add preview label to description of a new payment mutations\r\n\r\n* Remove field for depreceated error field\r\n\r\n* Add missing error code to Saleor error codes\r\n\r\n* Move validation to mutation part\r\n\r\n* 
Fix typo in field describtion\r\n\r\n* Apply changes after review\r\n\r\n* Clean in doc string in main method of the PR. Remove fixme that will be covered by separate PR\r\n\r\n* Add missing space in description of the field.\r\n\r\n* Apply changes after review\r\n\r\n* Fix incorrect field name for input id field\r\n\r\n* Rename orderFromCheckoutCreate to orderCreateFromCheckout\r\n\r\n* Add label ADDED_IN_32 to mutation description\r\n\r\n* Use HANDLE_CHECKOUTS permission, instead of MANAGE_CHECKOUTS\r\n\r\n* Update changelog\r\n\r\n* Fix tests\r\n\r\n* Add main logic for new handler in manager and webhook plugin\r\n\r\n* Add payment action request hooks to payment mutations\r\n\r\n* Add migration with new possible events for order\r\n\r\n* Add payment action request handlers to order mutations\r\n\r\n* Apply changes after review\r\n\r\n* Fix tests\r\n\r\n* Fix tests\r\n\r\n* Add tests for new payment flow in order mutation for payment actions\r\n\r\n* Add support for refund webhook action to fulfillment mutation related to return&refund\r\n\r\n* Apply changes after review\r\n\r\n* Add TransactionItem model for new checkout approach\r\n\r\n* Apply changes after self-review\r\n\r\n* Use createdAt and modifiedAt for new type and model\r\n\r\n* Apply changes after review\r\n\r\n* Add mutation to call transaction action\r\n\r\n* Add TransactionEvent to track changes made on TransactionItem\r\n\r\n* Clean up after self-review\r\n\r\n* Add missing space in private metadata description\r\n\r\n* Fix inccorect permission name in comment. Add missing added_in_x label\r\n\r\n* Add missing added_in_3x label\r\n\r\n* Add permissions for metadata\r\n\r\n* Apply changes after review\r\n\r\n* Apply changes after review\r\n\r\n* Make cleanup with mutation/query fields labels\r\n\r\n* Clean up after self-review\r\n\r\n* Update changelog\r\n\r\n* Attach transactions to checkout\r\n\r\n* Attach transactions created for checkout\r\n\r\n* Use [] instead of .get for dict in dataloader\r\n\r\n* Add subscription for transaction action request\r\n\r\n* Fix failing is_event_active\r\n\r\n* Clean up changelog after merging main\r\n\r\n* Clean up changelog after merging main\r\n\r\n* Add missing transaction event\r\n\r\n* Limit transaction mutations to be used only by apps\r\n\r\n* Use event.reference instead of event.payment_id for transactions\r\n\r\n* Fix failing migration\r\n\r\n* Apply changes after review\r\n\r\n* Update scheme", "code": "def test_checkout_transactions_missing_permission(api_client, checkout):\n # given\n checkout.payment_transactions.create(\n status=\"Authorized\",\n type=\"Credit card\",\n reference=\"123\",\n currency=\"USD\",\n authorized_value=Decimal(\"15\"),\n available_actions=[TransactionAction.CAPTURE, TransactionAction.VOID],\n )\n query = QUERY_CHECKOUT_TRANSACTIONS\n variables = {\"token\": str(checkout.token)}\n\n # when\n response = api_client.post_graphql(query, variables)\n\n # then\n assert_no_permission(response)\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 99, "n_words": 31, "vocab_size": 27, "complexity": 1, "nloc": 13, "token_counts": 78, "n_ast_nodes": 130, "n_identifiers": 23, "random_cut": "def test_checkout_transactions_missing_permission(api_client, checkout):\n # given\n checkout.payment_transactions.create(\n status=\"Authorized\",\n type=\"Credit card\",\n reference=\"123\",\n currency=\"USD\",\n authorized_value=Decimal(\"15\"),\n 
available_actions=[TransactionAction.CAPTURE, TransactionAction.VOID],\n )\n query = QUERY_CHECKOUT_TRANSACTIONS\n variables = {\"token\": str(checkout.token)}\n\n # when\n response = api_client." }, { "id": 41798, "commit_id": "e2c449e18bf47a6907b0d8e88b5673f2a9b45790", "repo": "seaborn", "path": "seaborn/_core/plot.py", "file_name": "plot.py", "fun_name": "save", "commit_message": "Improve how inline pngs get scaled when using a tight bbox", "code": "def save(self, fname, **kwargs) -> Plot:\n \n # TODO expose important keyword arguments in our signature?\n self.plot().save(fname, **kwargs)\n return self\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 47, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 13, "token_counts": 28, "n_ast_nodes": 47, "n_identifiers": 6, "random_cut": "def save(self, fname, **kwargs) -> Plot:\n \n # TODO" }, { "id": 116197, "commit_id": "5b1cd41a6202873e49c9ec43c770cf7d1f700adb", "repo": "mindsdb", "path": "mindsdb/api/mysql/mysql_proxy/classes/sql_query.py", "file_name": "sql_query.py", "fun_name": "execute_step", "commit_message": "keep datetype from predictor", "code": "def execute_step(self, step, steps_data):\n if type(step) == GetPredictorColumns:\n predictor_name = step.predictor.parts[-1]\n dn = self.datahub.get(self.mindsdb_database_name)\n columns = dn.get_table_columns(predictor_name)\n columns = [\n (column_name, column_name) for column_name in columns\n ]\n data = {\n 'values': [],\n 'columns': {\n (self.mindsdb_database_name, predictor_name, predictor_name): columns\n },\n 'tables': [(self.mindsdb_database_name, predictor_name, predictor_name)]\n }\n elif type(step) == GetTableColumns:\n table = step.table\n dn = self.datahub.get(step.namespace)\n ds_query = Select(from_table=Identifier(table), targets=[Star()])\n\n data, columns_info = dn.query(ds_query)\n\n table_alias = (self.database, table, table)\n\n data = {\n 'values': [],\n 'columns': {\n table_alias: columns_info\n },\n 'tables': [table_alias]\n }\n elif type(step) == FetchDataframeStep:\n data = self._fetch_dataframe_step(step)\n elif type(step) == UnionStep:\n raise ErNotSupportedYet('Union step is not implemented')\n # TODO add union support\n # left_data = steps_data[step.left.step_num]\n # right_data = steps_data[step.right.step_num]\n # data = left_data + right_data\n elif type(step) == MapReduceStep:\n try:\n if step.reduce != 'union':\n raise ErLogicError(f'Unknown MapReduceStep type: {step.reduce}')\n\n step_data = steps_data[step.values.step_num]\n vars = []\n step_data_values = step_data['values']\n for row in step_data_values:\n var_group = {}\n vars.append(var_group)\n for row_data in row.values():\n for name, value in row_data.items():\n if name[0] != '__mindsdb_row_id':\n var_group[name[1] or name[0]] = value\n\n data = {\n 'values': [],\n 'columns': {},\n 'tables': []\n }\n substep = step.step\n if type(substep) == FetchDataframeStep:\n query = substep.query\n for var_group in vars:\n markQueryVar(query.where)\n for name, value in var_group.items():\n replaceQueryVar(query.where, value, name)\n sub_data = self._fetch_dataframe_step(substep)\n if len(data['columns']) == 0:\n data['columns'] = sub_data['columns']\n if len(data['tables']) == 0:\n data['tables'] = sub_data['tables']\n data['values'].extend(sub_data['values'])\n unmarkQueryVar(query.where)\n elif type(substep) == MultipleSteps:\n data = self._multiple_steps_reduce(substep, vars)\n else:\n raise 
ErLogicError(f'Unknown step type: {step.step}')\n except Exception as e:\n raise SqlApiUnknownError(f'error in map reduce step: {e}') from e\n elif type(step) == MultipleSteps:\n if step.reduce != 'union':\n raise ErNotSupportedYet(f\"Only MultipleSteps with type = 'union' is supported. Got '{step.type}'\")\n data = None\n for substep in step.steps:\n subdata = self.execute_step(substep, steps_data)\n if data is None:\n data = subdata\n else:\n data['values'].extend(subdata['values'])\n elif type(step) == ApplyPredictorRowStep:\n try:\n predictor = '.'.join(step.predictor.parts)\n dn = self.datahub.get(self.mindsdb_database_name)\n where_data = step.row_dict\n\n data = dn.query(\n table=predictor,\n where_data=where_data\n )\n\n data = [{(key, key): value for key, value in row.items()} for row in data]\n\n table_name = get_preditor_alias(step, self.database)\n values = [{table_name: x} for x in data]\n columns = {table_name: []}\n if len(data) > 0:\n row = data[0]\n columns[table_name] = list(row.keys())\n # TODO else\n\n data = {\n 'values': values,\n 'columns': columns,\n 'tables': [table_name]\n }\n except Exception as e:\n if isinstance(e, SqlApiException):\n raise e\n else:\n raise SqlApiUnknownError(f'error in apply predictor row step: {e}') from e\n elif type(step) in (ApplyPredictorStep, ApplyTimeseriesPredictorStep):\n try:\n # set row_id\n data = steps_data[step.dataframe.step_num]\n row_id_col = ('__mindsdb_row_id', '__mindsdb_row_id')\n for table in data['columns']:\n data['columns'][table].append(row_id_col)\n\n row_count = len(data['values'])\n\n for i, row in enumerate(data['values']):\n for n, table_name in enumerate(row):\n row[table_name][row_id_col] = self.row_id + i + n * row_count\n # shift counter\n self.row_id += self.row_id + row_count * len(data['tables'])\n\n dn = self.datahub.get(self.mindsdb_database_name)\n predictor = '.'.join(step.predictor.parts)\n where_data = []\n for row in steps_data[step.dataframe.step_num]['values']:\n new_row = {}\n for table_name in row:\n keys_intersection = set(new_row) & set(row[table_name])\n if len(keys_intersection) > 0:\n raise ErLogicError(\n f'The predictor got two identical keys from different datasources: {keys_intersection}'\n )\n new_row.update(row[table_name])\n where_data.append(new_row)\n\n where_data = [{key[1]: value for key, value in row.items()} for row in where_data]\n\n is_timeseries = self.planner.predictor_metadata[predictor]['timeseries']\n _mdb_forecast_offset = None\n if is_timeseries:\n if '> LATEST' in self.query_str:\n # stream mode -- if > LATEST, forecast starts on inferred next timestamp\n _mdb_forecast_offset = 1\n elif '= LATEST' in self.query_str:\n # override: when = LATEST, forecast starts on last provided timestamp instead of inferred next time\n _mdb_forecast_offset = 0\n else:\n # normal mode -- emit a forecast ($HORIZON data points on each) for each provided timestamp\n _mdb_forecast_offset = None\n for row in where_data:\n if '__mdb_forecast_offset' not in row:\n row['__mdb_forecast_offset'] = _mdb_forecast_offset\n\n # for row in where_data:\n # for key in row:\n # if isinstance(row[key], datetime.date):\n # row[key] = str(row[key])\n\n table_name = get_preditor_alias(step, self.database)\n columns = {table_name: []}\n if len(where_data) == 0:\n # no data, don't run predictor\n cols = dn.get_table_columns(predictor) + ['__mindsdb_row_id']\n columns[table_name] = [(c, c) for c in cols]\n values = []\n else:\n data = dn.query(\n table=predictor,\n where_data=where_data\n )\n\n data = [{(key, 
key): value for key, value in row.items()} for row in data]\n\n values = [{table_name: x} for x in data]\n\n if len(data) > 0:\n row = data[0]\n columns[table_name] = list(row.keys())\n # TODO else\n\n data = {\n 'values': values,\n 'columns': columns,\n 'tables': [table_name],\n 'types': {table_name: self.model_types}\n }\n except Exception as e:\n raise SqlApiUnknownError(f'error in apply predictor step: {e}') from e\n elif type(step) == JoinStep:\n try:\n left_data = steps_data[step.left.step_num]\n right_data = steps_data[step.right.step_num]\n\n # FIXME https://github.com/mindsdb/mindsdb_sql/issues/136\n # is_timeseries = False\n # if True in [type(step) == ApplyTimeseriesPredictorStep for step in plan.steps]:\n # right_data = steps_data[step.left.step_num]\n # left_data = steps_data[step.right.step_num]\n # is_timeseries = True\n\n if step.query.condition is not None:\n raise ErNotSupportedYet('At this moment supported only JOIN without condition')\n if step.query.join_type.upper() not in ('LEFT JOIN', 'JOIN'):\n raise ErNotSupportedYet('At this moment supported only JOIN and LEFT JOIN')\n\n if len(left_data['tables']) == 0 or len(right_data['tables']) == 0:\n raise ErLogicError('Table for join is not found')\n\n if (\n len(left_data['tables']) != 1 or len(right_data['tables']) != 1\n or left_data['tables'][0] == right_data['tables'][0]\n ):\n raise ErNotSupportedYet('At this moment supported only JOIN of two different tables')\n\n data = {\n 'values': [],\n 'columns': {},\n 'tables': list(set(left_data['tables'] + right_data['tables'])),\n 'types': {}\n }\n\n for data_part in [left_data, right_data]:\n for table_name in data_part['columns']:\n if table_name not in data['columns']:\n data['columns'][table_name] = data_part['columns'][table_name]\n # keep types\n data['types'][table_name] = data_part.get('types', {}).get(table_name, {}).copy()\n else:\n data['columns'][table_name].extend(data_part['columns'][table_name])\n # keep types\n data['types'][table_name].update(data_part.get('types', {}).get(table_name, {}))\n for table_name in data['columns']:\n data['columns'][table_name] = list(set(data['columns'][table_name]))\n\n left_key = left_data['tables'][0]\n right_key = right_data['tables'][0]\n\n left_columns_map = OrderedDict()\n left_columns_map_reverse = OrderedDict()\n for i, column_name in enumerate(left_data['columns'][left_key]):\n left_columns_map[f'a{i}'] = column_name\n left_columns_map_reverse[column_name] = f'a{i}'\n\n right_columns_map = {}\n right_columns_map_reverse = {}\n for i, column_name in enumerate(right_data['columns'][right_key]):\n right_columns_map[f'b{i}'] = column_name\n right_columns_map_reverse[column_name] = f'b{i}'\n\n left_df_data = []\n for row in left_data['values']:\n row = row[left_key]\n left_df_data.append({left_columns_map_reverse[key]: value for key, value in row.items()})\n\n right_df_data = []\n for row in right_data['values']:\n row = row[right_key]\n right_df_data.append({right_columns_map_reverse[key]: value for key, value in row.items()})\n\n df_a = pd.DataFrame(left_df_data, columns=left_columns_map.keys())\n df_b = pd.DataFrame(right_df_data, columns=right_columns_map.keys())\n\n a_name = f'a{round(time.time() * 1000)}'\n b_name = f'b{round(time.time() * 1000)}'\n con = duckdb.connect(database=':memory:')\n con.register(a_name, df_a)\n con.register(b_name, df_b)\n resp_df = con.execute(f).fetchdf()\n con.unregister(a_name)\n con.unregister(b_name)\n con.close()\n\n resp_df = resp_df.replace({np.nan: None})\n resp_dict = 
resp_df.to_dict(orient='records')\n\n for row in resp_dict:\n new_row = {left_key: {}, right_key: {}}\n for key, value in row.items():\n if key.startswith('a'):\n new_row[left_key][left_columns_map[key]] = value\n else:\n new_row[right_key][right_columns_map[key]] = value\n data['values'].append(new_row)\n\n # remove all records with empty data from predictor from join result\n # otherwise there are emtpy records in the final result:\n # +------------+------------+-------+-----------+----------+\n # | time | time | state | pnew_case | new_case |\n # +------------+------------+-------+-----------+----------+\n # | 2020-10-21 | 2020-10-24 | CA | 0.0 | 5945.0 |\n # | 2020-10-22 | 2020-10-23 | CA | 0.0 | 6141.0 |\n # | 2020-10-23 | 2020-10-22 | CA | 0.0 | 2940.0 |\n # | 2020-10-24 | 2020-10-21 | CA | 0.0 | 3707.0 |\n # | NULL | 2020-10-20 | NULL | nan | nan |\n # | NULL | 2020-10-19 | NULL | nan | nan |\n # | NULL | 2020-10-18 | NULL | nan | nan |\n # | NULL | 2020-10-17 | NULL | nan | nan |\n # | NULL | 2020-10-16 | NULL | nan | nan |\n # +------------+------------+-------+-----------+----------+\n # 9 rows in set (2.07 sec)\n\n # if is_timeseries:\n # data_values = []\n # for row in data['values']:\n # for key in row:\n # if 'mindsdb' in key:\n # if not is_empty_prediction_row(row[key]):\n # data_values.append(row)\n # break\n # data['values'] = data_values\n except Exception as e:\n raise SqlApiUnknownError(f'error in join step: {e}') from e\n\n elif type(step) == FilterStep:\n step_data = steps_data[step.dataframe.step_num]\n\n # dicts to look up column and table\n column_idx = {}\n tables_idx = {}\n col_table_idx = {}\n\n # prepare columns for dataframe. column name contains table name\n cols = set()\n for table, col_list in step_data['columns'].items():\n _, t_name, t_alias = table\n\n tables_idx[t_name] = t_name\n tables_idx[t_alias] = t_name\n for column in col_list:\n # table_column\n c_name, c_alias = column\n\n col_name = f'{t_name}^{c_name}'\n cols.add(col_name)\n\n col_table_idx[col_name] = (table, column)\n column_idx[c_name] = t_name\n\n # prepare dict for dataframe\n result = []\n for row in step_data['values']:\n data_row = {}\n for table, col_list in step_data['columns'].items():\n for col in col_list:\n col_name = f'{table[1]}^{col[0]}'\n data_row[col_name] = row[table][col]\n result.append(data_row)\n\n df = pd.DataFrame(result, columns=list(cols))\n\n # analyze condition and change name of columns", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 24, "n_whitespaces": 6885, "n_words": 1312, "vocab_size": 496, "complexity": 151, "nloc": 513, "token_counts": 3772, "n_ast_nodes": 3598, "n_identifiers": 157, "random_cut": "def execute_step(self, step, steps_data):\n if type(step) == GetPredictorColumns:\n predictor_name = step.predictor.parts[-1]\n dn = self.datahub.get(self.mindsdb_database_name)\n columns = dn.get_table_columns(predictor_name)\n columns = [\n (column_name, column_name) for column_name in columns\n ]\n data = {\n 'values': [],\n 'columns': {\n (self.mindsdb_database_name, predictor_name, predictor_name): columns\n },\n 'tables': [(self.mindsdb_database_name, predictor_name, predictor_name)]\n }\n elif type(step) == GetTableColumns:\n table = step.table\n dn = self.datahub.get(step.namespace)\n ds_query = Select(from_table=Identifier(table), targets=[Star()])\n\n data, columns_info = dn.query(ds_query)\n\n table_alias = (self.database, table, table)\n\n data = {\n 'values': [],\n 
'columns': {\n table_alias: columns_info\n },\n 'tables': [table_alias]\n }\n elif type(step) == FetchDataframeStep:\n data = self._fetch_dataframe_step(step)\n elif type(step) == UnionStep:\n raise ErNotSupportedYet('Union step is not implemented')\n # TODO add union support\n # left_data = steps_data[step.left.step_num]\n # right_data = steps_data[step.right.step_num]\n # data = left_data + right_data\n elif type(step) == MapReduceStep:\n try:\n if step.reduce != 'union':\n raise ErLogicError(f'Unknown MapReduceStep type: {step.reduce}')\n\n step_data = steps_data[step.values.step_num]\n vars = []\n step_data_values = step_data['values']\n for row in step_data_values:\n var_group = {}\n vars.append(var_group)\n for row_data in row.values():\n for name, value in row_data.items():\n if name[0] != '__mindsdb_row_id':\n var_group[name[1] or name[0]] = value\n\n data = {\n 'values': [],\n 'columns': {},\n 'tables': []\n }\n substep = step.step\n if type(substep) == FetchDataframeStep:\n query = substep.query\n for var_group in vars:\n markQueryVar(query.where)\n for name, value in var_group.items():\n replaceQueryVar(query.where, value, name)\n sub_data = self._fetch_dataframe_step(substep)\n if len(data['columns']) == 0:\n data['columns'] = sub_data['columns']\n if len(data['tables']) == 0:\n data['tables'] = sub_data['tables']\n data['values'].extend(sub_data['values'])\n unmarkQueryVar(query.where)\n elif type(substep) == MultipleSteps:\n data = self._multiple_steps_reduce(substep, vars)\n else:\n raise ErLogicError(f'Unknown step type: {step.step}')\n except Exception as e:\n raise SqlApiUnknownError(f'error in map reduce step: {e}') from e\n elif type(step) == MultipleSteps:\n if step.reduce != 'union':\n raise ErNotSupportedYet(f\"Only MultipleSteps with type = 'union' is supported. 
Got '{step.type}'\")\n data = None\n for substep in step.steps:\n subdata = self.execute_step(substep, steps_data)\n if data is None:\n data = subdata\n else:\n data['values'].extend(subdata['values'])\n elif type(step) == ApplyPredictorRowStep:\n try:\n predictor = '.'.join(step.predictor.parts)\n dn = self.datahub.get(self.mindsdb_database_name)\n where_data = step.row_dict\n\n data = dn.query(\n table=predictor,\n where_data=where_data\n )\n\n data = [{(key, key): value for key, value in row.items()} for row in data]\n\n table_name = get_preditor_alias(step, self.database)\n values = [{table_name: x} for x in data]\n columns = {table_name: []}\n if len(data) > 0:\n row = data[0]\n columns[table_name] = list(row.keys())\n # TODO else\n\n data = {\n 'values': values,\n 'columns': columns,\n 'tables': [table_name]\n }\n except Exception as e:\n if isinstance(e, SqlApiException):\n raise e\n else:\n raise SqlApiUnknownError(f'error in apply predictor row step: {e}') from e\n elif type(step) in (ApplyPredictorStep, ApplyTimeseriesPredictorStep):\n try:\n # set row_id\n data = steps_data[step.dataframe.step_num]\n row_id_col = ('__mindsdb_row_id', '__mindsdb_row_id')\n for table in data['columns']:\n data['columns'][table].append(row_id_col)\n\n row_count = len(data['values'])\n\n for i, row in enumerate(data['values']):\n for n, table_name in enumerate(row):\n row[table_name][row_id_col] = self.row_id + i + n * row_count\n # shift counter\n self.row_id += self.row_id + row_count * len(data['tables'])\n\n dn = self.datahub.get(self.mindsdb_database_name)\n predictor = '.'.join(step.predictor.parts)\n where_data = []\n for row in steps_data[step.dataframe.step_num]['values']:\n new_row = {}\n for table_name in row:\n keys_intersection = set(new_row) & set(row[table_name])\n if len(keys_intersection) > 0:\n raise ErLogicError(\n f'The predictor got two identical keys from different datasources: {keys_intersection}'\n )\n new_row.update(row[table_name])\n where_data.append(new_row)\n\n where_data = [{key[1]: value for key, value in row.items()} for row in where_data]\n\n is_timeseries = self.planner.predictor_metadata[predictor]['timeseries']\n _mdb_forecast_offset = None\n if is_timeseries:\n if '> LATEST' in self.query_str:\n # stream mode -- if > LATEST, forecast starts on inferred next timestamp\n _mdb_forecast_offset = 1\n elif '= LATEST' in self.query_str:\n # override: when = LATEST, forecast starts on last provided timestamp instead of inferred next time\n _mdb_forecast_offset = 0\n else:\n # normal mode -- emit a forecast ($HORIZON data points on each) for each provided timestamp\n _mdb_forecast_offset = None\n for row in where_data:\n if '__mdb_forecast_offset' not in row:\n row['__mdb_forecast_offset'] = _mdb_forecast_offset\n\n # for row in where_data:\n # for key in row:\n # if isinstance(row[key], datetime.date):\n # row[key] = str(row[key])\n\n table_name = get_preditor_alias(step, self.database)\n columns = {table_name: []}\n if len(where_data) == 0:\n # no data, don't run predictor\n cols = dn.get_table_columns(predictor) + ['__mindsdb_row_id']\n columns[table_name] = [(c, c) for c in cols]\n values = []\n else:\n data = dn.query(\n table=predictor,\n where_data=where_data\n )\n\n data = [{(key, key): value for key, value in row.items()} for row in data]\n\n values = [{table_name: x} for x in data]\n\n if len(data) > 0:\n row = data[0]\n columns[table_name] = list(row.keys())\n # TODO else\n\n data = {\n 'values': values,\n 'columns': columns,\n 'tables': [table_name],\n 
'types': {table_name: self.model_types}\n }\n except Exception as e:\n raise SqlApiUnknownError(f'error in apply predictor step: {e}') from e\n elif type(step) == JoinStep:\n try:\n left_data = steps_data[step.left.step_num]\n right_data = steps_data[step.right.step_num]\n\n # FIXME https://github.com/mindsdb/mindsdb_sql/issues/136\n # is_timeseries = False\n # if True in [type(step) == ApplyTimeseriesPredictorStep for step in plan.steps]:\n # right_data = steps_data[step.left.step_num]\n # left_data = steps_data[step.right.step_num]\n # is_timeseries = True\n\n if step.query.condition is not None:\n raise ErNotSupportedYet('At this moment supported only JOIN without condition')\n if step.query.join_type.upper() not in ('LEFT JOIN', 'JOIN'):\n raise ErNotSupportedYet('At this moment supported only JOIN and LEFT JOIN')\n\n if len(left_data['tables']) == 0 or len(right_data['tables']) == 0:\n raise ErLogicError('Table for join is not found')\n\n if (\n len(left_data['tables']) != 1 or len(right_data['tables']) != 1\n or left_data['tables'][0] == right_data['tables'][0]\n ):\n raise ErNotSupportedYet('At this moment supported only JOIN of two different tables')\n\n data = {\n 'values': [],\n 'columns': {},\n 'tables': list(set(left_data['tables'] + right_data['tables'])),\n 'types': {}\n }\n\n for data_part in [left_data, right_data]:\n for table_name in data_part['columns']:\n if table_name not in data['columns']:\n data['columns'][table_name] = data_part['columns'][table_name]\n # keep types\n data['types'][table_name] = data_part.get('types', {}).get(table_name, {}).copy()\n else:\n data['columns'][table_name].extend(data_part['columns'][table_name])\n # keep types\n data['types'][table_name].update(data_part.get('types', {}).get(table_name, {}))\n for table_name in data['columns']:\n data['columns'][table_name] = list(set(data['columns'][table_name]))\n\n left_key = left_data['tables'][0]\n right_key = right_data['tables'][0]\n\n left_columns_map = OrderedDict()\n left_columns_map_reverse = OrderedDict()\n for i, column_name in enumerate(left_data['columns'][left_key]):\n left_columns_map[f'a{i}'] = column_name\n left_columns_map_reverse[column_name] = f'a{i}'\n\n right_columns_map = {}\n right_columns_map_reverse = {}\n for i, column_name in enumerate(right_data['columns'][right_key]):\n right_columns_map[f'b{i}'] = column_name\n right_columns_map_reverse[column_name] = f'b{i}'\n\n left_df_data = []\n for row in left_data['values']:\n row = row[left_key]\n left_df_data.append({left_columns_map_reverse[key]: value for key, value in row.items()})\n\n right_df_data = []\n for row in right_data['values']:\n row = row[right_key]\n right_df_data.append({right_columns_map_reverse[key]: value for key, value in row.items()})\n\n df_a = pd.DataFrame(left_df_data, columns=left_columns_map.keys())\n df_b = pd.DataFrame(right_df_data, columns=right_columns_map.keys())\n\n a_name = f'a{round(time.time() * 1000)}'\n b_name = f'b{round(time.time() * 1000)}'\n con = duckdb.connect(database=':memory:')\n con.register(a_name, df_a)\n con.register(b_name, df_b)\n resp_df = con.execute(f).fetchdf()\n con.unregister(a_name)\n con.unregister(b_name)\n con.close()\n\n resp_df = resp_df.replace({np.nan: None})\n resp_dict = resp_df.to_dict(orient='records')\n\n for row in resp_dict:\n new_row = {left_key: {}, right_key: {}}\n for key, value in row.items():\n if key.startswith('a'):\n new_row[left_key][left_columns_map[key]] = value\n else:\n new_row[right_key][right_columns_map[key]] = value\n 
data['values'].append(new_row)\n\n # remo" }, { "id": 276710, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/control_flow_util.py", "file_name": "control_flow_util.py", "fun_name": "InXlaContext", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def InXlaContext(graph):\n ctxt = graph._get_control_flow_context() # pylint: disable=protected-access\n return GetContainingXLAContext(ctxt) is not None\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 19, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 5, "random_cut": "def InXlaContext(graph):\n ctxt = graph._get_control_flow_context() # pylint: disable=protected-access\n return GetContainingXLAContext(ctxt) is not None\n\n" }, { "id": 21357, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distlib/_backport/shutil.py", "file_name": "shutil.py", "fun_name": "_destinsrc", "commit_message": "Vendor in pip 22.1.2", "code": "def _destinsrc(src, dst):\n src = abspath(src)\n dst = abspath(dst)\n if not src.endswith(os.path.sep):\n src += os.path.sep\n if not dst.endswith(os.path.sep):\n dst += os.path.sep\n return dst.startswith(src)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 51, "n_words": 23, "vocab_size": 16, "complexity": 3, "nloc": 8, "token_counts": 66, "n_ast_nodes": 106, "n_identifiers": 9, "random_cut": "def _destinsrc(src, dst):\n src = abspath(src)\n dst = abspath(dst)\n if not src.endswith(os.path.sep):\n src += os.path.sep\n if not dst.endswith(os.path.sep):\n dst += os.path.sep\n return dst.sta" }, { "id": 122281, "commit_id": "2246887f7b39f291c647332251b1a105e9784341", "repo": "jax", "path": "jaxlib/lapack.py", "file_name": "lapack.py", "fun_name": "orgqr_mhlo", "commit_message": "Add input-output aliasing annotations for LAPACK calls on CPU.\n\nPiperOrigin-RevId: 480156067", "code": "def orgqr_mhlo(dtype, a, tau):\n a_type = ir.RankedTensorType(a.type)\n dims = a_type.shape\n assert len(dims) >= 2\n m, n = dims[-2:]\n batch_dims = tuple(dims[:-2])\n num_bd = len(batch_dims)\n b = 1\n for d in batch_dims:\n b *= d\n\n tau_dims = ir.RankedTensorType(tau.type).shape\n assert tau_dims[:-1] == dims[:-2], (tau.type, a.type)\n k = tau_dims[-1]\n\n if dtype == np.float32:\n fn = b\"lapack_sorgqr\"\n lwork = _lapack.lapack_sorgqr_workspace(m, n, k)\n elif dtype == np.float64:\n fn = b\"lapack_dorgqr\"\n lwork = _lapack.lapack_dorgqr_workspace(m, n, k)\n elif dtype == np.complex64:\n fn = b\"lapack_cungqr\"\n lwork = _lapack.lapack_cungqr_workspace(m, n, k)\n elif dtype == np.complex128:\n fn = b\"lapack_zungqr\"\n lwork = _lapack.lapack_zungqr_workspace(m, n, k)\n else:\n raise NotImplementedError(f\"Unsupported dtype {dtype}\")\n\n scalar_layout = []\n layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))\n i32_type = ir.IntegerType.get_signless(32)\n out = custom_call(\n fn,\n [\n a.type,\n ir.RankedTensorType.get(batch_dims, i32_type),\n ir.RankedTensorType.get([lwork], a_type.element_type),\n ],\n [_mhlo_s32(int(b)), _mhlo_s32(m), _mhlo_s32(n), _mhlo_s32(k),\n _mhlo_s32(lwork), a, tau],\n operand_layouts=[scalar_layout] * 5 + [\n layout,\n tuple(range(num_bd, -1, -1)),\n ],\n result_layouts=[\n layout,\n 
tuple(range(num_bd - 1, -1, -1)),\n [0],\n ],\n operand_output_aliases={5: 0},\n )\n return out[:2]\n\n\n# ?potrf: Cholesky decomposition\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 320, "n_words": 163, "vocab_size": 105, "complexity": 6, "nloc": 51, "token_counts": 394, "n_ast_nodes": 591, "n_identifiers": 48, "random_cut": "def orgqr_mhlo(dtype, a, tau):\n a_type = ir.RankedTensorType(a.type)\n dims = a_type.shape\n assert len(dims) >= 2\n m, n = dims[-2:]\n batch_dims = tuple(dims[:-2])\n num_bd = len(batch_dims)\n b = 1\n for d in batch_dims:\n b *= d\n\n tau_dims = ir.RankedTensorType(tau.type).shape\n assert tau_dims[:-1] == dims[:-2], (tau.type, a.type)\n k = tau_dims[-1]\n\n if dtype == np.flo" }, { "id": 197072, "commit_id": "a0daf4e99d77c586fcc62143c84846a0a98bc363", "repo": "sympy", "path": "sympy/utilities/tests/test_iterables.py", "file_name": "test_iterables.py", "fun_name": "test_deprecated_iterables", "commit_message": "compat: add back deprecated stub files in iterables\n\nThe ordered and default_sort_key functions where removed from\nsympy.utilities.iterables in GH-22357 but no deprecated stub functions\nwere left behind. This commit adds and tests the stubs to ensure that\nanyone depending on importing these functions like\n\n from sympy.utilities.iterables import default_sort_key\n from sympy.utilities.iterables import ordered\n\nwill see a deprecation warning rather than an error.\n\nThe proper way to import these functions both before and after these\nchanges is:\n\n from sympy import default_sort_key\n from sympy import ordered", "code": "def test_deprecated_iterables():\n from sympy.utilities.iterables import default_sort_key, ordered\n with warns_deprecated_sympy():\n assert list(ordered([y, x])) == [x, y]\n with warns_deprecated_sympy():\n assert sorted([y, x], key=default_sort_key) == [x, y]\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 46, "n_words": 24, "vocab_size": 18, "complexity": 1, "nloc": 6, "token_counts": 61, "n_ast_nodes": 96, "n_identifiers": 12, "random_cut": "def test_deprecated_iterables():\n from sympy" }, { "id": 311861, "commit_id": "9f5d77e0df957c20a2af574d706140786f0a551a", "repo": "core", "path": "homeassistant/components/homekit_controller/media_player.py", "file_name": "media_player.py", "fun_name": "source", "commit_message": "Add missing type hints to homekit_controller (#65368)", "code": "def source(self) -> str | None:\n \n active_identifier = self.service.value(CharacteristicsTypes.ACTIVE_IDENTIFIER)\n if not active_identifier:\n return None\n\n this_accessory = self._accessory.entity_map.aid(self._aid)\n this_tv = this_accessory.services.iid(self._iid)\n\n input_source = this_accessory.services.first(\n service_type=ServicesTypes.INPUT_SOURCE,\n characteristics={CharacteristicsTypes.IDENTIFIER: active_identifier},\n parent_service=this_tv,\n )\n char = input_source[CharacteristicsTypes.CONFIGURED_NAME]\n return char.value\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 140, "n_words": 33, "vocab_size": 28, "complexity": 2, "nloc": 14, "token_counts": 95, "n_ast_nodes": 149, "n_identifiers": 27, "random_cut": "def source(self) -> str | None:\n \n active_identifier = self.service.value(CharacteristicsTypes.ACTIVE_IDENTIFIER)\n if not 
active_identifier:\n return None\n\n this_accessory = self._accessory.entity_map.aid(self._aid)\n this_tv = this_accessory.services.iid(self._iid)\n\n input_so" }, { "id": 322794, "commit_id": "93cae49c0c572b5c1ac972759140fbe924b0374d", "repo": "PaddleNLP", "path": "examples/model_interpretation/evaluation/accuracy/mrc_f1_evaluate.py", "file_name": "mrc_f1_evaluate.py", "fun_name": "read_model_prediction", "commit_message": "Add NLP model interpretation (#1752)\n\n* upload NLP interpretation\r\n\r\n* fix problems and relocate project\r\n\r\n* remove abandoned picture\r\n\r\n* remove abandoned picture\r\n\r\n* fix dead link in README\r\n\r\n* fix dead link in README\r\n\r\n* fix code style problems\r\n\r\n* fix CR round 1\r\n\r\n* remove .gitkeep files\r\n\r\n* fix code style\r\n\r\n* fix file encoding problem\r\n\r\n* fix code style\r\n\r\n* delete duplicated files due to directory rebuild\r\n\r\n* fix CR round 2\r\n\r\n* fix code style\r\n\r\n* fix ernie tokenizer\r\n\r\n* fix code style\r\n\r\n* fix problem from CR round 1\r\n\r\n* fix bugs\r\n\r\n* fix README\r\n\r\n* remove duplicated files\r\n\r\n* deal with diff of old and new tokenizer results\r\n\r\n* fix CR round 4\r\n\r\n* fix code style\r\n\r\n* add missing dependence\r\n\r\n* fix broken import path\r\n\r\n* move some data file to cloud\r\n\r\n* MRC upper case to lower case\r\n\r\nCo-authored-by: Zeyu Chen \r\nCo-authored-by: binlinquge \r\nCo-authored-by: Guo Sheng ", "code": "def read_model_prediction(file_path):\n f = open(file_path, 'r')\n predict = {}\n for l in f.readlines():\n ins = json.loads(l)\n predict[ins['id']] = ins\n f.close()\n return predict\n\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 50, "n_words": 22, "vocab_size": 17, "complexity": 2, "nloc": 8, "token_counts": 50, "n_ast_nodes": 84, "n_identifiers": 11, "random_cut": "def read_model_prediction(file_path):\n f = open(file_path, 'r')\n predict = {}\n for l in f.readlines():\n ins = json.loads" }, { "id": 141102, "commit_id": "b5bc2b93c33f0f475af69dd6eca656dcf264612d", "repo": "ray", "path": "rllib/utils/exploration/tests/test_explorations.py", "file_name": "test_explorations.py", "fun_name": "test_ddpg", "commit_message": "[RLlib] Move all remaining algos into `algorithms` directory. (#25366)", "code": "def test_ddpg(self):\n # Switch off random timesteps at beginning. We want to test actual\n # GaussianNoise right away.\n config = ddpg.DEFAULT_CONFIG.copy()\n config[\"exploration_config\"][\"random_timesteps\"] = 0\n do_test_explorations(\n ddpg.DDPG,\n \"Pendulum-v1\",\n config,\n np.array([0.0, 0.1, 0.0]),\n expected_mean_action=0.0,\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 129, "n_words": 33, "vocab_size": 31, "complexity": 1, "nloc": 10, "token_counts": 59, "n_ast_nodes": 82, "n_identifiers": 11, "random_cut": "def test_ddpg(self):\n # Switch off random timesteps at beginning. 
We want to test actual\n # GaussianNoise right away.\n config = ddpg.DEFAULT_CONF" }, { "id": 292283, "commit_id": "1bbc1f5f55de29bef86edbf7e504298c3d51bdc8", "repo": "core", "path": "tests/helpers/test_entity_registry.py", "file_name": "test_entity_registry.py", "fun_name": "test_invalid_entity_category_str", "commit_message": "Validate in split_entity_id (#66835)", "code": "async def test_invalid_entity_category_str(hass, registry, caplog):\n \n entry = er.RegistryEntry(\n entity_id=\"light.kitchen\",\n unique_id=\"5678\",\n platform=\"hue\",\n entity_category=\"invalid\",\n )\n\n assert entry.entity_category is None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 57, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 8, "token_counts": 39, "n_ast_nodes": 67, "n_identifiers": 11, "random_cut": "async def test_invalid_entity_category_str(hass, registry, caplog):\n \n entry = er.RegistryEntry(\n entity_id=\"light.kitchen\",\n " }, { "id": 30569, "commit_id": "17a5b8b43c6afe4455bb9baa436b6046186e5cd2", "repo": "OCRmyPDF", "path": "tests/test_validation.py", "file_name": "test_validation.py", "fun_name": "test_report_file_size", "commit_message": "Refactor reporting of optimization failures", "code": "def test_report_file_size(tmp_path, caplog):\n in_ = tmp_path / 'a.pdf'\n out = tmp_path / 'b.pdf'\n pdf = pikepdf.new()\n pdf.save(in_)\n pdf.save(out)\n opts = make_opts(output_type='pdf')\n vd.report_output_file_size(opts, in_, out)\n assert caplog.text == ''\n caplog.clear()\n\n waste_of_space = b'Dummy' * 5000\n pdf.Root.Dummy = waste_of_space\n pdf.save(in_)\n pdf.Root.Dummy2 = waste_of_space + waste_of_space\n pdf.save(out)\n\n vd.report_output_file_size(opts, in_, out, ['The optional dependency...'])\n assert 'optional dependency' in caplog.text\n caplog.clear()\n\n vd.report_output_file_size(opts, in_, out, [])\n assert 'No reason' in caplog.text\n caplog.clear()\n\n opts = make_opts(in_, out, optimize=0, output_type='pdf')\n vd.report_output_file_size(opts, in_, out, [\"Optimization was disabled.\"])\n assert 'disabled' in caplog.text\n caplog.clear()\n\n", "url": "https://github.com/ocrmypdf/OCRmyPDF.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 154, "n_words": 83, "vocab_size": 48, "complexity": 1, "nloc": 25, "token_counts": 189, "n_ast_nodes": 311, "n_identifiers": 21, "random_cut": "def test_report_file_size(tmp_path, caplog):\n in_ = tmp_path / 'a.pdf'\n out = tmp_path / 'b.pdf'\n pdf = pikepdf.new()\n pdf.save(in_)\n pdf.save(out)\n opts = make_opts(output_type='pdf')\n vd.report_output_file_size(opts, in_, out)\n assert caplog.text == ''\n caplog.clear()\n\n waste_of_space = b'Dummy' * 5000\n pdf.Root.Dummy = waste_of_space\n pdf.save(in_)\n p" }, { "id": 79632, "commit_id": "8691b199672c1b9406a5a5da220e48b0ca9198b6", "repo": "wagtail", "path": "wagtail/snippets/tests/test_snippets.py", "file_name": "test_snippets.py", "fun_name": "setUpTestData", "commit_message": "Make all usage reports use the reference index", "code": "def setUpTestData(cls):\n super().setUpTestData()\n management.call_command(\"rebuild_references_index\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 17, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 34, "n_identifiers": 5, "random_cut": "def 
setUpTestData(cls):\n " }, { "id": 13268, "commit_id": "107631e955b21db8a4ddb3bee02130de3650d032", "repo": "jina", "path": "tests/integration/instrumentation/__init__.py", "file_name": "__init__.py", "fun_name": "spans_with_error", "commit_message": "feat(instrumentation): add OpenTelemetry tracing and metrics with basic configurations (#5175)", "code": "def spans_with_error(spans):\n error_spans = []\n for span in spans:\n for tag in span['tags']:\n if 'otel.status_code' == tag.get('key', '') and 'ERROR' == tag.get(\n 'value', ''\n ):\n error_spans.append(span)\n return error_spans\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 95, "n_words": 28, "vocab_size": 24, "complexity": 5, "nloc": 9, "token_counts": 53, "n_ast_nodes": 95, "n_identifiers": 7, "random_cut": "def spans_with_error(spans):\n error_spans = []\n for span in spans:\n for tag in span['tags']:\n if 'otel.status_code' == tag.get('key', '') and 'ERROR' == tag.get(\n 'value', ''\n ):\n " }, { "id": 82859, "commit_id": "d5478765d38210addf474dd73faf0d103052027a", "repo": "examples", "path": "imagenet/main.py", "file_name": "main.py", "fun_name": "train", "commit_message": "If the dataset is not exactly divisible by world_size, the validation accuracy is incorrect. We solve this problem with an auxiliary validation set. (#980)", "code": "def train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(train_loader),\n [batch_time, data_time, losses, top1, top5],\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (images, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n if torch.cuda.is_available():\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.display(i + 1)\n\n", "url": "https://github.com/pytorch/examples.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 357, "n_words": 134, "vocab_size": 98, "complexity": 5, "nloc": 31, "token_counts": 300, "n_ast_nodes": 486, "n_identifiers": 43, "random_cut": "def train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':.4e')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(\n len(train_loader),\n [batch_time, data_time, losses, top1, top5],\n prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (images, target) in enumerate(train_loader):\n # 
measure data loading time\n data_time.update(time.time() - end)\n\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n if torch.cuda.is_available():\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n en" }, { "id": 7867, "commit_id": "5ba06c861fb8e01729af4185f1a056f09482cdee", "repo": "ludwig", "path": "tests/integration_tests/test_visualization_api.py", "file_name": "test_visualization_api.py", "fun_name": "test_roc_curves_vis_api", "commit_message": "Added conditional check for UNK token insertion into category feature vocab (#2429)\n\n* Added conditional check for UNK token\r\n\r\n* Fixing test failures\r\n\r\n* Fixing more tests\r\n\r\n* Fixing hyperopt test failures\r\n\r\n* Resolve issues with server related tests\r\n\r\n* Fix serving related failures\r\n\r\n* Fix last test\r\n\r\n* Added better logging and comments\r\n\r\n* Revert to old else case code for clarity\r\n\r\n* Bump fsspec", "code": "def test_roc_curves_vis_api(experiment_to_use):\n \n experiment = experiment_to_use\n probabilities = experiment.probabilities\n viz_outputs = (\"pdf\", \"png\")\n positive_label = 1\n with TemporaryDirectory() as tmpvizdir:\n for viz_output in viz_outputs:\n vis_output_pattern_pdf = tmpvizdir + f\"/*.{viz_output}\"\n visualize.roc_curves(\n [probabilities, probabilities],\n experiment.ground_truth,\n experiment.ground_truth_metadata,\n experiment.output_feature_name,\n positive_label,\n model_names=[\"Model1\", \"Model2\"],\n output_directory=tmpvizdir,\n file_format=viz_output,\n )\n figure_cnt = glob.glob(vis_output_pattern_pdf)\n assert 1 == len(figure_cnt)\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 247, "n_words": 47, "vocab_size": 41, "complexity": 2, "nloc": 20, "token_counts": 98, "n_ast_nodes": 162, "n_identifiers": 21, "random_cut": "def test_roc_curves_vis_api(experiment_to_use):\n \n experiment = experiment_to_use\n probabilities = experiment.pr" }, { "id": 28216, "commit_id": "43765a4c1fc029a529827dd86a2d1912ac4c98b6", "repo": "saleor", "path": "saleor/tests/fixtures.py", "file_name": "fixtures.py", "fun_name": "product_with_two_variants", "commit_message": "Fix ORM crash when generating hundreds of search vector in SQL (#10261) (#10282)\n\nThis fixes a recursion error crash when generating hundreds of `SearchVector` for a single SQL update statement.\r\n\r\nKnown issue: PostgreSQL may reject the statement when thousands of `SearchVector` are being generated with the following error (fixed by #10279):\r\n```\r\ndjango.db.utils.OperationalError: stack depth limit exceeded\r\nHINT: Increase the configuration parameter \"max_stack_depth\" (currently 2048kB), after ensuring the platform's stack depth limit is adequate.\r\n```", "code": "def product_with_two_variants(product_type, category, warehouse, channel_USD):\n product = Product.objects.create(\n name=\"Test product with two variants\",\n slug=\"test-product-with-two-variant\",\n product_type=product_type,\n category=category,\n )\n\n 
ProductChannelListing.objects.create(\n product=product,\n channel=channel_USD,\n is_published=True,\n visible_in_listings=True,\n available_for_purchase_at=datetime.datetime(1999, 1, 1, tzinfo=pytz.UTC),\n )\n\n variants = [\n ProductVariant(\n product=product,\n sku=f\"Product variant #{i}\",\n )\n for i in (1, 2)\n ]\n ProductVariant.objects.bulk_create(variants)\n variants_channel_listing = [\n ProductVariantChannelListing(\n variant=variant,\n channel=channel_USD,\n price_amount=Decimal(10),\n cost_price_amount=Decimal(1),\n currency=channel_USD.currency_code,\n )\n for variant in variants\n ]\n ProductVariantChannelListing.objects.bulk_create(variants_channel_listing)\n Stock.objects.bulk_create(\n [\n Stock(\n warehouse=warehouse,\n product_variant=variant,\n quantity=10,\n )\n for variant in variants\n ]\n )\n product.search_vector = FlatConcat(*prepare_product_search_vector_value(product))\n product.save(update_fields=[\"search_vector\"])\n\n return product\n\n\n@pytest.fixture", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 396, "n_words": 79, "vocab_size": 54, "complexity": 4, "nloc": 46, "token_counts": 209, "n_ast_nodes": 318, "n_identifiers": 43, "random_cut": "def product_with_two_variants(product_type, category, warehouse, channel_USD):\n product = Product.objects.create(\n name=\"Test product with two variants\",\n slug=\"test-product-with-two-variant\",\n product_type=product_type,\n category=category,\n )\n\n ProductChannelListing.objects.create(\n product=product,\n channel=channel_USD,\n is_published=True,\n visible_in_listings=True,\n available_for_purchase_at=datetime.datetime(1999, 1, 1, tzinfo=pytz.UTC),\n )\n\n variants = [\n ProductVariant(\n product=product,\n sku=f\"Product variant #{i}\",\n )\n for i in (1, 2)\n ]\n ProductVariant.objects" }, { "id": 207175, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_inlines/tests.py", "file_name": "tests.py", "fun_name": "test_inline_change_m2m_view_only_perm", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_inline_change_m2m_view_only_perm(self):\n permission = Permission.objects.get(\n codename=\"view_book\", content_type=self.book_ct\n )\n self.user.user_permissions.add(permission)\n response = self.client.get(self.author_change_url)\n # View-only inlines.\n self.assertIs(\n response.context[\"inline_admin_formset\"].has_view_permission, True\n )\n self.assertIs(\n response.context[\"inline_admin_formset\"].has_add_permission, False\n )\n self.assertIs(\n response.context[\"inline_admin_formset\"].has_change_permission, False\n )\n self.assertIs(\n response.context[\"inline_admin_formset\"].has_delete_permission, False\n )\n self.assertContains(response, \"
<h2>Author-book relationships</h2>
    \")\n self.assertContains(\n response,\n '',\n html=True,\n )\n # The field in the inline is read-only.\n self.assertContains(response, \"
<p>%s</p>
    \" % self.book)\n self.assertNotContains(\n response,\n '',\n html=True,\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 335, "n_words": 64, "vocab_size": 47, "complexity": 1, "nloc": 31, "token_counts": 152, "n_ast_nodes": 249, "n_identifiers": 25, "random_cut": "def test_inline_change_m2m_view_only_perm(self):\n permission = Permission.objects.get(\n codename=\"view_book\", content_type=self.book_ct\n )\n self.user.user_permissions.add(permission)\n response = self.client.get(self.author_change_url)\n # View-only inlines.\n self.assertIs(\n response.context[\"inline_admin_formset\"].has_view_permission, True\n )\n self.assertIs(\n response.context[\"inline_admin_formset\"].has_add_permission, False\n )\n self.assertIs(\n response.context[\"inline_admin_formset\"].has_change_permission, False\n )\n self.assertIs(\n response.context[\"inline_admin_formset\"].has_delete_permission, False\n )\n self.assertContains(response, \"
<h2>Author-book relationships</h2>
    \")\n self.assertContains(\n response,\n '\r\nCo-authored-by: makseq-ubnt ", "code": "def get_serializer_context(self):\n context = super().get_serializer_context()\n project_id = self.request.data.get('project')\n if project_id:\n context['project'] = generics.get_object_or_404(Project, pk=project_id)\n return context\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 54, "n_words": 16, "vocab_size": 13, "complexity": 2, "nloc": 6, "token_counts": 46, "n_ast_nodes": 78, "n_identifiers": 12, "random_cut": "def get_serializer_context(self):\n context = super().get_serializer_context()\n project_id = self.request.data.get('project')\n if project_id:\n context['project'] = generics.get_object_or_404(Project, pk=project_id)\n return contex" }, { "id": 180143, "commit_id": "9cd4c3121fc351da57491594279c6d3abbb45482", "repo": "gradio", "path": "test/test_components.py", "file_name": "test_components.py", "fun_name": "test_component_functions", "commit_message": "textbox-autoheight (#1009)\n\n* textbox-autoheight\r\n- add max-lines to textbox\r\n\r\n* textbox-autoheight\r\n- reformat\r\n\r\n* textbox-autoheight\r\n- add demo\r\n\r\n* textbox-autoheight\r\n- tweaks on scripts\r\n\r\n* textbox-autoheight\r\n- fix tests\r\n\r\n* textbox-autoheight\r\n- fix tests\r\n\r\n* textbox-autoheight\r\n- fix tests\r\n\r\n* textbox-autoheight\r\n- convert default max_height from 100 to 20\r\n\r\n* textbox-autoheight\r\n- convert default max_height from 100 to 20", "code": "def test_component_functions(self):\n \n text_input = gr.Textbox()\n self.assertEqual(text_input.preprocess(\"Hello World!\"), \"Hello World!\")\n self.assertEqual(text_input.preprocess_example(\"Hello World!\"), \"Hello World!\")\n self.assertEqual(text_input.postprocess(None), None)\n self.assertEqual(text_input.postprocess(\"Ali\"), \"Ali\")\n self.assertEqual(text_input.postprocess(2), \"2\")\n self.assertEqual(text_input.postprocess(2.14), \"2.14\")\n self.assertEqual(text_input.serialize(\"Hello World!\", True), \"Hello World!\")\n with tempfile.TemporaryDirectory() as tmpdirname:\n to_save = text_input.save_flagged(\n tmpdirname, \"text_input\", \"Hello World!\", None\n )\n self.assertEqual(to_save, \"Hello World!\")\n restored = text_input.restore_flagged(tmpdirname, to_save, None)\n self.assertEqual(restored, \"Hello World!\")\n\n with self.assertWarns(DeprecationWarning):\n _ = gr.Textbox(type=\"number\")\n\n self.assertEqual(\n text_input.tokenize(\"Hello World! Gradio speaking.\"),\n (\n [\"Hello\", \"World!\", \"Gradio\", \"speaking.\"],\n [\n \"World! Gradio speaking.\",\n \"Hello Gradio speaking.\",\n \"Hello World! speaking.\",\n \"Hello World! Gradio\",\n ],\n None,\n ),\n )\n text_input.interpretation_replacement = \"unknown\"\n self.assertEqual(\n text_input.tokenize(\"Hello World! Gradio speaking.\"),\n (\n [\"Hello\", \"World!\", \"Gradio\", \"speaking.\"],\n [\n \"unknown World! Gradio speaking.\",\n \"Hello unknown Gradio speaking.\",\n \"Hello World! unknown speaking.\",\n \"Hello World! 
Gradio unknown\",\n ],\n None,\n ),\n )\n self.assertEqual(\n text_input.get_template_context(),\n {\n \"lines\": 1,\n \"max_lines\": 20,\n \"placeholder\": None,\n \"default_value\": \"\",\n \"name\": \"textbox\",\n \"label\": None,\n \"css\": {},\n \"interactive\": None,\n },\n )\n self.assertIsInstance(text_input.generate_sample(), str)\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 844, "n_words": 139, "vocab_size": 79, "complexity": 1, "nloc": 59, "token_counts": 317, "n_ast_nodes": 551, "n_identifiers": 27, "random_cut": "def test_component_functions(self):\n \n text_input = gr.Textbox()\n self.assertEqual(text_input.preprocess(\"Hello World!\"), \"Hello World!\")\n self.assertEqual(text_input.preprocess_example(\"Hello World!\"), \"Hello World!\")\n self.assertEqual(text_input.postprocess(None), None)\n self.assertEqual(text_input.postprocess(\"Ali\"), \"Ali\")\n self.assertEqual(text_input.postprocess(2), \"2\")\n self.assertEqual(text_input.postprocess(2.14), \"2.14\")\n self.assertEqual(text_input.serialize(\"Hello World!\", True), \"Hello World!\")\n with tempfile.TemporaryDirectory() as tmpdirname:\n to_save = text_input.save_flagged(\n tmpdirname, \"text_input\", \"Hello World!\", None\n )\n self.assertEqual(to_save, \"Hello World!\")\n restored = text_input.restore_flagged(tmpdirname, to_save, None)\n self.assertEqual(restored, \"Hello World!\")\n\n with self.assertWarns(DeprecationWarning):\n _ = gr.Textbox(type=\"number\")\n\n self.assertEqual(\n text_input.tokenize(\"Hello World! Gradio speaking.\"),\n (\n [\"Hello\", \"World!\", \"Gradio\", \"speaking.\"],\n [\n \"World! Gradio speaking.\",\n \"Hello Gradio speaking.\",\n \"Hello World! speaking.\",\n \"Hello World! Gradio\",\n ],\n None,\n ),\n )\n text_input.interpretation_replacement = \"unknown\"\n self.assertEqual(\n text_input.tokenize(\"Hello World! Gradio speaking.\"),\n (\n [\"Hello\", \"World!\", \"Gradio\", \"speaking.\"],\n [\n \"unknown World! Gradio speaking.\",\n \"Hello unknown Gradio speaking.\",\n \"Hello World! unknown speaking.\",\n \"Hello World! 
Gradio unknown\",\n ],\n None,\n ),\n )\n self.assertEqual(\n text_input.get_template_context(),\n {\n \"lines\": 1,\n \"max_lines\": 20,\n \"placeholder\": None,\n \"default_val" }, { "id": 289816, "commit_id": "64eb316908f26c023d7f787b3d655c968e08cdad", "repo": "core", "path": "tests/components/risco/test_binary_sensor.py", "file_name": "test_binary_sensor.py", "fun_name": "test_error_on_connect", "commit_message": "Add alarmed binary sensor to Risco integration (#77315)\n\nCo-authored-by: Martin Hjelmare ", "code": "async def test_error_on_connect(hass, connect_with_error, local_config_entry):\n \n await hass.config_entries.async_setup(local_config_entry.entry_id)\n await hass.async_block_till_done()\n registry = er.async_get(hass)\n assert not registry.async_is_registered(FIRST_ENTITY_ID)\n assert not registry.async_is_registered(SECOND_ENTITY_ID)\n assert not registry.async_is_registered(FIRST_ALARMED_ENTITY_ID)\n assert not registry.async_is_registered(SECOND_ALARMED_ENTITY_ID)\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 48, "n_words": 24, "vocab_size": 17, "complexity": 1, "nloc": 8, "token_counts": 67, "n_ast_nodes": 112, "n_identifiers": 16, "random_cut": "async def test_error_on_connect(hass, connect_with_error, local_config_entry):\n \n await hass.config_entries.async_setup(local_config_entry.entry_id)\n await hass.async_block_till_done()\n registry = er.async_get(hass)\n assert not registry.async_is_registered(FIRST_ENTITY_ID)\n assert not registry.async_is_registered(SECOND_ENTITY_ID)\n assert not registry.async_is_registered(FIRST_ALARMED_ENTITY_ID)\n assert not registry.async_is_registered(SECOND_ALARMED_ENTITY_ID)\n\n" }, { "id": 40100, "commit_id": "5dfa6b0782803cb0635119ee1dcf8775dd76c8a7", "repo": "dash", "path": "components/dash-table/tests/selenium/test_markdown.py", "file_name": "test_markdown.py", "fun_name": "test_mark002_emphasized_text", "commit_message": ":hocho: deprecated find_element(s)_by_css_selector", "code": "def test_mark002_emphasized_text(test):\n test.start_server(get_app())\n\n target = test.table(\"table\")\n\n target.column(1).sort(1)\n assert (\n target.cell(0, \"markdown-italics\")\n .find_inside(\".dash-cell-value > p > em\")\n .get_attribute(\"innerHTML\")\n == \"1\"\n )\n\n target.column(1).sort(1)\n assert (\n target.cell(0, \"markdown-italics\")\n .find_inside(\".dash-cell-value > p > em\")\n .get_attribute(\"innerHTML\")\n == \"98\"\n )\n\n", "url": "https://github.com/plotly/dash.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 113, "n_words": 34, "vocab_size": 20, "complexity": 1, "nloc": 17, "token_counts": 89, "n_ast_nodes": 160, "n_identifiers": 11, "random_cut": "def test_mark002_emphasized_text(test):\n test.start_server(get_app())\n\n target = test.table(\"table\")\n\n target.column(1).sort(1)\n assert (\n target.cell(0, \"markdown-italics\")\n .find_inside(\".dash-cell-value > p > em\")\n .get_attribute(\"innerHTML\")\n == \"1\"\n )\n\n target.column(1).sort(1)\n assert (\n target.cell(0, \"markdown-italics\")\n .find_inside(\".dash-cell-value > p > em\")\n .get_attribute(\"innerHTML\")\n == \"98\"\n )\n\n" }, { "id": 74313, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/tests/test_page_model.py", "file_name": "test_page_model.py", "fun_name": "test_update_aliases", "commit_message": "Reformat with black", "code": "def 
test_update_aliases(self):\n event_page = EventPage.objects.get(url_path=\"/home/events/christmas/\")\n alias = event_page.create_alias(update_slug=\"new-event-page\")\n alias_alias = alias.create_alias(update_slug=\"new-event-page-2\")\n\n # Update the title and add a speaker\n event_page.title = \"Updated title\"\n event_page.draft_title = \"A different draft title\"\n event_page.speakers.add(\n EventPageSpeaker(\n first_name=\"Ted\",\n last_name=\"Crilly\",\n )\n )\n event_page.save()\n\n # Nothing should've happened yet\n alias.refresh_from_db()\n alias_alias.refresh_from_db()\n self.assertEqual(alias.title, \"Christmas\")\n self.assertEqual(alias_alias.title, \"Christmas\")\n self.assertEqual(alias.speakers.count(), 1)\n self.assertEqual(alias_alias.speakers.count(), 1)\n\n PageLogEntry.objects.all().delete()\n\n event_page.update_aliases()\n\n # Check that the aliases have been updated\n alias.refresh_from_db()\n alias_alias.refresh_from_db()\n self.assertEqual(alias.title, \"Updated title\")\n self.assertEqual(alias_alias.title, \"Updated title\")\n self.assertEqual(alias.speakers.count(), 2)\n self.assertEqual(alias_alias.speakers.count(), 2)\n\n # Draft titles shouldn't update as alias pages do not have drafts\n self.assertEqual(alias.draft_title, \"Updated title\")\n self.assertEqual(alias_alias.draft_title, \"Updated title\")\n\n # Check log entries were created\n self.assertTrue(\n PageLogEntry.objects.filter(page=alias, action=\"wagtail.publish\").exists()\n )\n self.assertTrue(\n PageLogEntry.objects.filter(\n page=alias_alias, action=\"wagtail.publish\"\n ).exists()\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 437, "n_words": 107, "vocab_size": 74, "complexity": 1, "nloc": 37, "token_counts": 268, "n_ast_nodes": 461, "n_identifiers": 31, "random_cut": "def test_update_aliases(self):\n event_page = EventPage.objects.get(url_path=\"/home/events/christmas/\")\n alias = event_page.create_alias(update_slug=\"new-event-page\")\n alias_alias = alias.create_alias(update_slug=\"new-event-page-2\")\n\n # Update the title and add a speaker\n event_page.title = \"Updated title\"\n event_page.draft_title = \"A different draft title\"\n event_page.speakers.add(\n EventPageSpeaker(\n first_name=\"Ted\",\n last_name=\"Crilly\",\n )\n )\n event_page.save()\n\n # Nothing should've happened yet\n alias.refresh_from_db()\n alias_alias.refresh_from_db()\n self.assertEqual(alias.title, \"Christmas\")\n self.assertEqual(alias_alias.title, \"Christmas\")\n self.assertEqual(alias.speakers.count(), 1)\n self.assertEqual(alias_alias.speakers.count(), 1)\n\n PageLogEntry.objects.all().delete()\n\n event_page.update_aliases()\n\n # Check that the aliases have been updated\n alias.refresh_from_db()\n alias_alias.refresh_from_db()\n self.assertEqual(alias.title, \"Updated title\")\n self.assertEqual(alias_alias.title, \"Updated title\")\n self.assertEqual(alias.speakers.count(), 2)\n self.assertEqual(alias_alias.speakers.count(), 2)\n\n # Draft titles shouldn't update as alias pages do not have drafts\n self.assertEqual(alias.draft_title, \"Updated title\")\n self.assertEqual(alias_alias.draft_title, \"Updated title\")\n\n # Check log entries were created\n self.assertTrue(\n PageLogEntry.objects.filter(page=alias, action=\"wagtail.publish\").exists()\n )\n self.assertTrue(\n PageLogEntry.ob" }, { "id": 244446, "commit_id": "2cc631f7656258dec0d12bcce459f5fe3f781b68", "repo": "mmdetection", "path": 
"tests/test_datasets/test_coco.py", "file_name": "test_coco.py", "fun_name": "test_coco_dataset_without_filter_cfg", "commit_message": "Add Transforms", "code": "def test_coco_dataset_without_filter_cfg(self):\n # test CocoDataset without filter_cfg\n dataset = CocoDataset(\n data_prefix=dict(img='imgs'),\n ann_file='tests/data/coco_sample.json',\n pipeline=[])\n self.assertEqual(len(dataset), 2)\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 68, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 6, "token_counts": 38, "n_ast_nodes": 64, "n_identifiers": 11, "random_cut": "def test_coco_dataset_without_filter_cfg(self):\n # test CocoDataset without filter_cfg\n dataset = CocoDataset(\n data_prefix=dict(img='imgs'),\n ann_file='tests/data/coco_sample.json',\n pipeline=[])\n self.assertEqual(len(dataset)," }, { "id": 122054, "commit_id": "405a2310ce2db325a05ba292944ec1a23e463b6c", "repo": "jax", "path": "jax/experimental/pjit.py", "file_name": "pjit.py", "fun_name": "_python_pjit_helper", "commit_message": "Implement pjit fast path in cpp for jax.Array inputs\n\nPiperOrigin-RevId: 475988677", "code": "def _python_pjit_helper(infer_params, *args, **kwargs):\n args_flat, _, params, _, out_tree, _ = infer_params(*args, **kwargs)\n for arg in args_flat:\n _check_arg(arg)\n out_flat = pjit_p.bind(*args_flat, **params)\n outs = tree_unflatten(out_tree, out_flat)\n return outs, out_flat, out_tree\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 37, "n_words": 30, "vocab_size": 27, "complexity": 2, "nloc": 7, "token_counts": 66, "n_ast_nodes": 97, "n_identifiers": 15, "random_cut": "def _python_pjit_helper(infer_params, *args, **kwargs):\n args_flat, _, params, _, out_tree, _ = infer_params(*args, **kwargs)\n for arg in args_flat:\n _check_arg(arg)\n out_flat = pjit_p.bind(*args_flat, **p" }, { "id": 212429, "commit_id": "f0ea9eefccb1a1cc9b072dec7512916591eba88c", "repo": "bokeh", "path": "tests/unit/bokeh/plotting/test_contour.py", "file_name": "test_contour.py", "fun_name": "test_contour_colorbar", "commit_message": "Add contouring (#12020)\n\n* Contour demonstration\r\n\r\n* Use MultiLine with nans for contour lines\r\n\r\n* Add ContourRenderer\r\n\r\n* Try out ContourRenderer.data idea\r\n\r\n* Support different ways of specifying palettes\r\n\r\n* Contour ColorBar\r\n\r\n* Line, fill and hatch visuals on ContourColorBar\r\n\r\n* Refactor color bar classes\r\n\r\n* Use contour levels in color bar\r\n\r\n* Horizontal contour color bar\r\n\r\n* Support rendering just lines or just fill\r\n\r\n* figure.contour function\r\n\r\n* Contour level validation\r\n\r\n* Add tests, typing, docstrings\r\n\r\n* Fix codebase errors\r\n\r\n* Minimal test deps\r\n\r\n* Fix different python and js defaults for ContourRenderer\r\n\r\n* Address review comments\r\n\r\n* More review comments addressed\r\n\r\n* Fix ContourRenderer defaults and contour dataclass to dict\r\n\r\n* Update python unit tests to use dataclasses\r\n\r\n* isort fix", "code": "def test_contour_colorbar(xyz_levels):\n x, y, z, levels = xyz_levels\n cr = from_contour(x, y, z, levels, fill_color=\"red\", line_color=\"black\")\n color_bar = cr.construct_color_bar()\n assert color_bar.levels == levels\n assert color_bar.fill_renderer == cr.fill_renderer\n assert color_bar.line_renderer == 
cr.line_renderer\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n@pytest.fixture", "url": "https://github.com/bokeh/bokeh.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 50, "n_words": 37, "vocab_size": 27, "complexity": 1, "nloc": 7, "token_counts": 63, "n_ast_nodes": 106, "n_identifiers": 16, "random_cut": "def test_contour_colorbar(xyz_levels):\n x, y, z, levels = xyz_levels\n cr = from_contour(x, y, z, levels, fill_color=\"red\", line_color=\"black\")\n color_bar = cr.construct_color_bar()\n assert color_bar.levels == levels\n assert color_bar.fill_renderer == cr.fill_renderer\n assert color_bar.line_renderer " }, { "id": 66418, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/manufacturing/doctype/production_plan/production_plan.py", "file_name": "production_plan.py", "fun_name": "get_sales_orders", "commit_message": "style: format code with black", "code": "def get_sales_orders(self):\n\tso_filter = item_filter = \"\"\n\tbom_item = \"bom.item = so_item.item_code\"\n\n\tdate_field_mapper = {\n\t\t\"from_date\": (\">=\", \"so.transaction_date\"),\n\t\t\"to_date\": (\"<=\", \"so.transaction_date\"),\n\t\t\"from_delivery_date\": (\">=\", \"so_item.delivery_date\"),\n\t\t\"to_delivery_date\": (\"<=\", \"so_item.delivery_date\"),\n\t}\n\n\tfor field, value in date_field_mapper.items():\n\t\tif self.get(field):\n\t\t\tso_filter += f\" and {value[1]} {value[0]} %({field})s\"\n\n\tfor field in [\"customer\", \"project\", \"sales_order_status\"]:\n\t\tif self.get(field):\n\t\t\tso_field = \"status\" if field == \"sales_order_status\" else field\n\t\t\tso_filter += f\" and so.{so_field} = %({field})s\"\n\n\tif self.item_code and frappe.db.exists(\"Item\", self.item_code):\n\t\tbom_item = self.get_bom_item() or bom_item\n\t\titem_filter += \" and so_item.item_code = %(item_code)s\"\n\n\topen_so = frappe.db.sql(\n\t\tf,\n\t\tself.as_dict(),\n\t\tas_dict=1,\n\t)\n\n\treturn open_so\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 67, "n_words": 93, "vocab_size": 59, "complexity": 9, "nloc": 38, "token_counts": 158, "n_ast_nodes": 329, "n_identifiers": 20, "random_cut": "def get_sales_orders(self):\n\tso_filter = item_filter = \"\"\n\tbom_item = \"bom.item = so_item.item_code\"\n\n\tdate_field_mapper = {\n\t\t\"from_date\"" }, { "id": 208083, "commit_id": "1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc", "repo": "celery", "path": "t/integration/test_canvas.py", "file_name": "test_canvas.py", "fun_name": "test_nested_group_chord_counting_chord", "commit_message": "Canvas Header Stamping (#7384)\n\n* Strip down the header-stamping PR to the basics.\r\n\r\n* Serialize groups.\r\n\r\n* Add groups to result backend meta data.\r\n\r\n* Fix spelling mistake.\r\n\r\n* Revert changes to canvas.py\r\n\r\n* Revert changes to app/base.py\r\n\r\n* Add stamping implementation to canvas.py\r\n\r\n* Send task to AMQP with groups.\r\n\r\n* Successfully pass single group to result.\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* First draft of the visitor API.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* 
[pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed lint and elements\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* type -> isinstance\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Redo header stamping (#7341)\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed lint and elements\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* type -> isinstance\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Omer Katz \r\n\r\n* Added stamping mechanism\r\n\r\n* Manual stamping improved\r\n\r\n* flake8 fixed\r\n\r\n* Added subtests\r\n\r\n* Add comma.\r\n\r\n* Moved groups to stamps\r\n\r\n* Fixed chord and added test for that\r\n\r\n* Strip down the header-stamping PR to the basics.\r\n\r\n* Serialize groups.\r\n\r\n* Add groups to result backend meta data.\r\n\r\n* Fix spelling mistake.\r\n\r\n* Revert changes to canvas.py\r\n\r\n* Revert changes to app/base.py\r\n\r\n* Add stamping implementation to canvas.py\r\n\r\n* Send task to AMQP with groups.\r\n\r\n* Successfully pass single group to result.\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* First draft of the visitor API.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* Fixed lint and elements\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* type -> isinstance\r\n\r\n* Added stamping mechanism\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Manual stamping improved\r\n\r\n* fail_ci_if_error uncommented\r\n\r\n* flake8 fixed\r\n\r\n* Added subtests\r\n\r\n* Changes\r\n\r\n* Add comma.\r\n\r\n* Fixed chord and added test for that\r\n\r\n* canvas.py fixed\r\n\r\n* Test chord.py fixed\r\n\r\n* Fixed stamped_headers\r\n\r\n* collections import fixed\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* collections import fixed\r\n\r\n* Update celery/backends/base.py\r\n\r\nCo-authored-by: Omer Katz \r\n\r\n* ampq.py fixed\r\n\r\n* Refrain from using deprecated import path.\r\n\r\n* Fix test_complex_chain regression.\r\n\r\nWhenever we stamp a group we need to freeze it first if it wasn't already frozen.\r\nSomewhere along the line, the group id changed because we were freezing twice.\r\nThis commit places the stamping operation after preparing the chain's steps which fixes the problem somehow.\r\n\r\nWe don't know why yet.\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed issues with maybe_list. 
Add documentation\r\n\r\n* Fixed potential issue with integration tests\r\n\r\n* Fixed issues with _regen\r\n\r\n* Fixed issues with _regen\r\n\r\n* Fixed test_generator issues\r\n\r\n* Fixed _regen stamping\r\n\r\n* Fixed _regen stamping\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Update docs/userguide/canvas.rst\r\n\r\nCo-authored-by: Omer Katz \r\n\r\n* Fixed Couchbase\r\n\r\n* Better stamping intro\r\n\r\n* New GroupVisitor example\r\n\r\n* Adjust documentation.\r\n\r\nCo-authored-by: Naomi Elstein \r\nCo-authored-by: Omer Katz \r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Asif Saif Uddin \r\nCo-authored-by: Omer Katz ", "code": "def test_nested_group_chord_counting_chord(self, manager):\n try:\n manager.app.backend.ensure_chords_allowed()\n except NotImplementedError as e:\n raise pytest.skip(e.args[0])\n\n gchild_count = 42\n gchild_sig = chord(\n (identity.si(1337),) * gchild_count, identity.si(31337),\n )\n child_chord = chord((gchild_sig,), identity.s())\n group_sig = group((child_chord,))\n res = group_sig.delay()\n # Wait for the result to land and confirm its value is as expected\n assert res.get(timeout=TIMEOUT) == [[31337]]\n", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 152, "n_words": 50, "vocab_size": 45, "complexity": 2, "nloc": 13, "token_counts": 108, "n_ast_nodes": 171, "n_identifiers": 25, "random_cut": "def test_nested_group_chord_counting_chord(self, manager):\n try:\n manager.app.backend.ensure_chords_allowed()\n except NotImplementedError as e:\n raise pytest.skip(e.args[0])\n\n gchild_count = 42\n gchild_sig = chord(\n (identity.si(1337),) * gchild_count, identity.si(31337),\n )\n child_chord = chord((gchild_sig,), identity.s())\n group_sig = group((child_chord,))\n res = group_sig.delay()\n # Wait for the result to land and confirm its value is as expected\n assert " }, { "id": 59157, "commit_id": "7092f0403a97154d3c3909e3fcd95e7db5776246", "repo": "prefect", "path": "tests/test_serializers.py", "file_name": "test_serializers.py", "fun_name": "test_simple_roundtrip_with_builtin_pickle", "commit_message": "Remove deep serialization from `PickleSerializer` and add tests (#7044)", "code": "def test_simple_roundtrip_with_builtin_pickle(self, data):\n serializer = PickleSerializer(picklelib=\"pickle\")\n serialized = serializer.dumps(data)\n assert serializer.loads(serialized) == data\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 33, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 32, "n_ast_nodes": 53, "n_identifiers": 9, "random_cut": "def test_simple_roundtrip_with_builtin_pickle(self, data):\n serializer = PickleSerializer(picklelib=\"pickle\")\n serialized = serializer.du" }, { "id": 143802, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/models/torch/torch_action_dist.py", "file_name": "torch_action_dist.py", "fun_name": "deterministic_sample", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def deterministic_sample(self) -> TensorType:\n arr = [torch.argmax(cat.probs, -1) for cat in self.cats]\n sample_ = torch.stack(arr, dim=1)\n if isinstance(self.action_space, gym.spaces.Box):\n sample_ 
= torch.reshape(sample_, [-1] + list(self.action_space.shape))\n self.last_sample = sample_\n return sample_\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 75, "n_words": 30, "vocab_size": 24, "complexity": 3, "nloc": 7, "token_counts": 83, "n_ast_nodes": 127, "n_identifiers": 21, "random_cut": "def deterministic_sample(self) -> TensorType:\n arr = [torch.argmax(cat.probs, -1) for cat in self.cats]\n " }, { "id": 257267, "commit_id": "f8e02310bf0dfbd1ab79a1c3c73434e0aeba4f4b", "repo": "haystack", "path": "test/test_pipeline_yaml.py", "file_name": "test_pipeline_yaml.py", "fun_name": "test_load_yaml_incompatible_version", "commit_message": "Validate YAML files without loading the nodes (#2438)\n\n* Remove BasePipeline and make a module for RayPipeline\r\n\r\n* Can load pipelines from yaml, plenty of issues left\r\n\r\n* Extract graph validation logic into _add_node_to_pipeline_graph & refactor load_from_config and add_node to use it\r\n\r\n* Fix pipeline tests\r\n\r\n* Move some tests out of test_pipeline.py and create MockDenseRetriever\r\n\r\n* myoy and pylint (silencing too-many-public-methods)\r\n\r\n* Fix issue found in some yaml files and in schema files\r\n\r\n* Fix paths to YAML and fix some typos in Ray\r\n\r\n* Fix eval tests\r\n\r\n* Simplify MockDenseRetriever\r\n\r\n* Fix Ray test\r\n\r\n* Accidentally pushed merge coinflict, fixed\r\n\r\n* Typo in schemas\r\n\r\n* Typo in _json_schema.py\r\n\r\n* Slightly reduce noisyness of version validation warnings\r\n\r\n* Fix version logs tests\r\n\r\n* Fix version logs tests again\r\n\r\n* remove seemingly unused file\r\n\r\n* Add check and test to avoid adding the same node to the pipeline twice\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Revert config to pipeline_config\r\n\r\n* Remo0ve unused import\r\n\r\n* Complete reverting to pipeline_config\r\n\r\n* Some more stray config=\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Feedback\r\n\r\n* Move back other_nodes tests into pipeline tests temporarily\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fixing tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fixing ray and standard pipeline tests\r\n\r\n* Rename colliding load() methods in dense retrievers and faiss\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix mypy on ray.py as well\r\n\r\n* Add check for no root node\r\n\r\n* Fix tests to use load_from_directory and load_index\r\n\r\n* Try to workaround the disabled add_node of RayPipeline\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix Ray test\r\n\r\n* Fix FAISS tests\r\n\r\n* Relax class check in _add_node_to_pipeline_graph\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Try to fix mypy in ray.py\r\n\r\n* unused import\r\n\r\n* Try another fix for Ray\r\n\r\n* Fix connector tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix ray\r\n\r\n* Update Documentation & Code Style\r\n\r\n* use BaseComponent.load() in pipelines/base.py\r\n\r\n* another round of feedback\r\n\r\n* stray BaseComponent.load()\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix FAISS tests too\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>\r\nCo-authored-by: tstadel <60758086+tstadel@users.noreply.github.com>", "code": "def test_load_yaml_incompatible_version(tmp_path, caplog):\n with open(tmp_path / \"tmp_config.yml\", \"w\") as tmp_file:\n tmp_file.write(\n \n )\n with caplog.at_level(logging.WARNING):\n 
Pipeline.load_from_yaml(path=tmp_path / \"tmp_config.yml\")\n assert \"version '1.1.0'\" in caplog.text\n assert f\"Haystack {haystack.__version__}\" in caplog.text\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 79, "n_words": 27, "vocab_size": 22, "complexity": 1, "nloc": 20, "token_counts": 58, "n_ast_nodes": 113, "n_identifiers": 15, "random_cut": "def test_load_yaml_incompatible_version(tmp_path, caplog):\n with open(tmp_path / \"tmp_config.yml\", \"w\") as tmp_file:\n tmp_file.write(\n \n )\n with caplog.at_level(logging.WARNING):\n Pipeline.load_from_yaml(path=tmp_path / \"tmp_config.yml\")\n assert \"version '1.1.0'\" in caplog.text\n " }, { "id": 217040, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/encodings/iso8859_4.py", "file_name": "iso8859_4.py", "fun_name": "getregentry", "commit_message": "add python 3.10.4 for windows", "code": "def getregentry():\n return codecs.CodecInfo(\n name='iso8859-4',\n encode=Codec().encode,\n decode=Codec().decode,\n incrementalencoder=IncrementalEncoder,\n incrementaldecoder=IncrementalDecoder,\n streamreader=StreamReader,\n streamwriter=StreamWriter,\n )\n\n\n### Decoding Table\n\ndecoding_table = (\n '\\x00' # 0x00 -> NULL\n '\\x01' # 0x01 -> START OF HEADING\n '\\x02' # 0x02 -> START OF TEXT\n '\\x03' # 0x03 -> END OF TEXT\n '\\x04' # 0x04 -> END OF TRANSMISSION\n '\\x05' # 0x05 -> ENQUIRY\n '\\x06' # 0x06 -> ACKNOWLEDGE\n '\\x07' # 0x07 -> BELL\n '\\x08' # 0x08 -> BACKSPACE\n '\\t' # 0x09 -> HORIZONTAL TABULATION\n '\\n' # 0x0A -> LINE FEED\n '\\x0b' # 0x0B -> VERTICAL TABULATION\n '\\x0c' # 0x0C -> FORM FEED\n '\\r' # 0x0D -> CARRIAGE RETURN\n '\\x0e' # 0x0E -> SHIFT OUT\n '\\x0f' # 0x0F -> SHIFT IN\n '\\x10' # 0x10 -> DATA LINK ESCAPE\n '\\x11' # 0x11 -> DEVICE CONTROL ONE\n '\\x12' # 0x12 -> DEVICE CONTROL TWO\n '\\x13' # 0x13 -> DEVICE CONTROL THREE\n '\\x14' # 0x14 -> DEVICE CONTROL FOUR\n '\\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE\n '\\x16' # 0x16 -> SYNCHRONOUS IDLE\n '\\x17' # 0x17 -> END OF TRANSMISSION BLOCK\n '\\x18' # 0x18 -> CANCEL\n '\\x19' # 0x19 -> END OF MEDIUM\n '\\x1a' # 0x1A -> SUBSTITUTE\n '\\x1b' # 0x1B -> ESCAPE\n '\\x1c' # 0x1C -> FILE SEPARATOR\n '\\x1d' # 0x1D -> GROUP SEPARATOR\n '\\x1e' # 0x1E -> RECORD SEPARATOR\n '\\x1f' # 0x1F -> UNIT SEPARATOR\n ' ' # 0x20 -> SPACE\n '!' # 0x21 -> EXCLAMATION MARK\n '\"' # 0x22 -> QUOTATION MARK\n '#' # 0x23 -> NUMBER SIGN\n '$' # 0x24 -> DOLLAR SIGN\n '%' # 0x25 -> PERCENT SIGN\n '&' # 0x26 -> AMPERSAND\n \"'\" # 0x27 -> APOSTROPHE\n '(' # 0x28 -> LEFT PARENTHESIS\n ')' # 0x29 -> RIGHT PARENTHESIS\n '*' # 0x2A -> ASTERISK\n '+' # 0x2B -> PLUS SIGN\n ',' # 0x2C -> COMMA\n '-' # 0x2D -> HYPHEN-MINUS\n '.' # 0x2E -> FULL STOP\n '/' # 0x2F -> SOLIDUS\n '0' # 0x30 -> DIGIT ZERO\n '1' # 0x31 -> DIGIT ONE\n '2' # 0x32 -> DIGIT TWO\n '3' # 0x33 -> DIGIT THREE\n '4' # 0x34 -> DIGIT FOUR\n '5' # 0x35 -> DIGIT FIVE\n '6' # 0x36 -> DIGIT SIX\n '7' # 0x37 -> DIGIT SEVEN\n '8' # 0x38 -> DIGIT EIGHT\n '9' # 0x39 -> DIGIT NINE\n ':' # 0x3A -> COLON\n ';' # 0x3B -> SEMICOLON\n '<' # 0x3C -> LESS-THAN SIGN\n '=' # 0x3D -> EQUALS SIGN\n '>' # 0x3E -> GREATER-THAN SIGN\n '?' 
# 0x3F -> QUESTION MARK\n '@' # 0x40 -> COMMERCIAL AT\n 'A' # 0x41 -> LATIN CAPITAL LETTER A\n 'B' # 0x42 -> LATIN CAPITAL LETTER B\n 'C' # 0x43 -> LATIN CAPITAL LETTER C\n 'D' # 0x44 -> LATIN CAPITAL LETTER D\n 'E' # 0x45 -> LATIN CAPITAL LETTER E\n 'F' # 0x46 -> LATIN CAPITAL LETTER F\n 'G' # 0x47 -> LATIN CAPITAL LETTER G\n 'H' # 0x48 -> LATIN CAPITAL LETTER H\n 'I' # 0x49 -> LATIN CAPITAL LETTER I\n 'J' # 0x4A -> LATIN CAPITAL LETTER J\n 'K' # 0x4B -> LATIN CAPITAL LETTER K\n 'L' # 0x4C -> LATIN CAPITAL LETTER L\n 'M' # 0x4D -> LATIN CAPITAL LETTER M\n 'N' # 0x4E -> LATIN CAPITAL LETTER N\n 'O' # 0x4F -> LATIN CAPITAL LETTER O\n 'P' # 0x50 -> LATIN CAPITAL LETTER P\n 'Q' # 0x51 -> LATIN CAPITAL LETTER Q\n 'R' # 0x52 -> LATIN CAPITAL LETTER R\n 'S' # 0x53 -> LATIN CAPITAL LETTER S\n 'T' # 0x54 -> LATIN CAPITAL LETTER T\n 'U' # 0x55 -> LATIN CAPITAL LETTER U\n 'V' # 0x56 -> LATIN CAPITAL LETTER V\n 'W' # 0x57 -> LATIN CAPITAL LETTER W\n 'X' # 0x58 -> LATIN CAPITAL LETTER X\n 'Y' # 0x59 -> LATIN CAPITAL LETTER Y\n 'Z' # 0x5A -> LATIN CAPITAL LETTER Z\n '[' # 0x5B -> LEFT SQUARE BRACKET\n '\\\\' # 0x5C -> REVERSE SOLIDUS\n ']' # 0x5D -> RIGHT SQUARE BRACKET\n '^' # 0x5E -> CIRCUMFLEX ACCENT\n '_' # 0x5F -> LOW LINE\n '`' # 0x60 -> GRAVE ACCENT\n 'a' # 0x61 -> LATIN SMALL LETTER A\n 'b' # 0x62 -> LATIN SMALL LETTER B\n 'c' # 0x63 -> LATIN SMALL LETTER C\n 'd' # 0x64 -> LATIN SMALL LETTER D\n 'e' # 0x65 -> LATIN SMALL LETTER E\n 'f' # 0x66 -> LATIN SMALL LETTER F\n 'g' # 0x67 -> LATIN SMALL LETTER G\n 'h' # 0x68 -> LATIN SMALL LETTER H\n 'i' # 0x69 -> LATIN SMALL LETTER I\n 'j' # 0x6A -> LATIN SMALL LETTER J\n 'k' # 0x6B -> LATIN SMALL LETTER K\n 'l' # 0x6C -> LATIN SMALL LETTER L\n 'm' # 0x6D -> LATIN SMALL LETTER M\n 'n' # 0x6E -> LATIN SMALL LETTER N\n 'o' # 0x6F -> LATIN SMALL LETTER O\n 'p' # 0x70 -> LATIN SMALL LETTER P\n 'q' # 0x71 -> LATIN SMALL LETTER Q\n 'r' # 0x72 -> LATIN SMALL LETTER R\n 's' # 0x73 -> LATIN SMALL LETTER S\n 't' # 0x74 -> LATIN SMALL LETTER T\n 'u' # 0x75 -> LATIN SMALL LETTER U\n 'v' # 0x76 -> LATIN SMALL LETTER V\n 'w' # 0x77 -> LATIN SMALL LETTER W\n 'x' # 0x78 -> LATIN SMALL LETTER X\n 'y' # 0x79 -> LATIN SMALL LETTER Y\n 'z' # 0x7A -> LATIN SMALL LETTER Z\n '{' # 0x7B -> LEFT CURLY BRACKET\n '|' # 0x7C -> VERTICAL LINE\n '}' # 0x7D -> RIGHT CURLY BRACKET\n '~' # 0x7E -> TILDE\n '\\x7f' # 0x7F -> DELETE\n '\\x80' # 0x80 -> \n '\\x81' # 0x81 -> \n '\\x82' # 0x82 -> \n '\\x83' # 0x83 -> \n '\\x84' # 0x84 -> \n '\\x85' # 0x85 -> \n '\\x86' # 0x86 -> \n '\\x87' # 0x87 -> \n '\\x88' # 0x88 -> \n '\\x89' # 0x89 -> \n '\\x8a' # 0x8A -> \n '\\x8b' # 0x8B -> \n '\\x8c' # 0x8C -> \n '\\x8d' # 0x8D -> \n '\\x8e' # 0x8E -> \n '\\x8f' # 0x8F -> \n '\\x90' # 0x90 -> \n '\\x91' # 0x91 -> \n '\\x92' # 0x92 -> \n '\\x93' # 0x93 -> \n '\\x94' # 0x94 -> \n '\\x95' # 0x95 -> \n '\\x96' # 0x96 -> \n '\\x97' # 0x97 -> \n '\\x98' # 0x98 -> \n '\\x99' # 0x99 -> \n '\\x9a' # 0x9A -> \n '\\x9b' # 0x9B -> \n '\\x9c' # 0x9C -> \n '\\x9d' # 0x9D -> \n '\\x9e' # 0x9E -> \n '\\x9f' # 0x9F -> \n '\\xa0' # 0xA0 -> NO-BREAK SPACE\n '\\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK\n '\\u0138' # 0xA2 -> LATIN SMALL LETTER KRA\n '\\u0156' # 0xA3 -> LATIN CAPITAL LETTER R WITH CEDILLA\n '\\xa4' # 0xA4 -> CURRENCY SIGN\n '\\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE\n '\\u013b' # 0xA6 -> LATIN CAPITAL LETTER L WITH CEDILLA\n '\\xa7' # 0xA7 -> SECTION SIGN\n '\\xa8' # 0xA8 -> DIAERESIS\n '\\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON\n '\\u0112' # 0xAA -> LATIN CAPITAL LETTER E 
WITH MACRON\n '\\u0122' # 0xAB -> LATIN CAPITAL LETTER G WITH CEDILLA\n '\\u0166' # 0xAC -> LATIN CAPITAL LETTER T WITH STROKE\n '\\xad' # 0xAD -> SOFT HYPHEN\n '\\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON\n '\\xaf' # 0xAF -> MACRON\n '\\xb0' # 0xB0 -> DEGREE SIGN\n '\\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK\n '\\u02db' # 0xB2 -> OGONEK\n '\\u0157' # 0xB3 -> LATIN SMALL LETTER R WITH CEDILLA\n '\\xb4' # 0xB4 -> ACUTE ACCENT\n '\\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE\n '\\u013c' # 0xB6 -> LATIN SMALL LETTER L WITH CEDILLA\n '\\u02c7' # 0xB7 -> CARON\n '\\xb8' # 0xB8 -> CEDILLA\n '\\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON\n '\\u0113' # 0xBA -> LATIN SMALL LETTER E WITH MACRON\n '\\u0123' # 0xBB -> LATIN SMALL LETTER G WITH CEDILLA\n '\\u0167' # 0xBC -> LATIN SMALL LETTER T WITH STROKE\n '\\u014a' # 0xBD -> LATIN CAPITAL LETTER ENG\n '\\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON\n '\\u014b' # 0xBF -> LATIN SMALL LETTER ENG\n '\\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON\n '\\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE\n '\\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX\n '\\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE\n '\\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS\n '\\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE\n '\\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE\n '\\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK\n '\\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON\n '\\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE\n '\\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK\n '\\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS\n '\\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE\n '\\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE\n '\\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX\n '\\u012a' # 0xCF -> LATIN CAPITAL LETTER I WITH MACRON\n '\\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE\n '\\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA\n '\\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON\n '\\u0136' # 0xD3 -> LATIN CAPITAL LETTER K WITH CEDILLA\n '\\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX\n '\\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE\n '\\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS\n '\\xd7' # 0xD7 -> MULTIPLICATION SIGN\n '\\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE\n '\\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK\n '\\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE\n '\\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX\n '\\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS\n '\\u0168' # 0xDD -> LATIN CAPITAL LETTER U WITH TILDE\n '\\u016a' # 0xDE -> LATIN CAPITAL LETTER U WITH MACRON\n '\\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S\n '\\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON\n '\\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE\n '\\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX\n '\\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE\n '\\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS\n '\\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE\n '\\xe6' # 0xE6 -> LATIN SMALL LETTER AE\n '\\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK\n '\\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON\n '\\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE\n '\\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK\n '\\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS\n '\\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE\n '\\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE\n '\\xee' # 0xEE -> LATIN SMALL LETTER I 
WITH CIRCUMFLEX\n '\\u012b' # 0xEF -> LATIN SMALL LETTER I WITH MACRON\n '\\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE\n '\\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA\n '\\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON\n '\\u0137' # 0xF3 -> LATIN SMALL LETTER K WITH CEDILLA\n '\\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX\n '\\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE\n '\\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS\n '\\xf7' # 0xF7 -> DIVISION SIGN\n '\\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE\n '\\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK\n '\\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE\n '\\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX\n '\\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS\n '\\u0169' # 0xFD -> LATIN SMALL LETTER U WITH TILDE\n '\\u016b' # 0xFE -> LATIN SMALL LETTER U WITH MACRON\n '\\u02d9' # 0xFF -> DOT ABOVE\n)\n\n### Encoding table\nencoding_table=codecs.charmap_build(decoding_table)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 4232, "n_words": 1945, "vocab_size": 677, "complexity": 1, "nloc": 10, "token_counts": 46, "n_ast_nodes": 1278, "n_identifiers": 18, "random_cut": "def getregentry():\n return codecs.CodecInfo(\n name='iso8859-4',\n encode=Codec().encode,\n decode=Codec().decode,\n incrementalencoder=IncrementalEncoder,\n incrementaldecoder=IncrementalDecoder,\n streamreader=StreamReader,\n streamwriter=StreamWriter,\n )\n\n\n### Decoding Table\n\ndecoding_table = (\n '\\x00' # 0x00 -> NULL\n '\\x01' # 0x01 -> START OF HEADING\n '\\x02' # 0x02 -> START OF TEXT\n '\\x03' # 0x03 -> END OF TEXT\n '\\x04' # 0x04 -> END OF TRANSMISSION\n '\\x05' # 0x05 -> ENQUIRY\n '\\x06' # 0x06 -> ACKNOWLEDGE\n '\\x07' # 0x07 -> BELL\n '\\x08' # 0x08 -> BACKSPACE\n '\\t' # 0x09 -> HORIZONTAL TABULATION\n '\\n' # 0x0A -> LINE FEED\n '\\x0b' # 0x0B -> VERTICAL TABULATION\n '\\x0c' # 0x0C -> FORM FEED\n '\\r' # 0x0D -> CARRIAGE RETURN\n '\\x0e' # 0x0E -> SHIFT OUT\n '\\x0f' # 0x0F -> SHIFT IN\n '\\x10' # 0x10 -> DATA LINK ESCAPE\n '\\x11' # 0x11 -> DEVICE CONTROL ONE\n '\\x12' # 0x12 -> DEVICE CONTROL TWO\n '\\x13' # 0x13 -> DEVICE CONTROL THREE\n '\\x14' # 0x14 -> DEVICE CONTROL FOUR\n '\\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE\n '\\x16' # 0x16 -> SYNCHRONOUS IDLE\n '\\x17' # 0x17 -> END OF TRANSMISSION BLOCK\n '\\x18' # 0x18 -> CANCEL\n '\\x19' # 0x19 -> END OF MEDIUM\n '\\x1a' # 0x1A -> SUBSTITUTE\n '\\x1b' # 0x1B -> ESCAPE\n '\\x1c' # 0x1C -> FILE SEPARATOR\n '\\x1d' # 0x1D -> GROUP SEPARATOR\n '\\x1e' # 0x1E -> RECORD SEPARATOR\n '\\x1f' # 0x1F -> UNIT SEPARATOR\n ' ' # 0x20 -> SPACE\n '!' # 0x21 -> EXCLAMATION MARK\n '\"' # 0x22 -> QUOTATION MARK\n '#' # 0x23 -> NUMBER SIGN\n '$' # 0x24 -> DOLLAR SIGN\n '%' # 0x25 -> PERCENT SIGN\n '&' # 0x26 -> AMPERSAND\n \"'\" # 0x27 -> APOSTROPHE\n '(' # 0x28 -> LEFT PARENTHESIS\n ')' # 0x29 -> RIGHT PARENTHESIS\n '*' # 0x2A -> ASTERISK\n '+' # 0x2B -> PLUS SIGN\n ',' # 0x2C -> COMMA\n '-' # 0x2D -> HYPHEN-MINUS\n '.' # 0x2E -> FULL STOP\n '/' # 0x2F -> SOLIDUS\n '0' # 0x30 -> DIGIT ZERO\n '1' # 0x31 -> DIGIT ONE\n '2' # 0x32 -> DIGIT TWO\n '3' # 0x33 -> DIGIT THREE\n '4' # 0x34 -> DIGIT FOUR\n '5' # 0x35 -> DIGIT FIVE\n '6' # 0x36 -> DIGIT SIX\n '7' # 0x37 -> DIGIT SEVEN\n '8' # 0x38 -> DIGIT EIGHT\n '9' # 0x39 -> DIGIT NINE\n ':' # 0x3A -> COLON\n ';' # 0x3B -> SEMICOLON\n '<' # 0x3C -> LESS-THAN SIGN\n '=' # 0x3D -> EQUALS SIGN\n '>' # 0x3E -> GREATER-THAN SIGN\n '?' 
# 0x3F -> QUESTION MARK\n '@' # 0x40 -> COMMERCIAL AT\n 'A' # 0x41 -> LATIN CAPITAL LETTER A\n 'B' # 0x42 -> LATIN CAPITAL LETTER B\n 'C' # 0x43 -> LATIN CAPITAL LETTER C\n 'D' # 0x44 -> LATIN CAPITAL LETTER D\n 'E' # 0x45 -> LATIN CAPITAL LETTER E\n 'F' # 0x46 -> LATIN CAPITAL LETTER F\n 'G' # 0x47 -> LATIN CAPITAL LETTER G\n 'H' # 0x48 -> LATIN CAPITAL LETTER H\n 'I' # 0x49 -> LATIN CAPITAL LETTER I\n 'J' # 0x4A -> LATIN CAPITAL LETTER J\n 'K' # 0x4B -> LATIN CAPITAL LETTER K\n 'L' # 0x4C -> LATIN CAPITAL LETTER L\n 'M' # 0x4D -> LATIN CAPITAL LETTER M\n 'N' # 0x4E -> LATIN CAPITAL LETTER N\n 'O' # 0x4F -> LATIN CAPITAL LETTER O\n 'P' # 0x50 -> LATIN CAPITAL LETTER P\n 'Q' # 0x51 -> LATIN CAPITAL LETTER Q\n 'R' # 0x52 -> LATIN CAPITAL LETTER R\n 'S' # 0x53 -> LATIN CAPITAL LETTER S\n 'T' # 0x54 -> LATIN CAPITAL LETTER T\n 'U' # 0x55 -> LATIN CAPITAL LETTER U\n 'V' # 0x56 -> LATIN CAPITAL LETTER V\n 'W' # 0x57 -> LATIN CAPITAL LETTER W\n 'X' # 0x58 -> LATIN CAPITAL LETTER X\n 'Y' # 0x59 -> LATIN CAPITAL LETTER Y\n 'Z' # 0x5A -> LATIN CAPITAL LETTER Z\n '[' # 0x5B -> LEFT SQUARE BRACKET\n '\\\\' # 0x5C -> REVERSE SOLIDUS\n ']' # 0x5D -> RIGHT SQUARE BRACKET\n '^' # 0x5E -> CIRCUMFLEX ACCENT\n '_' # 0x5F -> LOW LINE\n '`' # 0x60 -> GRAVE ACCENT\n 'a' # 0x61 -> LATIN SMALL LETTER A\n 'b' # 0x62 -> LATIN SMALL LETTER B\n 'c' # 0x63 -> LATIN SMALL LETTER C\n 'd' # 0x64 -> LATIN SMALL LETTER D\n 'e' # 0x65 -> LATIN SMALL LETTER E\n 'f' # 0x66 -> LATIN SMALL LETTER F\n 'g' # 0x67 -> LATIN SMALL LETTER G\n 'h' # 0x68 -> LATIN SMALL LETTER H\n 'i' # 0x69 -> LATIN SMALL LETTER I\n 'j' # 0x6A -> LATIN SMALL LETTER J\n 'k' # 0x6B -> LATIN SMALL LETTER K\n 'l' # 0x6C -> LATIN SMALL LETTER L\n 'm' # 0x6D -> LATIN SMALL LETTER M\n 'n' # 0x6E -> LATIN SMALL LETTER N\n 'o' # 0x6F -> LATIN SMALL LETTER O\n 'p' # 0x70 -> LATIN SMALL LETTER P\n 'q' # 0x71 -> LATIN SMALL LETTER Q\n 'r' # 0x72 -> LATIN SMALL LETTER R\n 's' # 0x73 -> LATIN SMALL LETTER S\n 't' # 0x74 -> LATIN SMALL LETTER T\n 'u' # 0x75 -> LATIN SMALL LETTER U\n 'v' # 0x76 -> LATIN SMALL LETTER V\n 'w' # 0x77 -> LATIN SMALL LETTER W\n 'x' # 0x78 -> LATIN SMALL LETTER X\n 'y' # 0x79 -> LATIN SMALL LETTER Y\n 'z' # 0x7A -> LATIN SMALL LETTER Z\n '{' # 0x7B -> LEFT CURLY BRACKET\n '|' # 0x7C -> VERTICAL LINE\n '}' # 0x7D -> RIGHT CURLY BRACKET\n '~' # 0x7E -> TILDE\n '\\x7f' # 0x7F -> DELETE\n '\\x80' # 0x80 -> \n '\\x81' # 0x81 -> \n '\\x82' # 0x82 -> \n '\\x83' # 0x83 -> \n '\\x84' # 0x84 -> \n '\\x85' # 0x85 -> \n '\\x86' # 0x86 -> \n '\\x87' # 0x87 -> \n '\\x88' # 0x88 -> \n '\\x89' # 0x89 -> \n '\\x8a' # 0x8A -> \n '\\x8b' # 0x8B -> \n '\\x8c' # 0x8C -> \n '\\x8d' # 0x8D -> \n '\\x8e' # 0x8E -> \n '\\x8f' # 0x8F -> \n '\\x90' # 0x90 -> \n '\\x91' # 0x91 -> \n '\\x92' # 0x92 -> \n '\\x93' # 0x93 -> \n '\\x94' # 0x94 -> \n '\\x95' # 0x95 -> \n '\\x96' # 0x96 -> \n '\\x97' # 0x97 -> \n '\\x98' # 0x98 -> \n '\\x99' # 0x99 -> \n '\\x9a' # 0x9A -> \n '\\x9b' # 0x9B -> \n '\\x9c' # 0x9C -> \n '\\x9d' # 0x9D -> \n '\\x9e' # 0x9E -> \n '\\x9f' # 0x9F -> \n '\\xa0' # 0xA0 -> NO-BREAK SPACE\n '\\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK\n '\\u0138' # 0xA2 -> LATIN SMALL LETTER KRA\n '\\u0156' # 0xA3 -> LATIN CAPITAL LETTER R WITH CEDILLA\n '\\xa4' # 0xA4 -> CURRENCY SIGN\n '\\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE\n '\\u013b' # 0xA6 -> LATIN CAPITAL LETTER L WITH CEDILLA\n '\\xa7' # 0xA7 -> SECTION SIGN\n '\\xa8' # 0xA8 -> DIAERESIS\n '\\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON\n '\\u0112' # 0xAA -> LATIN CAPITAL LETTER E 
WITH MACRON\n '\\u0122' # 0xAB -> LATIN CAPITAL LETTER G WITH CEDILLA\n '\\u0166' # 0xAC -> LATIN CAPITAL LETTER T WITH STROKE\n '\\xad' # 0xAD -> SOFT HYPHEN\n '\\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON\n '\\xaf' # 0xAF -> MACRON\n '\\xb0' # 0xB0 -> DEGREE SIGN\n '\\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK\n '\\u02db' # 0xB2 -> OGONEK\n '\\u0157' # 0xB3 -> LATIN SMALL LETTER R WITH CEDILLA\n '\\xb4' # 0xB4 -> ACUTE ACCENT\n '\\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE\n '\\u013c' # 0xB6 -> LATIN SMALL LETTER L WITH CEDILLA\n '\\u02c7' # 0xB7 -> CARON\n '\\xb8' # 0xB8 -> CEDILLA\n '\\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON\n '\\u0113' # 0xBA -> LATIN SMALL LETTER E WITH MACRON\n '\\u0123' # 0xBB -> LATIN SMALL LETTER G WITH CEDILLA\n '\\u0167' # 0xBC -> LATIN SMALL LETTER T WITH STROKE\n '\\u014a' # 0xBD -> LATIN CAPITAL LETTER ENG\n '\\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON\n '\\u014b' # 0xBF -> LATIN SMALL LETTER ENG\n '\\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON\n '\\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE\n '\\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX\n '\\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE\n '\\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS\n '\\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE\n '\\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE\n '\\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK\n '\\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON\n '\\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE\n '\\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK\n '\\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS\n '\\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE\n '\\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE\n '\\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX\n '\\u012a' # 0xCF -> LATIN CAPITAL LETTER I WITH MACRON\n '\\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE\n '\\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA\n '\\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON\n '\\u0136' # 0xD3 -> LATIN CAPITAL LETTER K WITH CEDILLA\n '\\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX\n '\\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE\n '\\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS\n '\\xd7' # 0xD7 -> MULTIPLICATION SIGN\n '\\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE\n '\\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK\n '\\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE\n '\\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX\n '\\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS\n '\\u0168' # 0xDD -> LATIN CAPITAL LETTER U WITH TILDE\n '\\u016a' # 0xDE -> LATIN CAPITAL LETTER U WITH MACRON\n '\\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S\n '\\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON\n '\\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE\n '\\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX\n '\\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE\n '\\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS\n '\\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE\n '\\xe6' # 0xE6 -> LATIN SMALL LETTER AE\n '\\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK\n '\\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON\n '\\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH " }, { "id": 21137, "commit_id": "2bf70b74167868133809a926aa6393438fb06db4", "repo": "pipenv", "path": "pipenv/utils/spinner.py", "file_name": "spinner.py", "fun_name": "create_spinner", "commit_message": "Removed usage of fs_str from vistir (#5062)\n\n* Removed usage 
of fs_str from vistir\r\n\r\n This function was all about compatability of py2-py3.3 versions.\r\n Later versions don't need it.\r\n\r\n* Explicitly convert dict values to strings\r\n\r\n* Add news fragment", "code": "def create_spinner(text, setting, nospin=None, spinner_name=None):\n from pipenv.vendor.vistir import spin\n\n if not spinner_name:\n spinner_name = setting.PIPENV_SPINNER\n if nospin is None:\n nospin = setting.PIPENV_NOSPIN\n with spin.create_spinner(\n spinner_name=spinner_name,\n start_text=text,\n nospin=nospin,\n write_to_stdout=False,\n ) as sp:\n yield sp\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 96, "n_words": 33, "vocab_size": 30, "complexity": 3, "nloc": 13, "token_counts": 69, "n_ast_nodes": 105, "n_identifiers": 14, "random_cut": "def create_spinner(text, setting, nospin=None, spinner_name=None):\n from pipenv.vendor.vistir import spin\n\n if not spinner_name:\n spinner_name = setting.PIPENV_SPINNER\n if nospin is None:\n nospin = setting.PIPENV_NOSPIN\n with spin.create_spinner(\n spinner_name=spinner_name,\n start_text=text,\n " }, { "id": 245724, "commit_id": "d915740fa8228cf57741b27d9e5d66e358456b8e", "repo": "mmdetection", "path": "mmdet/models/task_modules/coders/tblr_bbox_coder.py", "file_name": "tblr_bbox_coder.py", "fun_name": "encode", "commit_message": "[Refactor] Refactor anchor head and base head with boxlist (#8625)\n\n* Refactor anchor head\r\n\r\n* Update\r\n\r\n* Update\r\n\r\n* Update\r\n\r\n* Add a series of boxes tools\r\n\r\n* Fix box type to support n x box_dim boxes\r\n\r\n* revert box type changes\r\n\r\n* Add docstring\r\n\r\n* refactor retina_head\r\n\r\n* Update\r\n\r\n* Update\r\n\r\n* Fix comments\r\n\r\n* modify docstring of coder and ioucalculator\r\n\r\n* Replace with_boxlist with use_box_type", "code": "def encode(self, bboxes, gt_bboxes):\n \n bboxes = get_box_tensor(bboxes)\n gt_bboxes = get_box_tensor(gt_bboxes)\n assert bboxes.size(0) == gt_bboxes.size(0)\n assert bboxes.size(-1) == gt_bboxes.size(-1) == 4\n encoded_bboxes = bboxes2tblr(\n bboxes, gt_bboxes, normalizer=self.normalizer)\n return encoded_bboxes\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 88, "n_words": 28, "vocab_size": 21, "complexity": 1, "nloc": 8, "token_counts": 70, "n_ast_nodes": 110, "n_identifiers": 9, "random_cut": "def encode(self, bboxes, gt_bboxes):\n \n bboxes = " }, { "id": 177407, "commit_id": "3724ba4ebee5b1cec2e36faab30777bfdc16a6fd", "repo": "networkx", "path": "networkx/algorithms/tests/test_lowest_common_ancestors.py", "file_name": "test_lowest_common_ancestors.py", "fun_name": "test_tree_all_pairs_lca_default_root", "commit_message": "Renamed test functions in test_lowest_common_ancestors (#6110)\n\n* Renamed test functions in test_lowest_common_ancestors\r\n\r\n* Updated test method names.\r\n\r\n* Removed redundant docstrings\r\n\r\n* Minor touchups.\r\n\r\nCo-authored-by: Ross Barnowski ", "code": "def test_tree_all_pairs_lca_default_root(self):\n assert dict(tree_all_pairs_lca(self.DG)) == self.ans\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 19, "n_ast_nodes": 31, "n_identifiers": 6, "random_cut": "def test_tree_all_pairs_lca_default_root(self):\n 
assert dict(tree_all_pairs_lca(sel" }, { "id": 300864, "commit_id": "8f4caf414124f380a8f5e1d54aedb54a8f6c5c05", "repo": "core", "path": "homeassistant/scripts/benchmark/__init__.py", "file_name": "__init__.py", "fun_name": "state_changed_helper", "commit_message": "Clean up accessing event helpers via hass (#72011)", "code": "async def state_changed_helper(hass):\n \n count = 0\n entity_id = \"light.kitchen\"\n event = asyncio.Event()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 24, "n_words": 12, "vocab_size": 10, "complexity": 3, "nloc": 18, "token_counts": 114, "n_ast_nodes": 38, "n_identifiers": 7, "random_cut": "async def state_changed_helper(hass):\n \n count = 0\n entity_id = \"light.kitchen\"\n event = asyncio.Event()\n" }, { "id": 107498, "commit_id": "f156db08eee54d285ab0fb4e031e48d078ba6aa3", "repo": "matplotlib", "path": "lib/matplotlib/backend_bases.py", "file_name": "backend_bases.py", "fun_name": "_update_view", "commit_message": "DOC: More cleanup axes -> Axes", "code": "def _update_view(self):\n \n nav_info = self._nav_stack()\n if nav_info is None:\n return\n # Retrieve all items at once to avoid any risk of GC deleting an Axes\n # while in the middle of the loop below.\n items = list(nav_info.items())\n for ax, (view, (pos_orig, pos_active)) in items:\n ax._set_view(view)\n # Restore both the original and modified positions\n ax._set_position(pos_orig, 'original')\n ax._set_position(pos_active, 'active')\n self.canvas.draw_idle()\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 169, "n_words": 58, "vocab_size": 49, "complexity": 3, "nloc": 10, "token_counts": 73, "n_ast_nodes": 125, "n_identifiers": 14, "random_cut": "def _update_view(self):\n \n nav_info = self._nav_stack()\n if nav_info is None:\n return\n # Retrieve all items at once to avoid any risk of GC deleting an Axes\n # while in the middle of the loop below.\n items = list(nav_info.items())\n " }, { "id": 94074, "commit_id": "e4f3e0a2e26224c5b8883c03ac81f08e99f1bc5b", "repo": "sentry", "path": "src/sentry/api/serializers/models/sentry_function.py", "file_name": "sentry_function.py", "fun_name": "serialize", "commit_message": "Sentry Functions: Webhooks Migrations (#37313)\n\n* feat(integrations): new field for sentry_functions table\r\n\r\n* fix(integrations): add working integration, no default", "code": "def serialize(self, obj, attrs, user):\n events = [event for event in obj.events]\n data = {\n \"name\": obj.name,\n \"slug\": obj.slug,\n \"author\": obj.author,\n \"code\": obj.code,\n \"overview\": obj.overview,\n \"external_id\": obj.external_id,\n \"events\": events,\n }\n return data\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 136, "n_words": 32, "vocab_size": 30, "complexity": 2, "nloc": 12, "token_counts": 68, "n_ast_nodes": 108, "n_identifiers": 14, "random_cut": "def serialize(self, obj, attrs, user):\n events = [event for event in obj.events]\n data = {\n \"name\": obj.name,\n \"slug\": obj.slug,\n \"author\": obj.author,\n \"code\": obj.code,\n \"overview\": " }, { "id": 219026, "commit_id": "0820c040ec2815f40bd0e469e27c2bf4d2cc33bc", "repo": "XX-Net", "path": "code/default/lib/noarch/front_base/openssl_wrap.py", "file_name": "openssl_wrap.py", "fun_name": "notbefore", "commit_message": 
"v4.6.0 compactiable with python 2.7.", "code": "def notbefore(self):\n t = self.x509.get_notBefore()\n return datetime.datetime.strptime(t, \"%Y%m%d%H%M%SZ\")\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 21, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 25, "n_ast_nodes": 42, "n_identifiers": 7, "random_cut": "def notbefore(self):\n " }, { "id": 128311, "commit_id": "c3ff77f5a13395631a2af580ea4429ceb5dfea13", "repo": "ray", "path": "python/ray/data/tests/test_dataset_numpy.py", "file_name": "test_dataset_numpy.py", "fun_name": "test_numpy_read_partitioning", "commit_message": "[Datasets] Add `partitioning` parameter to `read_` functions (#28413)", "code": "def test_numpy_read_partitioning(ray_start_regular_shared, tmp_path):\n path = os.path.join(tmp_path, \"country=us\", \"data.npy\")\n os.mkdir(os.path.dirname(path))\n np.save(path, np.arange(4).reshape([2, 2]))\n\n ds = ray.data.read_numpy(path, partitioning=Partitioning(\"hive\"))\n\n assert ds.schema().names == [\"data\", \"country\"]\n assert [r[\"country\"] for r in ds.take()] == [\"us\", \"us\"]\n\n\n@pytest.mark.parametrize(\"from_ref\", [False, True])", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"from_ref\", [False, True])", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 49, "n_words": 33, "vocab_size": 30, "complexity": 2, "nloc": 7, "token_counts": 108, "n_ast_nodes": 202, "n_identifiers": 25, "random_cut": "def test_numpy_read_partitioning(ray_start_regular_shared, tmp_path):\n path = os.path.join(tmp_path, \"country=us\", \"data.npy\")\n os.mkdir(os.path.dirname(path))\n np.save(path, np.arange(4).reshape([" }, { "id": 106062, "commit_id": "cd3169f3f35afcf73a36a8276113e1881d92e5e0", "repo": "datasets", "path": "src/datasets/fingerprint.py", "file_name": "fingerprint.py", "fun_name": "is_caching_enabled", "commit_message": "Clean up Dataset and DatasetDict (#5344)\n\n* clean up docstrings\r\n\r\n* make style\r\n\r\n* apply review\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>", "code": "def is_caching_enabled() -> bool:\n \n global _CACHING_ENABLED\n return bool(_CACHING_ENABLED)\n\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 17, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 19, "token_counts": 14, "n_ast_nodes": 26, "n_identifiers": 3, "random_cut": "def is_caching_enabled() -> bool:\n \n global _C" }, { "id": 289678, "commit_id": "073951177b31f90be7232b03df0fd4db77cb3089", "repo": "core", "path": "tests/components/subaru/test_init.py", "file_name": "test_init.py", "fun_name": "test_invalid_credentials", "commit_message": "Code quality update for Subaru sensors (#79482)\n\n* Use distance device class for sensors\r\n\r\n* Change sensor name casing and unique_id\r\n\r\n* Migrate sensor entity unique_id\r\n\r\n* Match title-cased unique_id when migrating\r\n\r\n* Remove unneeded regex to find '_' delimited id suffix\r\n\r\n* Incorporate PR review comments\r\n\r\n* Add check to prevent extra odometer entity migration", "code": "async def test_invalid_credentials(hass, subaru_config_entry):\n \n await setup_subaru_config_entry(\n hass,\n subaru_config_entry,\n 
connect_effect=InvalidCredentials(\"Invalid Credentials\"),\n vehicle_list=[TEST_VIN_2_EV],\n vehicle_data=VEHICLE_DATA[TEST_VIN_2_EV],\n vehicle_status=VEHICLE_STATUS_EV,\n )\n check_entry = hass.config_entries.async_get_entry(subaru_config_entry.entry_id)\n assert check_entry\n assert check_entry.state is ConfigEntryState.SETUP_ERROR\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 83, "n_words": 23, "vocab_size": 21, "complexity": 1, "nloc": 12, "token_counts": 62, "n_ast_nodes": 96, "n_identifiers": 19, "random_cut": "async def test_invalid_credentials(hass, subaru_config_entry):\n \n await setup_subaru_config_entry(\n hass,\n subaru_config_entry,\n connect_effect=InvalidCredentials(\"Invalid Credential" }, { "id": 109157, "commit_id": "e94dfed864a8bbeb215bab5705a490325ac07819", "repo": "matplotlib", "path": "lib/mpl_toolkits/axes_grid1/axes_divider.py", "file_name": "axes_divider.py", "fun_name": "set_anchor", "commit_message": "Improve argument checking", "code": "def set_anchor(self, anchor):\n \n if isinstance(anchor, str):\n _api.check_in_list(mtransforms.Bbox.coefs, anchor=anchor)\n elif not isinstance(anchor, (tuple, list)) or len(anchor) != 2:\n raise TypeError(\"anchor must be str or 2-tuple\")\n self._anchor = anchor\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 77, "n_words": 27, "vocab_size": 25, "complexity": 4, "nloc": 6, "token_counts": 60, "n_ast_nodes": 97, "n_identifiers": 15, "random_cut": "def set_anchor(self, anchor):\n \n if isinstance(anchor, str):\n _api.check_in_list(mtransforms.Bbox.coefs, anchor=anchor)\n elif not isinstance(anchor, (tupl" }, { "id": 307469, "commit_id": "93b7f604d5e94c38964dca47daa2f84c9bc253f0", "repo": "core", "path": "homeassistant/components/landisgyr_heat_meter/config_flow.py", "file_name": "config_flow.py", "fun_name": "validate_and_create_entry", "commit_message": "Landis+Gyr integration: increase timeout and add debug logging (#78025)", "code": "async def validate_and_create_entry(self, dev_path):\n \n model, device_number = await self.validate_ultraheat(dev_path)\n\n _LOGGER.debug(\"Got model %s and device_number %s\", model, device_number)\n await self.async_set_unique_id(device_number)\n self._abort_if_unique_id_configured()\n data = {\n CONF_DEVICE: dev_path,\n \"model\": model,\n \"device_number\": device_number,\n }\n return self.async_create_entry(\n title=model,\n data=data,\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 153, "n_words": 35, "vocab_size": 30, "complexity": 1, "nloc": 14, "token_counts": 71, "n_ast_nodes": 117, "n_identifiers": 14, "random_cut": "async def validate_and_create_entry(self, dev_path):\n \n model, device_number = await self.validate_ultraheat(dev_path)\n\n _LOGGER.debug(\"Got model %s and device_number %s\", model, device_number)\n await self.async_set_unique_id(device_number)\n self._abort_if_unique_id_configured()\n " }, { "id": 224646, "commit_id": "3035ad18f1706c262bf0efbc2c7fa9832f523584", "repo": "mkdocs", "path": "mkdocs/contrib/search/search_index.py", "file_name": "search_index.py", "fun_name": "handle_starttag", "commit_message": "Cleanup: replace unnecessary list comprehensions (#2949)", "code": "def handle_starttag(self, tag, attrs):\n \n\n # We only care about the opening 
tag for headings.\n if tag not in _HEADER_TAGS:\n return\n\n # We are dealing with a new header, create a new section\n # for it and assign the ID if it has one.\n self.is_header_tag = True\n self.section = ContentSection()\n self.data.append(self.section)\n\n for attr in attrs:\n if attr[0] == \"id\":\n self.section.id = attr[1]\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 161, "n_words": 61, "vocab_size": 46, "complexity": 4, "nloc": 9, "token_counts": 62, "n_ast_nodes": 103, "n_identifiers": 12, "random_cut": "def handle_starttag(self, tag, attrs):\n \n\n # We only care about the opening tag for headings.\n if tag not in _HEADER_TAGS:\n return\n\n # We are dealing with a new header, create a new section" }, { "id": 29013, "commit_id": "b8598fa2cf84f8bb473f2066f075ad7a374c3c80", "repo": "saleor", "path": "saleor/payment/tests/test_payment.py", "file_name": "test_payment.py", "fun_name": "test_payment_owned_by_user_anonymous_user", "commit_message": "Drop `AnonymouUser` from the context, and assign None instead (#10575)\n\n* Fix error when app deleted product added to draft order; Fixes #10574\r\n\r\n* Get rid of AnonymousUser from context\r\n\r\n* Ger rid of AnonymousUser\r\n\r\n* Drop anonymous_user fixture\r\n\r\n* Clean events\r\n\r\n* Fix test_checkout_complete.py file\r\n\r\n* Drop changelog entry\r\n\r\n* Update resolver for me query\r\n\r\n* Apply code review remarks\r\n\r\n* Apply changes after rebasing with main branch\r\n\r\n* Fix review remarks\r\n\r\n* Update create order from checkout tests\r\n\r\n* Drop remaining uses of is_anonymous\r\n\r\nCo-authored-by: IKarbowiak ", "code": "def test_payment_owned_by_user_anonymous_user(payment):\n # given\n user = None\n\n # when\n is_owned = payment_owned_by_user(payment.pk, user)\n\n # then\n assert not is_owned\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 35, "n_words": 18, "vocab_size": 14, "complexity": 1, "nloc": 4, "token_counts": 21, "n_ast_nodes": 37, "n_identifiers": 6, "random_cut": "def test_payment_owned_by_user_anonymous_user(payment):\n # given\n user = None\n\n # when\n is_owned = payment_owned_by_user(payment.p" }, { "id": 87518, "commit_id": "1dab08bfd4006ccdccfeda9623cc5d60c6adb63c", "repo": "sentry", "path": "tests/sentry/snuba/test_profiles.py", "file_name": "test_profiles.py", "fun_name": "test_aggregate_resolution", "commit_message": "feat(profiling): Introduce profile timeseries query builder (#40745)\n\nAnalogous to #40557, this introduces a new query builder to be able to\r\nreturn timeseries data.", "code": "def test_aggregate_resolution(query_builder_fn, params, field, resolved):\n builder = query_builder_fn(\n dataset=Dataset.Profiles,\n params=params,\n selected_columns=[field],\n )\n assert builder.columns == [resolved]\n\n\n@pytest.mark.parametrize(\n \"field,message\",\n [\n pytest.param(\"foo\", \"Unknown field: foo\", id=\"foo\"),\n pytest.param(\"count(id)\", \"count: expected 0 argument\\\\(s\\\\)\", id=\"count(id)\"),\n pytest.param(\n \"count_unique(foo)\",\n \"count_unique: column argument invalid: foo is not a valid column\",\n id=\"count_unique(foo)\",\n ),\n *[\n pytest.param(\n f\"p{qt}(foo)\",\n f\"p{qt}: column argument invalid: foo is not a valid column\",\n id=f\"p{qt}(foo)\",\n )\n for qt in [\"50\", \"75\", \"95\", \"99\"]\n ],\n *[\n pytest.param(\n f\"p{qt}(id)\",\n f\"p{qt}: column 
argument invalid: id is not a numeric column\",\n id=f\"p{qt}(id)\",\n )\n for qt in [\"50\", \"75\", \"95\", \"99\"]\n ],\n pytest.param(\n \"percentile(foo,0.25)\",\n \"percentile: column argument invalid: foo is not a valid column\",\n id=\"percentile(foo,0.25)\",\n ),\n pytest.param(\n \"percentile(id,0.25)\",\n \"percentile: column argument invalid: id is not a numeric column\",\n id=\"percentile(id,0.25)\",\n ),\n *[\n pytest.param(\n f\"{fn}(foo)\",\n f\"{fn}: column argument invalid: foo is not a valid column\",\n id=f\"{fn}(foo)\",\n )\n for fn in [\"min\", \"max\", \"avg\", \"sum\"]\n ],\n *[\n pytest.param(\n f\"{fn}(id)\",\n f\"{fn}: column argument invalid: id is not a numeric column\",\n id=f\"{fn}(id)\",\n )\n for fn in [\"min\", \"max\", \"avg\", \"sum\"]\n ],\n ],\n)\n@query_builder_fns()\n@pytest.mark.django_db", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"field,message\",\n [\n pytest.param(\"foo\", \"Unknown field: foo\", id=\"foo\"),\n pytest.param(\"count(id)\", \"count: expected 0 argument\\\\(s\\\\)\", id=\"count(id)\"),\n pytest.param(\n \"count_unique(foo)\",\n \"count_unique: column argument invalid: foo is not a valid column\",\n id=\"count_unique(foo)\",\n ),\n *[\n pytest.param(\n f\"p{qt}(foo)\",\n f\"p{qt}: column argument invalid: foo is not a valid column\",\n id=f\"p{qt}(foo)\",\n )\n for qt in [\"50\", \"75\", \"95\", \"99\"]\n ],\n *[\n pytest.param(\n f\"p{qt}(id)\",\n f\"p{qt}: column argument invalid: id is not a numeric column\",\n id=f\"p{qt}(id)\",\n )\n for qt in [\"50\", \"75\", \"95\", \"99\"]\n ],\n pytest.param(\n \"percentile(foo,0.25)\",\n \"percentile: column argument invalid: foo is not a valid column\",\n id=\"percentile(foo,0.25)\",\n ),\n pytest.param(\n \"percentile(id,0.25)\",\n \"percentile: column argument invalid: id is not a numeric column\",\n id=\"percentile(id,0.25)\",\n ),\n *[\n pytest.param(\n f\"{fn}(foo)\",\n f\"{fn}: column argument invalid: foo is not a valid column\",\n id=f\"{fn}(foo)\",\n )\n for fn in [\"min\", \"max\", \"avg\", \"sum\"]\n ],\n *[\n pytest.param(\n f\"{fn}(id)\",\n f\"{fn}: column argument invalid: id is not a numeric column\",\n id=f\"{fn}(id)\",\n )\n for fn in [\"min\", \"max\", \"avg\", \"sum\"]\n ],\n ],\n)\n@query_builder_fns()\n@pytest.mark.django_db", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 725, "n_words": 168, "vocab_size": 77, "complexity": 1, "nloc": 7, "token_counts": 40, "n_ast_nodes": 467, "n_identifiers": 20, "random_cut": "def test_aggregate_resolution(query_builder_fn, params, field, resolved):\n builder = query_builder_fn(\n dataset=Dataset.Profiles,\n params=params,\n selected_columns=[field],\n )\n assert builder.columns == [resolved]\n\n\n@pytest.mark.parametrize(\n \"field,message\",\n [\n pytest.param(\"foo\", \"Unknown field: foo\", id=\"foo\"),\n pytest.param(\"count(id)\", \"count: expected 0 argument\\\\(s\\\\)\", id=\"count(id)\"),\n pytest.param(\n \"count_unique(foo)\",\n \"count_unique: column argument invalid: foo is not a valid column\",\n id=\"count_unique(foo)\",\n ),\n *[\n pytest.param(\n f\"p{qt}(foo)\",\n f\"p{qt}: column argument invalid: foo is not a valid column\",\n id=f\"p{qt}(foo)\",\n )\n for qt in [\"50\", \"75\", \"95\", \"99\"]\n ],\n *[\n pytest.param(\n f\"p{qt}(id)\",\n f\"p{qt}: column argument invalid: id is not a numeric column\",\n " }, { "id": 194652, "commit_id": "c5ff6db790f738a0e2d5f1dc91c5d883791357d3", "repo": "kivy", "path": 
"kivy/tests/test_logger.py", "file_name": "test_logger.py", "fun_name": "test_colonsplittinglogrecord_without_colon", "commit_message": "Refactored logging.ColoredFormatter to avoid deepcopy. (#7962)\n\n* Refactor ColoredFormatter\r\n\r\nRemoved old formatter and support code.\r\nAdded 3 LogRecord shims, new formatter, new unit tests for above and a unit test that used to fail to confirm bugs have been fixed.\r\n\r\n* PEP8 fixes\r\n\r\nMatch project-style.\r\n\r\n* PEP8 fixes\r\n\r\n(I can't run `black` on these files without\r\nmaking the review too hard.)\r\n\r\n* PEP8 Fixes\r\n\r\nNote to self: Do a `black` refactor of key files so I don't get stuck in this loop again.", "code": "def test_colonsplittinglogrecord_without_colon():\n from kivy.logger import ColonSplittingLogRecord\n\n originallogrecord = logging.LogRecord(\n name=\"kivy.test\",\n level=logging.DEBUG,\n pathname=\"test.py\",\n lineno=1,\n msg=\"Part1 Part2 Part 3\",\n args=(\"args\",),\n exc_info=None,\n func=\"test_colon_splitting\",\n sinfo=None,\n )\n shimmedlogrecord = ColonSplittingLogRecord(originallogrecord)\n # No colons means no change.\n assert str(originallogrecord) == str(shimmedlogrecord)\n\n", "url": "https://github.com/kivy/kivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 115, "n_words": 35, "vocab_size": 34, "complexity": 1, "nloc": 15, "token_counts": 74, "n_ast_nodes": 118, "n_identifiers": 19, "random_cut": "def test_colonsplittinglogrecord_without_colon():\n from kivy.logger import ColonSplittingLogRecord\n\n originallogrecord = logging.LogRecord(\n name=\"kivy.test\",\n level=logging.DEBUG,\n pathname=\"test.py\",\n lineno=1,\n msg=\"Part1 Part2 Part 3\",\n args=(\"args\",),\n exc_info=None,\n func=\"test_colon_splitting\",\n sinfo=None,\n )\n shimmedlogrecord = ColonSplittingLogRecord(originallogrecord)\n # No colons means no change.\n assert str(originallogrecord) == str(shimmedlogrecord)\n\n" }, { "id": 74911, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/documents/views/multiple.py", "file_name": "multiple.py", "fun_name": "get_context_data", "commit_message": "Reformat with black", "code": "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context.update(\n {\n \"max_title_length\": self.form.fields[\"title\"].max_length,\n }\n )\n\n return context\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 78, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 8, "token_counts": 41, "n_ast_nodes": 69, "n_identifiers": 9, "random_cut": "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context.update(\n {\n \"max_title_length\": self.form.fields[\"ti" }, { "id": 45514, "commit_id": "33edeb2cb1c83c61f2ce5981066228d10a77df5b", "repo": "airflow", "path": "tests/providers/amazon/aws/hooks/test_lambda_function.py", "file_name": "test_lambda_function.py", "fun_name": "test_get_conn_returns_a_boto3_connection", "commit_message": "Feature: Add invoke lambda function operator (#21686)", "code": "def test_get_conn_returns_a_boto3_connection(self):\n hook = LambdaHook(aws_conn_id='aws_default')\n assert hook.conn is not None\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 23, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, 
"token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 6, "random_cut": "def test_get_conn_returns_a_boto3_connection(self):\n hook = LambdaHook(aws_conn_id='" }, { "id": 27561, "commit_id": "7ea7916c65357741c3911e307acb58d547a5e91a", "repo": "saleor", "path": "saleor/webhook/observability/tests/test_buffer.py", "file_name": "test_buffer.py", "fun_name": "test_put_events_max_size", "commit_message": "Observability reporter (#9803)\n\n* Initial commit\r\n\r\n* Add observability celery beat task\r\n\r\n* Add observability_reporter_task and observability_send_events\r\n\r\n* Convert payload to camel case\r\n\r\n* Add fakeredis to dev dependencies\r\n\r\n* Add redis buffer tests\r\n\r\n* Refactor buffer\r\n\r\n* Update\r\n\r\n* Optimize buffer\r\n\r\n* Add tests\r\n\r\n* Add types-redis to dev dependencies\r\n\r\n* Refactor\r\n\r\n* Fix after rebase\r\n\r\n* Refactor opentracing\r\n\r\n* Add opentracing to observability tasks\r\n\r\n* Add more tests\r\n\r\n* Fix buffer fixtures\r\n\r\n* Report dropped events\r\n\r\n* Fix buffer tests\r\n\r\n* Refactor get_buffer\r\n\r\n* Refactor unit tests\r\n\r\n* Set Redis connection client_name\r\n\r\n* Refactor redis tests\r\n\r\n* Fix test_get_or_create_connection_pool\r\n\r\n* Fix JsonTruncText comparison\r\n\r\n* Add more generate_event_delivery_attempt_payload tests", "code": "def test_put_events_max_size(buffer):\n events = [{\"event\": \"data\"}] * MAX_SIZE * 2\n dropped = buffer.put_events(events)\n assert buffer.size() == MAX_SIZE\n assert dropped == MAX_SIZE\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 32, "n_words": 21, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 38, "n_ast_nodes": 65, "n_identifiers": 7, "random_cut": "def test_put_events_max_size(buffer):\n events = [{\"event\": \"data\"}] * MAX_SIZE * 2\n dropped = buffer.put_events(even" }, { "id": 179834, "commit_id": "6b259bde9572930d4c699fe5b75fc3b6b7c62234", "repo": "gradio", "path": "test/test_outputs.py", "file_name": "test_outputs.py", "fun_name": "test_as_component", "commit_message": "removed outdated outputs tests", "code": "def test_as_component(self):\n ht_output = gr.outputs.HighlightedText(color_map={\"pos\": \"green\", \"neg\": \"red\"})\n self.assertEqual(\n ht_output.get_template_context(),\n {\n \"color_map\": {\"pos\": \"green\", \"neg\": \"red\"},\n \"name\": \"highlightedtext\",\n \"label\": None,\n \"show_legend\": False,\n \"css\": {}\n },\n )\n ht = {\"pos\": \"Hello \", \"neg\": \"World\"}\n with tempfile.TemporaryDirectory() as tmpdirname:\n to_save = ht_output.save_flagged(tmpdirname, \"ht_output\", ht, None)\n self.assertEqual(to_save, '{\"pos\": \"Hello \", \"neg\": \"World\"}')\n self.assertEqual(\n ht_output.restore_flagged(tmpdirname, to_save, None),\n {\"pos\": \"Hello \", \"neg\": \"World\"},\n )\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 275, "n_words": 59, "vocab_size": 44, "complexity": 1, "nloc": 20, "token_counts": 135, "n_ast_nodes": 247, "n_identifiers": 16, "random_cut": "def test_as_component(self):\n ht_output = gr.outputs.HighlightedText(color_map={\"pos\": \"green\", \"neg\": \"red\"})\n self.assertEqual(\n ht_output.get_template_context(),\n {\n \"color_map\": {\"pos\": \"green\", \"neg\": \"red\"},\n \"name\": \"highlightedtext\",\n \"label\": None,\n \"show_legend\": False,\n \"css\": {}\n },\n )\n ht = {\"pos\": \"Hello 
\", \"neg\": \"World\"}\n with tempfile.TemporaryDirectory() as tmpdirname:\n to_save = ht_output.save_flagged(tmpdirname, \"ht_output\", ht, None)\n self.assertEqual(to_save, '{\"pos\": \"Hello \", \"neg\": \"World\"}')\n self.assertEqual(\n ht_output.restore_flagged(tmpdirname, to_save, None),\n {\"pos\": \"Hello \", \"neg\": \"World\"},\n )" }, { "id": 13712, "commit_id": "8794fcd378b1f6fadc3f84a6492441ca0168483c", "repo": "jina", "path": "jina/serve/networking.py", "file_name": "networking.py", "fun_name": "host_is_local", "commit_message": "refactor: add more debug info to prints (#5475)\n\nSigned-off-by: Johannes Messner ", "code": "def host_is_local(hostname):\n \n import socket\n\n fqn = socket.getfqdn(hostname)\n if fqn in ('localhost', '0.0.0.0') or hostname == '0.0.0.0':\n return True\n\n try:\n return ipaddress.ip_address(hostname).is_loopback\n except ValueError:\n return False\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 64, "n_words": 25, "vocab_size": 22, "complexity": 4, "nloc": 9, "token_counts": 47, "n_ast_nodes": 84, "n_identifiers": 9, "random_cut": "def host_is_local(hostname):\n \n " }, { "id": 104061, "commit_id": "5c9ad28ed03716e02eb1b95bd6094914cdd27df8", "repo": "datasets", "path": "datasets/red_caps/red_caps.py", "file_name": "red_caps.py", "fun_name": "_config_name_to_description", "commit_message": "Add RedCaps dataset (#3424)\n\n* Add RedCaps script\r\n\r\n* Improve script\r\n\r\n* Add underscore to global variables\r\n\r\n* Add README file\r\n\r\n* Add info for main config\r\n\r\n* Small improvements\r\n\r\n* Add dummy data\r\n\r\n* Minor fix in README\r\n\r\n* Specify timezone in features dict\r\n\r\n* Specify dataset name in README :)\r\n\r\n* Add instructions on how to download data\r\n\r\n* Specify user-agent", "code": "def _config_name_to_description(config_name):\n if config_name == \"all\":\n return \"Contains data from all the subreddits\"\n else:\n if re.match(r\".*_\\d{4}$\", config_name):\n subreddit, year = config_name.split(\"_\")\n year_str = \"2008 - 2017\" if year == \"2017\" else year\n else:\n subreddit = config_name\n year_str = \", \".join(\n [\"2008 - 2017\" if year == \"2017\" else year for year in _SUBREDDIT_TO_YEAR[config_name]]\n )\n return f\"Contains data from the {subreddit} subreddit posted in {year_str}\"\n\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 167, "n_words": 64, "vocab_size": 38, "complexity": 6, "nloc": 13, "token_counts": 75, "n_ast_nodes": 141, "n_identifiers": 10, "random_cut": "def _config_name_to_description(config_name):\n if config_name == \"all\":\n return \"Contains data from all the subreddits\"\n else:\n if re.match(r\".*_\\d{4}$\", confi" }, { "id": 220325, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/base_events.py", "file_name": "base_events.py", "fun_name": "_asyncgen_finalizer_hook", "commit_message": "add python 3.10.4 for windows", "code": "def _asyncgen_finalizer_hook(self, agen):\n self._asyncgens.discard(agen)\n if not self.is_closed():\n self.call_soon_threadsafe(self.create_task, agen.aclose())\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 33, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 4, "token_counts": 37, 
"n_ast_nodes": 60, "n_identifiers": 9, "random_cut": "def _asyncgen_finalizer_hook(self, agen):\n self._asyncgens." }, { "id": 263522, "commit_id": "64ccb7aea824fbec57f7ed1bbe483ec486183c13", "repo": "pyinstaller", "path": "bootloader/waflib/Tools/irixcc.py", "file_name": "irixcc.py", "fun_name": "configure", "commit_message": "Bootloader: Building: Unpack waf's lib archive.\n\nDoing so makes it easier to modify. This is a temporary measure until the next\nwaf version is released (although I'm tempted to keep it since it's much more\nIDE completion friendly).", "code": "def configure(conf):\n conf.find_irixcc()\n conf.find_ar()\n conf.irixcc_common_flags()\n conf.cc_load_tools()\n conf.cc_add_flags()\n conf.link_add_flags()\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 25, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 7, "token_counts": 35, "n_ast_nodes": 63, "n_identifiers": 8, "random_cut": "def configure(conf):\n conf.find_irixcc()\n conf.find_ar()\n conf.irixcc_common_flags()\n conf.cc_load_tools()\n conf.cc_add_flags()\n conf.link_add_flags()\n" }, { "id": 298274, "commit_id": "8cbbdf21f3d2ecaedb95d44b667a60302c137fbf", "repo": "core", "path": "homeassistant/components/todoist/calendar.py", "file_name": "calendar.py", "fun_name": "calendar_event", "commit_message": "Update todoist integration to use new official rest api library (#79481)\n\n* Swapping out libraries.\r\n\r\n* Adding types\r\n\r\n* Add ability to add task.\r\n\r\n* Removed remaining todos.\r\n\r\n* Fix lint errors.\r\n\r\n* Fixing tests.\r\n\r\n* Update to v2 of the rest api.\r\n\r\n* Swapping out libraries.\r\n\r\n* Adding types\r\n\r\n* Add ability to add task.\r\n\r\n* Removed remaining todos.\r\n\r\n* Fix lint errors.\r\n\r\n* Fix mypy errors\r\n\r\n* Fix custom projects.\r\n\r\n* Bump DEPENDENCY_CONFLICTS const\r\n\r\n* Remove conflict bump\r\n\r\n* Addressing PR feedback.\r\n\r\n* Removing utc offset logic and configuration.\r\n\r\n* Addressing PR feedback.\r\n\r\n* Revert date range logic check", "code": "def calendar_event(self) -> CalendarEvent | None:\n \n if not self.event:\n return None\n\n start = self.event[START]\n if self.event.get(ALL_DAY) or self.event[END] is None:\n return CalendarEvent(\n summary=self.event[SUMMARY],\n start=start.date(),\n end=start.date() + timedelta(days=1),\n )\n\n return CalendarEvent(\n summary=self.event[SUMMARY], start=start, end=self.event[END]\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 165, "n_words": 34, "vocab_size": 27, "complexity": 4, "nloc": 14, "token_counts": 106, "n_ast_nodes": 162, "n_identifiers": 15, "random_cut": "def calendar_event(self) -> CalendarEvent | None:\n \n if not self.event:\n return None\n\n start = self.event[START]\n if self.event.get(ALL_DAY) or self.event[END] is None:\n return CalendarEvent(\n summary=self.event[SUMMARY],\n " }, { "id": 72885, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/api/v2/tests/test_pages.py", "file_name": "test_pages.py", "fun_name": "test_empty_searches_work", "commit_message": "Reformat with black", "code": "def test_empty_searches_work(self):\n response = self.get_response(search=\"\")\n content = json.loads(response.content.decode(\"UTF-8\"))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response[\"Content-type\"], 
\"application/json\")\n self.assertEqual(content[\"meta\"][\"total_count\"], 0)\n\n # REGRESSION TESTS\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 54, "n_words": 17, "vocab_size": 16, "complexity": 1, "nloc": 6, "token_counts": 65, "n_ast_nodes": 113, "n_identifiers": 11, "random_cut": "def test_empty_searches_work(self):\n response = self.get_response(search=\"\")\n content = json.loads(response.content.decode(\"UTF-8\"))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response[\"Content-type\"], \"application/json\")\n self.assertEqual(content[\"meta\"][\"total_count\"], 0)" }, { "id": 313553, "commit_id": "b1f2e5f897540967ebef2ccf98026d70009b5c4f", "repo": "core", "path": "tests/components/generic/test_camera.py", "file_name": "test_camera.py", "fun_name": "test_stream_source_error", "commit_message": "Use create_stream in generic camera config flow (#73237)\n\n* Use create_stream in generic camera config flow", "code": "async def test_stream_source_error(hass, hass_client, hass_ws_client, fakeimgbytes_png):\n \n respx.get(\"http://example.com\").respond(stream=fakeimgbytes_png)\n\n assert await async_setup_component(\n hass,\n \"camera\",\n {\n \"camera\": {\n \"name\": \"config_test\",\n \"platform\": \"generic\",\n \"still_image_url\": \"http://example.com\",\n # Does not exist\n \"stream_source\": 'http://example.com/{{ states.sensor.temp.state + \"a\" }}',\n \"limit_refetch_to_url_change\": True,\n },\n },\n )\n assert await async_setup_component(hass, \"stream\", {})\n await hass.async_block_till_done()\n\n with patch(\n \"homeassistant.components.camera.Stream.endpoint_url\",\n return_value=\"http://home.assistant/playlist.m3u8\",\n ) as mock_stream_url:\n # Request playlist through WebSocket\n client = await hass_ws_client(hass)\n\n await client.send_json(\n {\"id\": 1, \"type\": \"camera/stream\", \"entity_id\": \"camera.config_test\"}\n )\n msg = await client.receive_json()\n\n # Assert WebSocket response\n assert mock_stream_url.call_count == 0\n assert msg[\"id\"] == 1\n assert msg[\"type\"] == TYPE_RESULT\n assert msg[\"success\"] is False\n assert msg[\"error\"] == {\n \"code\": \"start_stream_failed\",\n \"message\": \"camera.config_test does not support play stream service\",\n }\n\n\n@respx.mock", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@respx.mock", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 402, "n_words": 108, "vocab_size": 84, "complexity": 1, "nloc": 34, "token_counts": 169, "n_ast_nodes": 320, "n_identifiers": 21, "random_cut": "async def test_stream_source_error(hass, hass_client, hass_ws_client, fakeimgbytes_png):\n \n respx.get(\"http://example.com\").respond(stream=fakeimgbytes_png)\n\n assert await async_setup_component(\n hass,\n \"camera\",\n {\n \"camera\": {\n \"name\": \"config_test\",\n \"platform\": \"generic\",\n \"still_ima" }, { "id": 301798, "commit_id": "db9c586404d8fb0e520e731ccb0229d08ffd7161", "repo": "core", "path": "homeassistant/components/frontier_silicon/media_player.py", "file_name": "media_player.py", "fun_name": "async_update", "commit_message": "Address late comments for frontier silicon (#72745)\n\nCo-authored-by: Martin Hjelmare ", "code": "async def async_update(self):\n \n afsapi = self.fs_device\n try:\n if await afsapi.get_power():\n status = await afsapi.get_play_status()\n self._state = {\n PlayState.PLAYING: STATE_PLAYING,\n PlayState.PAUSED: 
STATE_PAUSED,\n PlayState.STOPPED: STATE_IDLE,\n PlayState.LOADING: STATE_OPENING,\n None: STATE_IDLE,\n }.get(status)\n else:\n self._state = STATE_OFF\n except FSConnectionError:\n if self._attr_available:\n _LOGGER.warning(\n \"Could not connect to %s. Did it go offline?\",\n self._name or afsapi.webfsapi_endpoint,\n )\n self._attr_available = False\n return\n\n if not self._attr_available:\n _LOGGER.info(\n \"Reconnected to %s\",\n self._name or afsapi.webfsapi_endpoint,\n )\n\n self._attr_available = True\n if not self._name:\n self._name = await afsapi.get_friendly_name()\n\n if not self._source_list:\n self.__modes_by_label = {\n mode.label: mode.key for mode in await afsapi.get_modes()\n }\n self._source_list = list(self.__modes_by_label)\n\n # The API seems to include 'zero' in the number of steps (e.g. if the range is\n # 0-40 then get_volume_steps returns 41) subtract one to get the max volume.\n # If call to get_volume fails set to 0 and try again next time.\n if not self._max_volume:\n self._max_volume = int(await afsapi.get_volume_steps() or 1) - 1\n\n if self._state != STATE_OFF:\n info_name = await afsapi.get_play_name()\n info_text = await afsapi.get_play_text()\n\n self._title = \" - \".join(filter(None, [info_name, info_text]))\n self._artist = await afsapi.get_play_artist()\n self._album_name = await afsapi.get_play_album()\n\n self._source = (await afsapi.get_mode()).label\n self._mute = await afsapi.get_mute()\n self._media_image_url = await afsapi.get_play_graphic()\n\n volume = await self.fs_device.get_volume()\n\n # Prevent division by zero if max_volume not known yet\n self._volume_level = float(volume or 0) / (self._max_volume or 1)\n else:\n self._title = None\n self._artist = None\n self._album_name = None\n\n self._source = None\n self._mute = None\n self._media_image_url = None\n\n self._volume_level = None\n\n # Management actions\n # power control", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 951, "n_words": 237, "vocab_size": 145, "complexity": 15, "nloc": 56, "token_counts": 368, "n_ast_nodes": 606, "n_identifiers": 58, "random_cut": "async def async_update(self):\n \n afsapi = self.fs_device\n try:\n if await afsapi.get_power():\n status = await afsapi.get_play_status()\n self._state = {\n PlayState.PLAYING: STATE_PLAYING,\n PlayState.PAUSED: STATE_PAUSED,\n PlayState.STOPPED: STATE_IDLE,\n PlayState.LOADING: STATE_OPENING,\n None: STATE_IDLE,\n }.get(status)\n else:\n self._state = STATE_OFF\n except FSConnectionError:\n if self._attr_available:\n _LOGGER.warning(\n \"Could not connect to %s. 
Did it go offline?\",\n self._name or afsapi.webfsapi_endpoint,\n )\n self._attr_available = False\n return\n\n if not self._attr_available:\n _LOGGER.info(\n \"Reconnected to %s\",\n self._name or afsapi.webfsapi_" }, { "id": 156122, "commit_id": "cccb9d8d8e33a891396b1275c2448c352ef40c27", "repo": "dask", "path": "dask/diagnostics/profile.py", "file_name": "profile.py", "fun_name": "_plot", "commit_message": "absolufy-imports - No relative - PEP8 (#8796)\n\nConversation in https://github.com/dask/distributed/issues/5889", "code": "def _plot(self, **kwargs):\n from dask.diagnostics.profile_visualize import plot_tasks\n\n return plot_tasks(self.results, self._dsk, **kwargs)\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 24, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 30, "n_ast_nodes": 44, "n_identifiers": 9, "random_cut": "def _plot(self, **kwargs):\n from dask.diagnostics.profile_visualize import plot_tasks\n\n return plot_t" }, { "id": 279805, "commit_id": "c492e45a017ecff5196a45d962d1618cac89467a", "repo": "keras", "path": "keras/callbacks_test.py", "file_name": "callbacks_test.py", "fun_name": "test_EarlyStopping_with_start_from_epoch", "commit_message": "add unit test for start_from_epoch to EarlyStop", "code": "def test_EarlyStopping_with_start_from_epoch(self):\n with self.cached_session():\n np.random.seed(1337)\n\n (data, labels), _ = test_utils.get_test_data(\n train_samples=100,\n test_samples=50,\n input_shape=(1,),\n num_classes=NUM_CLASSES,\n )\n model = test_utils.get_small_sequential_mlp(\n num_hidden=1, num_classes=1, input_dim=1\n )\n model.compile(\n optimizer=\"sgd\", loss=\"binary_crossentropy\", metrics=[\"acc\"]\n )\n start_from_epoch = 2\n patience = 3\n stopper = keras.callbacks.EarlyStopping(\n monitor=\"acc\",\n patience=patience,\n start_from_epoch=start_from_epoch,\n )\n hist = model.fit(\n data, labels, callbacks=[stopper], verbose=0, epochs=20\n )\n assert len(hist.epoch) >= patience + start_from_epoch\n\n start_from_epoch = 2\n patience = 0\n stopper = keras.callbacks.EarlyStopping(\n monitor=\"acc\",\n patience=patience,\n start_from_epoch=start_from_epoch,\n )\n hist = model.fit(\n data, labels, callbacks=[stopper], verbose=0, epochs=20\n )\n assert len(hist.epoch) >= start_from_epoch\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 528, "n_words": 81, "vocab_size": 45, "complexity": 1, "nloc": 37, "token_counts": 210, "n_ast_nodes": 319, "n_identifiers": 37, "random_cut": "def test_EarlyStopping_with_start_from_epoch(self):\n with self.cached_session():\n np.random.seed(1337)\n\n (data, labels), _ = test_utils.get_test_data(\n train_samples=100,\n test_samples=50,\n input_shape=(1,),\n num_classes=NUM_CLASSES,\n )\n model = test_utils.get_small_sequential_mlp(\n num_hidden=1, num_classes=1, input_dim=1\n )\n model.compile(\n optimizer=\"sgd\", loss=\"binary_crossentropy\", metrics=[\"acc\"]\n )\n start_from_epoch = 2\n patience = 3\n stopper = keras.callbacks.EarlyStopping(\n monitor=\"acc\",\n patience=patience,\n start_from_epoch=start_from_epoch,\n )\n hist = model.fit(\n data, labels, callbacks=[stopper], verbose=0, epochs=20\n )\n assert len(hist.epoch) >= patience + start_from_epoch\n\n start_from_epoch = 2\n patience = 0\n stopper = ke" }, { "id": 55969, "commit_id": "a05e44c89acf0b6073ac876479be24a5e51d7754", "repo": 
"prefect", "path": "src/prefect/orion/models/block_schemas.py", "file_name": "block_schemas.py", "fun_name": "_find_block_schema_via_checksum", "commit_message": "Nested Block Schemas (PrefectHQ/orion#1846)\n\n* Adds models and migration for block schema and block document references\r\n\r\n* Adds customization to the generation of a block schema's fields\r\n\r\n* Adds ability to reconstruct block schema fields on read\r\n\r\n* Adds ability to reconstruct block schema when read by checksum\r\n\r\n* Adds schema reconstruction when reading multiple block schemas\r\n\r\n* Adds ordering to query of recursive CTE\r\n\r\n* Refactors to make code path and purpose easier to follow", "code": "def _find_block_schema_via_checksum(block_schemas_with_references, checksum):\n \n return next(\n (\n block_schema\n for block_schema, _, _ in block_schemas_with_references\n if block_schema.checksum == checksum\n ),\n None,\n )\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 83, "n_words": 20, "vocab_size": 20, "complexity": 3, "nloc": 9, "token_counts": 32, "n_ast_nodes": 48, "n_identifiers": 6, "random_cut": "def _find_block_schema_via_checksum(block_schemas_with_references, checksum):\n \n return next(\n (\n block_schema\n for block_schema, _, _ in block_schemas_with_references\n if block_schema.checksum == checksum\n ),\n None,\n )\n\n" }, { "id": 80579, "commit_id": "443bdc1234682dd0004bae372078512fcf37cce9", "repo": "awx", "path": "awx/main/tests/unit/test_tasks.py", "file_name": "test_tasks.py", "fun_name": "test_vars_unsafe_by_default", "commit_message": "Decoupled callback functions from BaseTask Class\n\n--- Removed all callback functions from 'jobs.py' and put them in a new file '/awx/main/tasks/callback.py'\n--- Modified Unit tests unit moved\n--- Moved 'update_model' from jobs.py to /awx/main/utils/update_model.py", "code": "def test_vars_unsafe_by_default(self, job, private_data_dir):\n job.created_by = User(pk=123, username='angry-spud')\n job.inventory = Inventory(pk=123, name='example-inv')\n\n task = jobs.RunJob()\n task.build_extra_vars_file(job, private_data_dir)\n\n fd = open(os.path.join(private_data_dir, 'env', 'extravars'))\n extra_vars = yaml.load(fd, Loader=SafeLoader)\n\n # ensure that strings are marked as unsafe\n for unsafe in [\n 'awx_job_template_name',\n 'tower_job_template_name',\n 'awx_user_name',\n 'tower_job_launch_type',\n 'awx_project_revision',\n 'tower_project_revision',\n 'tower_user_name',\n 'awx_job_launch_type',\n 'awx_inventory_name',\n 'tower_inventory_name',\n ]:\n assert hasattr(extra_vars[unsafe], '__UNSAFE__')\n\n # ensure that non-strings are marked as safe\n for safe in [\n 'awx_job_template_id',\n 'awx_job_id',\n 'awx_user_id',\n 'tower_user_id',\n 'tower_job_template_id',\n 'tower_job_id',\n 'awx_inventory_id',\n 'tower_inventory_id',\n ]:\n assert not hasattr(extra_vars[safe], '__UNSAFE__')\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 380, "n_words": 77, "vocab_size": 59, "complexity": 3, "nloc": 31, "token_counts": 150, "n_ast_nodes": 256, "n_identifiers": 28, "random_cut": "def test_vars_unsafe_by_default(self, job, private_data_dir):\n job.created_by = User(pk=123, username='angry-spud')\n job.inventory = Inventory(pk=123, name='example-inv')\n\n task = jobs.RunJob()\n task.build_extra_vars_file(job, private_data_dir)\n\n fd = 
open(os.path.join(private_data_dir, 'env', 'extravars'))\n extra_vars = yaml.load(fd, Loader=SafeLoader)\n\n # ensure that strings are marked as unsafe\n for unsafe in [\n 'awx_job_template_name',\n 'tower_job_template_name',\n 'awx_user_name',\n 'tower_job_launch_type',\n 'awx_project_revision',\n 'tower_project_revision',\n 'tower_user_name',\n 'awx_job_launch_type',\n 'awx_inventory_name',\n " }, { "id": 178838, "commit_id": "70b7eee9555c8d5599d096eaf600521475b001d9", "repo": "Nuitka", "path": "nuitka/utils/CStrings.py", "file_name": "CStrings.py", "fun_name": "encodePythonUnicodeToC", "commit_message": "Python3.7+: Added support for get_resource_reader to our loader\n\n* This allows to avoid a useless file copy to a temporary file\n in case a \"importlib.resources.path\" is used.\n\n* Also fixed a few typos in tests.\n\n* And avoid compiling the meta path based loader separately, so it\n can use compiled code helpers easily.", "code": "def encodePythonUnicodeToC(value):\n \n assert type(value) is unicode, type(value)\n\n result = \"\"\n\n for c in value:\n cv = ord(c)\n\n result += r\"\\%o\" % cv\n\n return 'L\"%s\"' % result\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 55, "n_words": 26, "vocab_size": 20, "complexity": 2, "nloc": 7, "token_counts": 42, "n_ast_nodes": 73, "n_identifiers": 8, "random_cut": "def encodePythonUnicodeToC(value):\n \n assert type(value) is unicode, type(value)\n\n result = \"\"\n\n for c in " }, { "id": 102422, "commit_id": "2378421340e5aec0033d564be7b706e8f903b146", "repo": "pytorch", "path": "test/distributed/_sharded_tensor/ops/test_binary_cmp.py", "file_name": "test_binary_cmp.py", "fun_name": "get_random_tensors", "commit_message": "Implement torch.allclose for sharded tensor. 
(#70331)\n\nSummary:\nImplement torch.allclose op for sharded tensors.\n\nPull Request resolved: https://github.com/pytorch/pytorch/pull/70331\n\nTest Plan:\nAutomated test added.\npritamdamania87\nFixes https://github.com/pytorch/pytorch/issues/67112\n\ncc pietern mrshenli pritamdamania87 zhaojuanmao satgera rohan-varma gqchen aazzolini osalpekar jiayisuse SciPioneer H-Huang\n\nReviewed By: pritamdamania87\n\nDifferential Revision: D33339137\n\nPulled By: kumpera\n\nfbshipit-source-id: 4263e468eaa117317b190f69877bf3f8bbac5658", "code": "def get_random_tensors(self, spec1, spec2, *sizes, pg1=None, pg2=None, seed_offset=0):\n pg1 = _get_default_group() if pg1 is None else pg1\n pg2 = _get_default_group() if pg2 is None else pg2\n torch.manual_seed(TestShardedTensorBinaryOps.seed)\n st1 = _sharded_tensor.rand(spec1, sizes, process_group=pg1)\n torch.manual_seed(TestShardedTensorBinaryOps.seed + seed_offset)\n st2 = _sharded_tensor.rand(spec2, sizes, process_group=pg2)\n\n TestShardedTensorBinaryOps.seed += 1\n return st1, st2\n", "url": "https://github.com/pytorch/pytorch.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 101, "n_words": 46, "vocab_size": 32, "complexity": 3, "nloc": 9, "token_counts": 101, "n_ast_nodes": 149, "n_identifiers": 18, "random_cut": "def get_random_tensors(self, spec1, spec2, *sizes, pg1=None, pg2=None, seed_offset=0):\n pg1 = _get_default_" }, { "id": 46956, "commit_id": "582e0d53af78f881cc0f9e5b063bef11f18f7999", "repo": "airflow", "path": "tests/timetables/test_events_timetable.py", "file_name": "test_events_timetable.py", "fun_name": "restricted_timetable", "commit_message": "Events Timetable (#22332)\n\nThis Timetable will be widely useful for timing based on sporting events, planned communication campaigns,\r\nand other schedules that are arbitrary and irregular but predictable.", "code": "def restricted_timetable():\n return EventsTimetable(event_dates=EVENT_DATES, restrict_to_events=True)\n\n\n@pytest.mark.parametrize(\n \"start, end\",\n list(zip(EVENT_DATES, EVENT_DATES)),\n)", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"start, end\",\n list(zip(EVENT_DATES, EVENT_DATES)),\n)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 17, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 56, "n_identifiers": 10, "random_cut": "def restricted_timetable():\n return EventsTimet" }, { "id": 61385, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/vcs/subversion.py", "file_name": "subversion.py", "fun_name": "switch", "commit_message": "upd; format", "code": "def switch(self, dest, url, rev_options):\n # type: (str, HiddenText, RevOptions) -> None\n cmd_args = make_command(\n 'switch', self.get_remote_call_options(), rev_options.to_args(),\n url, dest,\n )\n self.run_command(cmd_args)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 71, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 6, "token_counts": 40, "n_ast_nodes": 61, "n_identifiers": 10, "random_cut": "def switch(self, dest, url, rev_options):\n # type: (str, HiddenText, RevOptions) -> None\n cmd_args = make_command(\n 'switch', self.get_remote_call_options(), rev_options.to_args(),\n url, dest,\n )\n self.run_command(cmd_args)\n" }, 
{ "id": 83726, "commit_id": "b4a9311ef296da9d50f176b775d8452f99d12c55", "repo": "zulip", "path": "zerver/tests/test_events.py", "file_name": "test_events.py", "fun_name": "test_user_group_events", "commit_message": "actions: Add function to add and remove subgroups from a user group.", "code": "def test_user_group_events(self) -> None:\n othello = self.example_user(\"othello\")\n events = self.verify_action(\n lambda: check_add_user_group(\n self.user_profile.realm, \"backend\", [othello], \"Backend team\"\n )\n )\n check_user_group_add(\"events[0]\", events[0])\n\n # Test name update\n backend = UserGroup.objects.get(name=\"backend\")\n events = self.verify_action(lambda: do_update_user_group_name(backend, \"backendteam\"))\n check_user_group_update(\"events[0]\", events[0], \"name\")\n\n # Test description update\n description = \"Backend team to deal with backend code.\"\n events = self.verify_action(lambda: do_update_user_group_description(backend, description))\n check_user_group_update(\"events[0]\", events[0], \"description\")\n\n # Test add members\n hamlet = self.example_user(\"hamlet\")\n events = self.verify_action(lambda: bulk_add_members_to_user_group(backend, [hamlet.id]))\n check_user_group_add_members(\"events[0]\", events[0])\n\n # Test remove members\n hamlet = self.example_user(\"hamlet\")\n events = self.verify_action(lambda: remove_members_from_user_group(backend, [hamlet.id]))\n check_user_group_remove_members(\"events[0]\", events[0])\n\n api_design = create_user_group(\n \"api-design\", [hamlet], hamlet.realm, description=\"API design team\"\n )\n\n # Test add subgroups\n events = self.verify_action(lambda: add_subgroups_to_user_group(backend, [api_design]))\n check_user_group_add_subgroups(\"events[0]\", events[0])\n\n # Test remove subgroups\n events = self.verify_action(lambda: remove_subgroups_from_user_group(backend, [api_design]))\n check_user_group_remove_subgroups(\"events[0]\", events[0])\n\n # Test remove event\n events = self.verify_action(lambda: check_delete_user_group(backend.id, othello))\n check_user_group_remove(\"events[0]\", events[0])\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 392, "n_words": 128, "vocab_size": 67, "complexity": 1, "nloc": 29, "token_counts": 284, "n_ast_nodes": 473, "n_identifiers": 33, "random_cut": "def test_user_group_events(self) -> None:\n othello = self.example_user(\"othello\")\n events = self.verify_action(\n lambda: check_add_user_group(\n self.user_profile.realm, \"backend\", [othello], \"Backend team\"\n )\n )\n check_user_group_add(\"events[0]\", events[0])\n\n # Test name update\n backend = UserGroup.objects.get(name=\"backend\")\n " }, { "id": 53828, "commit_id": "bd98b7589b4da7405da6f93fd0df5b452ef02b4b", "repo": "prefect", "path": "tests/orion/models/test_work_queues.py", "file_name": "test_work_queues.py", "fun_name": "test_read_work_queue", "commit_message": "Add work queue models and schemas", "code": "async def test_read_work_queue(self, work_queues, session):\n read_work_queue = await models.work_queues.read_work_queues(session=session)\n assert len(read_work_queue) == len(work_queues)\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 26, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 32, "n_ast_nodes": 51, "n_identifiers": 8, "random_cut": "async def test_read_work_queue(self, 
work_queues, session):\n read_wor" }, { "id": 42103, "commit_id": "a259ac55c4233ab3418459d3b6cd195ebe2cb521", "repo": "seaborn", "path": "tests/_core/test_plot.py", "file_name": "test_plot.py", "fun_name": "check_facet_results_2d", "commit_message": "Allow Plot.label to control title(s) (#2934)\n\n* Don't show facet variable names in facet titles\r\n\r\n* Don't document Plot.label as accepting None as a value\r\n\r\n* Allow Plot.label to control titles, including when faceting\r\n\r\n* Don't include separator in labeled facet title\r\n\r\n* Clean up title typing\r\n\r\n* Fix legend test\r\n\r\n* Fix legend contents typing after rebase\r\n\r\n* Add theme update to Plot.clone and remove outdated todo", "code": "def check_facet_results_2d(self, p, df, variables, order=None):\n\n p = p.plot()\n\n if order is None:\n order = {dim: categorical_order(df[key]) for dim, key in variables.items()}\n\n levels = itertools.product(*[order[dim] for dim in [\"row\", \"col\"]])\n assert len(p._subplots) == len(list(levels))\n\n for subplot, (row_level, col_level) in zip(p._subplots, levels):\n assert subplot[\"row\"] == row_level\n assert subplot[\"col\"] == col_level\n assert subplot[\"axes\"].get_title() == (\n f\"{col_level} | {row_level}\"\n )\n assert_gridspec_shape(\n subplot[\"axes\"], len(levels[\"row\"]), len(levels[\"col\"])\n )\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 203, "n_words": 62, "vocab_size": 48, "complexity": 5, "nloc": 15, "token_counts": 156, "n_ast_nodes": 255, "n_identifiers": 23, "random_cut": "def check_facet_results_2d(self, p, df, variables, order=None):\n\n p = p.plot()\n\n if order is None:\n order = {dim: categorical_order(df[key]) for dim, key in variables.items()}\n\n levels = itertools.product(*[order[dim] for dim in [\"row\", \"col" }, { "id": 250763, "commit_id": "3a5550a09cd40d76acfe71aa45c7a8309525ad51", "repo": "mitmproxy", "path": "test/mitmproxy/addons/test_save.py", "file_name": "test_save.py", "fun_name": "test_tcp", "commit_message": "Rotate stream files (#5097)\n\n* Example addon for saving streamed data including a small bug fix to make it work.\r\n\r\n* Revert \"Example addon for saving streamed data including a small bug fix to make it work.\"\r\n\r\nThis reverts commit 02ab78def9a52eaca1a89d0757cd9475ce250eaa.\r\n\r\n* Add support for rotating stream files every hour or day\r\n\r\n* Added tests\r\n\r\n* Modified to change the stream file every time the formating string changes as time moves on.\r\n\r\n* Update to more compact version\r\n\r\n* simplify save addon logic\r\n\r\n* make mypy happy\r\n\r\n* fix compatibility with Python 3.8\r\n\r\nCo-authored-by: Maximilian Hils ", "code": "def test_tcp(tmp_path):\n sa = save.Save()\n with taddons.context(sa) as tctx:\n p = str(tmp_path / \"foo\")\n tctx.configure(sa, save_stream_file=p)\n\n tt = tflow.ttcpflow()\n sa.tcp_start(tt)\n sa.tcp_end(tt)\n\n tt = tflow.ttcpflow()\n sa.tcp_start(tt)\n sa.tcp_error(tt)\n\n tctx.configure(sa, save_stream_file=None)\n assert len(rd(p)) == 2\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 107, "n_words": 32, "vocab_size": 25, "complexity": 1, "nloc": 13, "token_counts": 98, "n_ast_nodes": 165, "n_identifiers": 20, "random_cut": "def test_tcp(tmp_path):\n sa = save.Save()\n with taddons.context(sa) as tctx:\n p = str(tmp_path / \"foo\")\n tctx.configure(sa, 
save_stream_file=p)\n\n tt = tflow.ttcpflow()\n sa.tcp_start(tt)\n sa.tcp_end(tt)\n\n tt = tflow.ttcpflow()\n sa.tcp_sta" }, { "id": 91826, "commit_id": "350ecb60d81a26ba63614fb4c87448cfeaceac7c", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_data.py", "file_name": "test_organization_metric_data.py", "fun_name": "test_private_transactions_derived_metric", "commit_message": "feat(snuba): Inject meta into snuba results converter [TET-131] (#35675)\n\n* feat(snuba): Inject meta into snuba results converter [TET-131]\r\n\r\nAdd meta data in the resulting response from get_series\r\nas we are just discarding it and returning the coalesced data portion.\r\n\r\nFixes [TET-131]", "code": "def test_private_transactions_derived_metric(self):\n response = self.get_response(\n self.organization.slug,\n project=[self.project.id],\n field=[\"transaction.all\"],\n statsPeriod=\"1m\",\n interval=\"1m\",\n )\n\n assert response.data[\"detail\"] == (\n \"Failed to parse 'transaction.all'. Must be something like 'sum(my_metric)', \"\n \"or a supported aggregate derived metric like `session.crash_free_rate`\"\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 138, "n_words": 34, "vocab_size": 32, "complexity": 1, "nloc": 12, "token_counts": 54, "n_ast_nodes": 91, "n_identifiers": 12, "random_cut": "def test_private_transactions_derived_metric(self):\n response = self.get_response(\n self.organization.slug,\n project=[self.project.id],\n field=[\"transaction.all\"],\n " }, { "id": 271601, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training.py", "file_name": "training.py", "fun_name": "from_config", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def from_config(cls, config, custom_objects=None):\n # `from_config` assumes `cls` is either `Functional` or a child class of\n # `Functional`. In the case that `cls` is meant to behave like a child class\n # of `Functional` but only inherits from the `Model` class, we have to call\n # `cls(...)` instead of `Functional.from_config`.\n from keras.engine import (\n functional,\n ) # pylint: disable=g-import-not-at-top\n\n with generic_utils.SharedObjectLoadingScope():\n functional_model_keys = [\n \"name\",\n \"layers\",\n \"input_layers\",\n \"output_layers\",\n ]\n if all(key in config for key in functional_model_keys):\n inputs, outputs, layers = functional.reconstruct_from_config(\n config, custom_objects\n )\n model = cls(\n inputs=inputs, outputs=outputs, name=config.get(\"name\")\n )\n functional.connect_ancillary_layers(model, layers)\n return model\n\n # The config does not contain all the information necessary to revive a\n # Functional model. This happens when the user creates subclassed models\n # where `get_config()` is returning insufficient information to be\n # considered a Functional model. In this case, we fall back to provide\n # all config into the constructor of the class.\n optimizer, loss = None, None\n\n optimizer_dict = config.pop(\"optimizer\", {})\n if optimizer_dict:\n optimizer = saving_lib.deserialize_keras_object(optimizer_dict)\n\n loss_dict = config.pop(\"loss\", {})\n if loss_dict:\n loss = saving_lib.deserialize_keras_object(loss_dict)\n\n input_shape = config.pop(\"input_shape\", {})\n\n try:\n model = cls(**config)\n except TypeError as e:\n raise TypeError(\n \"Unable to revive model from config. 
When overriding \"\n \"the `get_config()`, make sure that the returned \"\n \"config contains all items used as arguments in the \"\n f\"constructor to {cls}, which is the default behavior. \"\n \"You can override this default behavior by defining a \"\n \"`from_config` method to specify how to create an \"\n f\"instance of {cls.__name__} from the config. \\n\\n\"\n f\"Error encountered during deserialization:\\n{e}\"\n )\n\n if saving_lib._ENABLED: # pylint: disable=protected-access\n\n if optimizer or loss:\n model.compile(optimizer=optimizer, loss=loss)\n\n if input_shape:\n model.build(input_shape)\n\n return model\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1013, "n_words": 271, "vocab_size": 175, "complexity": 10, "nloc": 47, "token_counts": 220, "n_ast_nodes": 393, "n_identifiers": 34, "random_cut": "def from_config(cls, config, custom_objects=None):\n # `from_config` assumes `cls` is either `Functional` or a child class of\n # `Functional`. In the case that `cls` is meant to behave like a child class\n # of `Functional` but only inherits from the `Model` class, we have to call\n # `cls(...)` instead of `Functional.from_config`.\n from keras.engine import (\n functional,\n ) # pylint: disable=g-import-not-at-top\n\n with generic_utils.SharedObjectLoadingScope():\n functional_model_keys = [\n \"name\",\n \"layers\",\n \"input_" }, { "id": 162344, "commit_id": "85fee2215295b099d34350d9a9ff42c086e3aef2", "repo": "yt-dlp", "path": "yt_dlp/extractor/prx.py", "file_name": "prx.py", "fun_name": "_story_playlist_entry", "commit_message": "[PRX] Add Extractors (#2245)\n\nCloses #2144, https://github.com/ytdl-org/youtube-dl/issues/15948\r\n\r\nAuthored by: coletdjnz", "code": "def _story_playlist_entry(self, response):\n story = self._extract_story_info(response)\n if not story:\n return\n story.update({\n '_type': 'url',\n 'url': 'https://beta.prx.org/stories/%s' % story['id'],\n 'ie_key': PRXStoryIE.ie_key()\n })\n return story\n", "url": "https://github.com/yt-dlp/yt-dlp.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 100, "n_words": 22, "vocab_size": 20, "complexity": 2, "nloc": 10, "token_counts": 49, "n_ast_nodes": 88, "n_identifiers": 8, "random_cut": "def _story_playlist_entry(self, response):\n story = self._extract_story_info(response)\n if not story:\n return\n story.update({\n " }, { "id": 21558, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/rich/_win32_console.py", "file_name": "_win32_console.py", "fun_name": "erase_start_of_line", "commit_message": "Vendor in pip 22.1.2", "code": "def erase_start_of_line(self) -> None:\n \n row, col = self.cursor_position\n start = WindowsCoordinates(row, 0)\n FillConsoleOutputCharacter(self._handle, \" \", length=col, start=start)\n FillConsoleOutputAttribute(\n self._handle, self._default_attrs, length=col, start=start\n )\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 76, "n_words": 23, "vocab_size": 21, "complexity": 1, "nloc": 8, "token_counts": 57, "n_ast_nodes": 88, "n_identifiers": 12, "random_cut": "def erase_start_of_line(self) -> None:\n \n row, col = self.cursor_" }, { "id": 265603, "commit_id": "b4877e7fac49282a766ebcdd2f886f71e8d61fa5", "repo": "netbox", "path": "netbox/dcim/filtersets.py", "file_name": 
"filtersets.py", "fun_name": "filter_is_occupied", "commit_message": "#8580 add interface filters for connected", "code": "def filter_is_occupied(self, queryset, name, value):\n if value:\n return queryset.filter(Q(cable__isnull=False) | Q(mark_connected=True))\n else:\n return queryset.filter(cable__isnull=True, mark_connected=False)\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 50, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 5, "token_counts": 48, "n_ast_nodes": 73, "n_identifiers": 9, "random_cut": "def filter_is_occupied(self, queryset, name, value):\n if value:\n return queryset.filter(Q(cable__isnull=False) | Q(mark_connected=True))\n else:\n " }, { "id": 181412, "commit_id": "96297c0bad09ee82e65d56a53f96ee9814bb8360", "repo": "gradio", "path": "test/test_components.py", "file_name": "test_components.py", "fun_name": "test_legend_position", "commit_message": "LinePlot (#2807)\n\n* First draft\r\n\r\n* Fix tests\r\n\r\n* Fix pdb\r\n\r\n* Figure out stroke dash + legend position\r\n\r\n* Add legend position\r\n\r\n* Add back gif\r\n\r\n* Add demo + update demo\r\n\r\n* Format notebooks\r\n\r\n* Changelog\r\n\r\n* More changelog\r\n\r\n* Undo notebooks\r\n\r\n* Reword\r\n\r\n* Set lower bound for altair\r\n\r\n* Modify docstrings\r\n\r\n* Add LinePlot image to changelog", "code": "def test_legend_position(self):\n plot = gr.ScatterPlot(\n show_label=False,\n title=\"Two encodings\",\n x=\"Horsepower\",\n y=\"Miles_per_Gallon\",\n color=\"Acceleration\",\n color_legend_position=\"none\",\n color_legend_title=\"Foo\",\n shape=\"Origin\",\n shape_legend_position=\"none\",\n shape_legend_title=\"Bar\",\n size=\"Acceleration\",\n size_legend_title=\"Accel\",\n size_legend_position=\"none\",\n )\n output = plot.postprocess(cars)\n config = json.loads(output[\"plot\"])\n assert config[\"encoding\"][\"color\"][\"legend\"] is None\n assert config[\"encoding\"][\"shape\"][\"legend\"] is None\n assert config[\"encoding\"][\"size\"][\"legend\"] is None\n\n output = gr.ScatterPlot.update(\n value=cars,\n title=\"Two encodings\",\n x=\"Horsepower\",\n y=\"Miles_per_Gallon\",\n color=\"Acceleration\",\n color_legend_position=\"top\",\n color_legend_title=\"Foo\",\n shape=\"Origin\",\n shape_legend_position=\"bottom\",\n shape_legend_title=\"Bar\",\n size=\"Acceleration\",\n size_legend_title=\"Accel\",\n size_legend_position=\"left\",\n )\n\n config = json.loads(output[\"value\"][\"plot\"])\n assert config[\"encoding\"][\"color\"][\"legend\"][\"orient\"] == \"top\"\n assert config[\"encoding\"][\"shape\"][\"legend\"][\"orient\"] == \"bottom\"\n assert config[\"encoding\"][\"size\"][\"legend\"][\"orient\"] == \"left\"\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 447, "n_words": 71, "vocab_size": 43, "complexity": 1, "nloc": 40, "token_counts": 245, "n_ast_nodes": 439, "n_identifiers": 26, "random_cut": "def test_legend_position(self):\n plot = gr.ScatterPlot(\n show_label=False,\n title=\"Two encodings\",\n x=\"Horsepower\",\n y=\"Miles_per_Gallon\",\n color=\"Acceleration\",\n color_legend_position=\"none\",\n color_legend_title=\"Foo\",\n shape=\"Origin\",\n shape_legend_position=\"none\",\n shape_legend_title=\"Bar\",\n size=\"Acceleration\",\n size_legend_title=\"Accel\",\n size_legend_position=\"none\",\n )\n output = plot.postprocess(cars)\n config = 
json.loads(output[\"plot\"])\n assert config[\"encoding\"][\"color\"][\"legend\"] is None\n assert config[\"encoding\"][\"shape\"][\"legend\"] is None\n assert config[\"encoding\"][\"size\"][\"legend\"] is None\n\n output = gr.ScatterPlot.update(\n value=cars,\n title=\"Two encodings\",\n x=\"Horsepower\",\n y=\"Miles_per" }, { "id": 217347, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/filecmp.py", "file_name": "filecmp.py", "fun_name": "_cmp", "commit_message": "add python 3.10.4 for windows", "code": "def _cmp(a, b, sh, abs=abs, cmp=cmp):\n try:\n return not abs(cmp(a, b, sh))\n except OSError:\n return 2\n\n\n# Return a copy with items that occur in skip removed.\n#", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 45, "n_words": 28, "vocab_size": 25, "complexity": 2, "nloc": 5, "token_counts": 37, "n_ast_nodes": 56, "n_identifiers": 7, "random_cut": "def _cmp(a, b, sh, abs=abs, cmp=cmp):\n try:\n return not abs(cmp(a, b, sh))\n except OS" }, { "id": 167045, "commit_id": "730b307ad1c31840fc5ffc1543117cc92fe0ee1e", "repo": "pandas", "path": "pandas/tests/window/test_rolling.py", "file_name": "test_rolling.py", "fun_name": "test_numeric_only_series", "commit_message": "ENH: Add numeric_only to window ops (#47265)\n\n* ENH: Add numeric_only to window ops\r\n\r\n* Fix corr/cov for Series; add tests", "code": "def test_numeric_only_series(arithmetic_win_operators, numeric_only, dtype):\n # GH#46560\n kernel = arithmetic_win_operators\n ser = Series([1], dtype=dtype)\n rolling = ser.rolling(2, min_periods=1)\n op = getattr(rolling, kernel)\n if numeric_only and dtype is object:\n msg = f\"Rolling.{kernel} does not implement numeric_only\"\n with pytest.raises(NotImplementedError, match=msg):\n op(numeric_only=numeric_only)\n else:\n result = op(numeric_only=numeric_only)\n expected = ser.agg([kernel]).reset_index(drop=True).astype(float)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\"kernel\", [\"corr\", \"cov\"])\n@pytest.mark.parametrize(\"use_arg\", [True, False])\n@pytest.mark.parametrize(\"dtype\", [int, object])", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"kernel\", [\"corr\", \"cov\"])\n@pytest.mark.parametrize(\"use_arg\", [True, False])\n@pytest.mark.parametrize(\"dtype\", [int, object])", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 119, "n_words": 56, "vocab_size": 49, "complexity": 3, "nloc": 13, "token_counts": 113, "n_ast_nodes": 257, "n_identifiers": 29, "random_cut": "def test_numeric_only_series(arithmetic_win_operators, numeric_only, dtype):\n # GH#46560\n kernel = arithmetic_win_operators\n ser = Series([1], dtype=dtype)\n rolling = ser.rolling(2, min_periods=1)\n op = getattr(rolling, kernel)\n if numeric_only and dtype is object:\n msg = f\"Rolling.{kernel} does not implement numeric_only\"\n with pytest.raises(NotImplementedError, match=msg):\n op(numeric_only=numeric_only)\n else:\n result = op(numeric_only=numeric_only)\n expected = ser.agg([kernel]).reset_index(drop=True).astype(float)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\"kernel\", [\"corr\", \"c" }, { "id": 247783, "commit_id": "9d21ecf7ceab55bc19c4457b8b07401b0b1623a7", "repo": "synapse", "path": "tests/storage/test_background_update.py", "file_name": "test_background_update.py", "fun_name": 
"test_background_update_duration_set_in_config", "commit_message": "Add type hints to tests files. (#12256)", "code": "def test_background_update_duration_set_in_config(self) -> None:\n \n # Duration of one background update item\n duration_ms = 10\n\n self.get_success(\n self.store.db_pool.simple_insert(\n \"background_updates\",\n values={\"update_name\": \"test_update\", \"progress_json\": '{\"my_key\": 1}'},\n )\n )\n\n self.update_handler.side_effect = self.update\n self.update_handler.reset_mock()\n res = self.get_success(\n self.updates.do_next_background_update(False),\n by=0.02,\n )\n self.assertFalse(res)\n\n # the first update was run with the default batch size, this should be run with 500ms as the\n # desired duration", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 215, "n_words": 57, "vocab_size": 45, "complexity": 1, "nloc": 21, "token_counts": 106, "n_ast_nodes": 141, "n_identifiers": 17, "random_cut": "def test_background_update_duration_set_in_config(self) -> None:\n \n # Duration of one background update item\n duration_ms = 10\n\n self.get_success(\n self.store.db_pool.simple_insert(\n \"background_updates\",\n values={\"update_name\": \"test_update\", \"progress_json\": '{\"my_key\": 1}'},\n )\n )\n\n " }, { "id": 25674, "commit_id": "5e577ff6d55fc0de8dc85d6b1fb0c6a241c5049b", "repo": "saleor", "path": "saleor/plugins/avatax/tests/test_avatax.py", "file_name": "test_avatax.py", "fun_name": "test_api_post_request_handles_request_errors", "commit_message": "Shipping dataclass refactor + shipping types split in 3.1 (#8737)\n\n* move resolver logic to dataloader\r\n\r\n* SALEOR-4842 Convert Money to TaxedMoney for ShippingMethodData\r\n\r\n* Update calculate_price_for_shipping_method implementation\r\n\r\n* Do not refer to local shipping methods\r\n\r\n* Change import styles\r\n\r\n* Update type hints for ShippingMethod root object type\r\n\r\n* Remove obsolete TODO\r\n\r\n* Optimize channel listing queries\r\n\r\n* Optimize several queries to match benchmarks\r\n\r\n* Allow listing queries in benchmarks\r\n\r\n* SALEOR-4842 Remove unnecessary Unions\r\n\r\n* Fix dataloader implementation\r\n\r\n* SALEOR-4842 Fix tests\r\n\r\n* wip\r\n\r\n* SALEOR-4842 fix failing tests\r\n\r\n* Rename type ShippingMethod -> ShippingMethodType\r\n\r\n* Merge with SALEOR-4545\r\n\r\n* Fix tests after merge\r\n\r\n* SALEOR-4842 CR fixes\r\n\r\n* SALEOR-4842 fix price in get_valid_shipping_methods_for_order\r\n\r\n* SALEOR-4842 use dataloader to fetch channel listings in order resolve_shipping_method\r\n\r\n* SALEOR-4842 remove additional dbquery\r\n\r\n* wip\r\n\r\n* SALEOR-4842 change obsolete test\r\n\r\n* SALEOR-4842 Rename identical_taxed_money\r\n\r\n* Restore return value of clean_delivery_method\r\n\r\n* Remove redundant condition\r\n\r\n* Remove Optional from message field in ShippingMethodData\r\n\r\n* Drop identical taxed money\r\n\r\n* SALEOR-4842 remove redundant code\r\n\r\n* SALEOR-4842 fix failing tests\r\n\r\n* SALEOR-4842 deprecate apply_taxes_to_shipping as it is not used anymore\r\n\r\n* SALEOR-4842 remove obsolete code\r\n\r\n* Apply several code style fixes\r\n\r\n* Do not add shipping method channel listings to checkout info\r\n\r\n* Use the shipping dataclass in shop resolvers\r\n\r\n* Remove ChannelContext for ShippingMethodData\r\n\r\n* Add tests covering more shipping method fields\r\n\r\n* Use a dataloader to fetch shipping method listings for orders\r\n\r\n* 
SALEOR-4842 Add missing resolvers to new shipping method type\r\n\r\n* SALEOR-4842 refactor resolve_private_metadata\r\n\r\n* Extend checkout tests\r\n\r\n* SALEOR-4842 extend checkout tests fix\r\n\r\n* Test min/max order price for checkouts\r\n\r\n* Test public metadata for checkouts\r\n\r\n* Add a test for shipping method type\r\n\r\n* Test metadata for order shipping methods\r\n\r\n* Fix invalid order status for orders with waiting for approvals fulfillments (#8584)\r\n\r\n* Improve message when the cursor in pagination is invalid (#8594)\r\n\r\n* Drop unused function (#8635)\r\n\r\n* Bump dependencies (#8637)\r\n\r\n* Bump braintree from 4.12.0 to 4.13.1 (#8632)\r\n\r\nBumps [braintree](https://github.com/braintree/braintree_python) from 4.12.0 to 4.13.1.\r\n- [Release notes](https://github.com/braintree/braintree_python/releases)\r\n- [Changelog](https://github.com/braintree/braintree_python/blob/master/CHANGELOG.md)\r\n- [Commits](https://github.com/braintree/braintree_python/compare/4.12.0...4.13.1)\r\n\r\n---\r\nupdated-dependencies:\r\n- dependency-name: braintree\r\n dependency-type: direct:production\r\n update-type: version-update:semver-minor\r\n...\r\n\r\nSigned-off-by: dependabot[bot] \r\n\r\nCo-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>\r\n\r\n* Add GraphQL query cost validation (#8526)\r\n\r\n* Add query cost validator\r\n\r\n* Add costs validation files\r\n\r\n* Update changelog\r\n\r\n* DRY query cost tests a little\r\n\r\n* Commit basic cost map\r\n\r\n* Fix most of linter errors\r\n\r\n* Fix mypy error\r\n\r\n* Port query cost fix from Ariadne\r\n\r\n* Silence type error\r\n\r\n* Fix type comment\r\n\r\n* Change types\r\n\r\n* Add default cost limits for some types, and docs\r\n\r\n* Use custom GraphQL Error for query cost failure to make it detectable from client\r\n\r\n* Remove unused import\r\n\r\n* Fix comment\r\n\r\n* Use different tax code when amount is zero -3.1 (#8615)\r\n\r\n* Use different tax code when amount is zero\r\n\r\n* Update changelog\r\n\r\n* Remove explicit requirement for value in variant selection + test (#8535)\r\n\r\n* Fix Avalara tax fetching form cache (#8648)\r\n\r\n* Add reservations to preorders (#8429)\r\n\r\n* WIP preorder reservations: reserve stock, check reservations in available stock\r\n\r\n* Add reservations to preorder checkout line items\r\n\r\n* Remove unused code, fix some linter errors\r\n\r\n* Add remaining tests, fix variant availability API\r\n\r\n* Remove debug prints, fix linters\r\n\r\n* Fix tests\r\n\r\n* Change types\r\n\r\n* Fix types\r\n\r\n* Add comment and make code little more consistent\r\n\r\n* Fix type annotations\r\n\r\n* Fix test\r\n\r\n* Rebase\r\n\r\n* Bugfix reported issues\r\n\r\n* Fix zero for subtotal with avatax 3.1 (#8612)\r\n\r\n* Fix zero for subtotal with avatax\r\n\r\n* Apply changes after review\r\n\r\n* Add option to set shipping tax code for Avatax -3.1 (#8606)\r\n\r\n* Add to set shipping tax code for Avatax\r\n\r\n* Update changelog\r\n\r\n* Remove added records by mistake\r\n\r\n* Fix tests after changes in main branch\r\n\r\nCo-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>\r\n\r\n* Trigger staging deployment for tagged releases (#8650)\r\n\r\n* Fix user search (#8646)\r\n\r\n* Extend User model with search_document field\r\n\r\n* Add account utils for updating search_document value\r\n\r\n* Update search_document user field when updating user data\r\n\r\n* Add search_document value to generated random users\r\n\r\n* 
Update account search\r\n\r\n* Update User indexes\r\n\r\n* Update preparing search document value in random data\r\n\r\n* Update checking search_document value in test_account.py\r\n\r\n* Introduce account search.py file\r\n\r\n* Apply code review suggestion - unify function for preparing user search value\r\n\r\n* Fix paginations for queries with fragments (#8592)\r\n\r\n* Bump django-phonenumber-field from 5.2.0 to 6.0.0 (#8579)\r\n\r\nBumps [django-phonenumber-field](https://github.com/stefanfoulis/django-phonenumber-field) from 5.2.0 to 6.0.0.\r\n- [Release notes](https://github.com/stefanfoulis/django-phonenumber-field/releases)\r\n- [Changelog](https://github.com/stefanfoulis/django-phonenumber-field/blob/main/CHANGELOG.rst)\r\n- [Commits](https://github.com/stefanfoulis/django-phonenumber-field/compare/5.2.0...6.0.0)\r\n\r\n---\r\nupdated-dependencies:\r\n- dependency-name: django-phonenumber-field\r\n dependency-type: direct:production\r\n update-type: version-update:semver-major\r\n...\r\n\r\nSigned-off-by: dependabot[bot] \r\n\r\nCo-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>\r\n\r\n* Bump dependencies (#8679)\r\n\r\n* Drop channel listing from CheckoutInfo\r\n\r\n* Restore min/max order weight fields for ShippingMethod\r\n\r\n* SALEOR-4842 Fix failing tests\r\n\r\n* Add a test for shipping method translation\r\n\r\n* Test shipping method description\r\n\r\n* Fix message field for selected shipping method\r\n\r\n* Add tests for the remaining checkout fields\r\n\r\n* Remove type from query\r\n\r\n* Document test skip\r\n\r\n* Fix skip syntax\r\n\r\n* Fix translation queries for shipping methods\r\n\r\n* Handle string ids in ShippingMethodTranslationByIdAndLanguageCodeLoader\r\n\r\n* Add assert for translated name\r\n\r\n* Add exception for dataclass ChannelContextTypeForObjectType resolve_translation\r\n\r\n* Fix incorrect comparison\r\n\r\n* Add assert for maximum order price\r\n\r\n* SALEOR-4842 refactor resolve_private_metadata\r\n\r\n* Test private metadata handling\r\n\r\n* Restore type in tests\r\n\r\n* Update function signature\r\n\r\n* Tweak calculation helper name\r\n\r\n* Introduce CheckoutInfo changes from 3.0\r\n\r\n* Fix always-true check\r\n\r\n* Remove an unused function\r\n\r\n* Add test for SMD private metadata\r\n\r\n* Remove repeated shipping API calls from checkout lines mutations #8744\r\n\r\n* Add assertion for list_shipping_methods_for_checkout webhook calls count to benchmark\r\n\r\n* Update benchmarks\r\n\r\n* Refactor fetching delivery method info\r\n\r\n* Fix failing tests\r\n\r\n* Add select related in checkout mutations\r\n\r\n* Add quantize_price call\r\n\r\n* Add docstrings\r\n\r\n* Use the database replica in a new data loader\r\n\r\nCo-authored-by: Filip Owczarek \r\n\r\n* Fix black\r\n\r\n* Remove redundant resolvers\r\n\r\n* Revert \"Fix black\"\r\n\r\nThis reverts commit 6fc2f2b190fb528600f8d9ac16e971fbf7720128.\r\n\r\n* Revert \"Use the database replica in a new data loader\"\r\n\r\nThis reverts commit 5f423a63bd13af1bf0f37cc197fcfdffab817cca.\r\n\r\n* Drop unused ShippingMethodData fields\r\n\r\n* Restore ShippingMethod.type field\r\n\r\n* Minor code review changes\r\n\r\n* Simplify translation resolvers\r\n\r\n* Rename variables according to code review\r\n\r\n* Rephrase ShippingMethodType description\r\n\r\n* Add local variable to hide IDE errors\r\n\r\n* Add a comment about lazy evaluation\r\n\r\n* Return None for shipping methods with a missing listing\r\n\r\n* Use first instead of get when fetching 
order related listings\r\n\r\n* Flip condition\r\n\r\n* Add a missing resolver\r\n\r\n* Fix mypy error\r\n\r\n* Fix one more mypy error\r\n\r\n* Fix tax calculation for shipping in avalara (#8968)\r\n\r\n* Fix test\r\n\r\nCo-authored-by: kczan \r\nCo-authored-by: kczan <53467854+kczan@users.noreply.github.com>\r\nCo-authored-by: Iga Karbowiak <40886528+IKarbowiak@users.noreply.github.com>\r\nCo-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>\r\nCo-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>\r\nCo-authored-by: Rafał Pitoń \r\nCo-authored-by: Maciej Korycinski \r\nCo-authored-by: Paweł Kucharski \r\nCo-authored-by: Filip Owczarek \r\nCo-authored-by: Cezary Miącz \r\nCo-authored-by: Filip Owczarek ", "code": "def test_api_post_request_handles_request_errors(product, monkeypatch, avatax_config):\n mocked_response = Mock(side_effect=RequestException())\n monkeypatch.setattr(\"saleor.plugins.avatax.requests.post\", mocked_response)\n\n config = avatax_config\n url = \"https://www.avatax.api.com/some-get-path\"\n\n response = api_post_request(url, {}, config)\n\n assert mocked_response.called\n assert response == {}\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 46, "n_words": 26, "vocab_size": 21, "complexity": 1, "nloc": 8, "token_counts": 53, "n_ast_nodes": 86, "n_identifiers": 14, "random_cut": "def test_api_post_request_handles_request_errors(product, monkeypatch, avatax_config):\n mocked_response = Mock(side_effect=RequestException())\n monkeypatch.setattr(\"saleor.plugins.avatax.requests.post\", mocked_response)\n\n config = avatax_config\n url = \"https://www.avatax.api.com/some-get-path\"\n\n response = api_post_request(url, {}, config)\n\n assert mocked_respon" }, { "id": 310902, "commit_id": "3a09090a4b6e2508595500f193cd8107c7f38bf0", "repo": "core", "path": "homeassistant/components/alexa/smart_home_http.py", "file_name": "smart_home_http.py", "fun_name": "should_expose", "commit_message": "Drop use of deprecated ENTITY_CATEGORIES (#64607)\n\nCo-authored-by: epenet ", "code": "def should_expose(self, entity_id):\n \n if not self._config[CONF_FILTER].empty_filter:\n return self._config[CONF_FILTER](entity_id)\n\n entity_registry = er.async_get(self.hass)\n if registry_entry := entity_registry.async_get(entity_id):\n auxiliary_entity = registry_entry.entity_category is not None\n else:\n auxiliary_entity = False\n return not auxiliary_entity\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 103, "n_words": 28, "vocab_size": 20, "complexity": 3, "nloc": 9, "token_counts": 65, "n_ast_nodes": 105, "n_identifiers": 13, "random_cut": "def should_expose(self, entity_id):\n \n if not self._config[CONF_FILTER].empty_filter:\n return self._config[CONF_FILTER](entit" }, { "id": 60978, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/req/req_file.py", "file_name": "req_file.py", "fun_name": "preprocess", "commit_message": "upd; format", "code": "def preprocess(content):\n # type: (str) -> ReqFileLines\n \n lines_enum = enumerate(content.splitlines(), start=1) # type: ReqFileLines\n lines_enum = join_lines(lines_enum)\n lines_enum = ignore_comments(lines_enum)\n lines_enum = expand_env_variables(lines_enum)\n return lines_enum\n\n", "url": 
"https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 47, "n_words": 25, "vocab_size": 15, "complexity": 1, "nloc": 6, "token_counts": 40, "n_ast_nodes": 70, "n_identifiers": 9, "random_cut": "def preprocess(content):\n # type: (str) -> ReqFileLines\n \n lines_enum = enumerate(" }, { "id": 89641, "commit_id": "fde0bd87826bee3e66006559746133e0b8cd2a60", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_monitor_details.py", "file_name": "test_monitor_details.py", "fun_name": "test_simple", "commit_message": "chore(hybrid-cloud): use organization_slug in MonitorDetails (#42300)", "code": "def test_simple(self):\n self.login_as(user=self.user)\n monitor = self._create_monitor()\n\n with self.feature({\"organizations:monitors\": True}):\n for path_func in self._get_path_functions():\n path = path_func(monitor)\n resp = self.client.get(path)\n\n assert resp.status_code == 200, resp.content\n assert resp.data[\"id\"] == str(monitor.guid)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 119, "n_words": 28, "vocab_size": 24, "complexity": 2, "nloc": 9, "token_counts": 83, "n_ast_nodes": 137, "n_identifiers": 18, "random_cut": "def test_simple(self):\n self.login_as(user=self.user)\n monitor = self._create_monitor()\n\n with self.feature({\"organizations:monitors\": True}):\n " }, { "id": 276878, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/generic_utils_test.py", "file_name": "generic_utils_test.py", "fun_name": "test_snake_case", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_snake_case(self):\n self.assertEqual(generic_utils.to_snake_case(\"SomeClass\"), \"some_class\")\n self.assertEqual(generic_utils.to_snake_case(\"Conv2D\"), \"conv2d\")\n self.assertEqual(\n generic_utils.to_snake_case(\"ConvLSTM2D\"), \"conv_lstm2d\"\n )\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 48, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 6, "token_counts": 44, "n_ast_nodes": 81, "n_identifiers": 5, "random_cut": "def test_snake_case(self):\n self.assertEqual(generic_utils.to_snake_case(\"SomeClass\"), \"some_class\")\n self.assertEqual(gener" }, { "id": 109429, "commit_id": "bc4b0295161db92fe7232eb46ddb97eba287287d", "repo": "matplotlib", "path": "lib/matplotlib/pyplot.py", "file_name": "pyplot.py", "fun_name": "get_cmap", "commit_message": "API: Add pending deprecation to mpl.cm top level functions\n\n- matplotlib.cm.get_cmap\n- matplotlib.cm.register_cmap\n- matplotlib.cm.unregister_cmap\n- matplotlib.pyplot.register_cmap\n\nin preference for working with the ColormapRegistry on the top level module.\n\nCo-authored-by: Greg Lucas ", "code": "def get_cmap(name=None, lut=None):\n return cm._get_cmap(name=name, lut=lut)\nget_cmap.__doc__ = cm._get_cmap.__doc__\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 10, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 24, "n_ast_nodes": 50, "n_identifiers": 6, "random_cut": "def get_cmap(name=None, lut=None):\n return cm._g" }, { "id": 91323, "commit_id": "284e980df0018f8baee659999268bdd4c7d08255", "repo": "sentry", 
"path": "tests/sentry/api/test_issue_search.py", "file_name": "test_issue_search.py", "fun_name": "test_is_query_invalid", "commit_message": "ref: replace self.assertRaises with pytest.raises (#35685)\n\n* add flake8 plugin to detect assertRaises\r\n\r\n* ref: replace self.assertRaises with pytest.raises\r\n\r\n* non-sed fixes", "code": "def test_is_query_invalid(self):\n with pytest.raises(InvalidSearchQuery) as excinfo:\n parse_search_query(\"is:wrong\")\n\n assert str(excinfo.value).startswith('Invalid value for \"is\" search, valid values are')\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 40, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 4, "token_counts": 31, "n_ast_nodes": 58, "n_identifiers": 10, "random_cut": "def test_is_query_invalid(self):\n with pytest.raises(InvalidSearchQuery) as excinfo:\n parse_search_query(\"is:wrong\")\n\n assert str(excinfo.value" }, { "id": 20525, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pyparsing/__init__.py", "file_name": "__init__.py", "fun_name": "__version__", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def __version__(self):\n return \"{}.{}.{}\".format(self.major, self.minor, self.micro) + (\n \"{}{}{}\".format(\n \"r\" if self.releaselevel[0] == \"c\" else \"\",\n self.releaselevel[0],\n self.serial,\n ),\n \"\",\n )[self.releaselevel == \"final\"]\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 114, "n_words": 23, "vocab_size": 21, "complexity": 2, "nloc": 9, "token_counts": 64, "n_ast_nodes": 104, "n_identifiers": 8, "random_cut": "def __version__(self):\n return \"{}.{}.{}\".format(self.major, self.minor, self.micro) + (\n " }, { "id": 189232, "commit_id": "9d312e733120a62954ae2798cd59182b9fabddd4", "repo": "aws-cli", "path": "awscli/customizations/eks/get_token.py", "file_name": "get_token.py", "fun_name": "_get_presigned_url", "commit_message": "Add support for cluster id\n\ncr: https://code.amazon.com/reviews/CR-70518881", "code": "def _get_presigned_url(self, k8s_aws_id):\n return self._sts_client.generate_presigned_url(\n 'get_caller_identity',\n Params={K8S_AWS_ID_HEADER: k8s_aws_id},\n ExpiresIn=URL_TIMEOUT,\n HttpMethod='GET',\n )\n\n", "url": "https://github.com/aws/aws-cli.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 68, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 7, "token_counts": 33, "n_ast_nodes": 51, "n_identifiers": 10, "random_cut": "def _get_presigned_url(self, k8s_aws_id):\n return self._sts_client.generate_presigned_url(\n 'get_caller_identity',\n Params={K8S_AWS_ID_HEADER: k8s_aws_id},\n ExpiresIn=URL_TIMEOUT,\n" }, { "id": 247670, "commit_id": "dea577998f221297d3ff30bdf904f7147f3c3d8a", "repo": "synapse", 
"path": "tests/storage/test_database.py", "file_name": "test_database.py", "fun_name": "test_exception_callback", "commit_message": "Add tests for database transaction callbacks (#12198)\n\nSigned-off-by: Sean Quah ", "code": "def test_exception_callback(self) -> None:\n \n _test_txn = Mock(side_effect=ZeroDivisionError)\n after_callback, exception_callback = self._run_interaction(_test_txn)\n\n after_callback.assert_not_called()\n exception_callback.assert_called_once_with(987, 654, extra=321)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 50, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 6, "token_counts": 43, "n_ast_nodes": 71, "n_identifiers": 12, "random_cut": "def test_exception_callback(self) -> None:\n \n _test_txn = Mock(side_effect=ZeroDivisionError)\n after_cal" }, { "id": 135988, "commit_id": "e707ce4fb3717e3c05118c57f503dfbd03552ca9", "repo": "ray", "path": "rllib/algorithms/tests/test_worker_failures.py", "file_name": "test_worker_failures.py", "fun_name": "test_long_failure_period_restore_env", "commit_message": "[RLlib] Refactor `WorkerSet` on top of `FaultTolerantActorManager`. (#29938)\n\nSigned-off-by: Jun Gong ", "code": "def test_long_failure_period_restore_env(self):\n # Counter that will survive restarts.\n COUNTER_NAME = \"test_long_failure_period_restore_env\"\n counter = Counter.options(name=COUNTER_NAME).remote()\n\n config = (\n PGConfig()\n .rollouts(\n num_rollout_workers=1,\n create_env_on_local_worker=False,\n # Worker fault tolerance.\n recreate_failed_workers=True, # Restore failed workers.\n restart_failed_sub_environments=True, # And create failed envs.\n )\n .training(\n model={\"fcnet_hiddens\": [4]},\n )\n .environment(\n env=\"fault_env\",\n # Workers do not fault and no fault tolerance.\n env_config={\n \"p_done\": 0.0,\n \"max_episode_len\": 100,\n \"bad_indices\": [1],\n # Env throws error between steps 50 and 150.\n \"failure_start_count\": 30,\n \"failure_stop_count\": 80,\n \"counter\": COUNTER_NAME,\n },\n )\n .evaluation(\n evaluation_num_workers=1,\n evaluation_interval=1,\n evaluation_config={\n \"env_config\": {\n \"evaluation\": True,\n }\n },\n )\n )\n\n for _ in framework_iterator(config, frameworks=(\"tf2\", \"torch\")):\n # Reset interaciton counter.\n ray.wait([counter.reset.remote()])\n\n a = config.build()\n\n # Before train loop, workers are fresh and not recreated.\n self.assertTrue(\n not any(a.workers.foreach_worker(is_recreated, local_worker=False))\n )\n # Eval workers are also fresh and not recreated.\n self.assertTrue(\n not any(\n a.evaluation_workers.foreach_worker(\n is_recreated, local_worker=False\n )\n )\n )\n\n result = a.train()\n\n # Should see a lot of faulty episodes.\n self.assertGreaterEqual(result[\"num_faulty_episodes\"], 50)\n self.assertGreaterEqual(result[\"evaluation\"][\"num_faulty_episodes\"], 50)\n\n self.assertTrue(result[\"num_healthy_workers\"] == 1)\n # All workers are still not restored, since env are restored.\n self.assertTrue(\n not any(a.workers.foreach_worker(is_recreated, local_worker=False))\n )\n\n self.assertTrue(result[\"evaluation\"][\"num_healthy_workers\"] == 1)\n # All eval workers are still not restored, since env are recreated.\n self.assertTrue(\n not any(a.workers.foreach_worker(is_recreated, local_worker=False))\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1102, 
"n_words": 189, "vocab_size": 121, "complexity": 2, "nloc": 59, "token_counts": 307, "n_ast_nodes": 509, "n_identifiers": 42, "random_cut": "def test_long_failure_period_restore_env(self):\n # Counter that will survive restarts.\n COUNTER_NAME = \"test_long_failure_period_restore_env\"\n counter = Counter.options(name=COUNTER_NAME).remote()\n\n config = (\n PGConfig()\n .rollouts(\n num_rollout_workers=1,\n create_env_on_local_worker=False,\n # Worker fault tolerance.\n recreate_failed_workers=True, # Restore failed workers.\n restart_failed_sub_environments=True, # And create failed envs.\n )\n .training(\n model={\"fcnet_hiddens\": [4]},\n )\n .environment(\n env=\"fault_env\",\n # Workers do not fault and no fault tolerance.\n env_config={\n \"p_done\": 0.0,\n \"max_episode_len\": 100,\n \"bad_indices\": [1],\n # Env throws error between steps 50 and 150.\n \"failure_start_count\": 30,\n \"failure_s" }, { "id": 331579, "commit_id": "1aa617cb3b13832c29b4f5c4a1aba221acb4013e", "repo": "pytorch-image-models", "path": "timm/models/resnet.py", "file_name": "resnet.py", "fun_name": "resnetblur101d", "commit_message": "Add AvgPool2d anti-aliasing support to ResNet arch (as per OpenAI CLIP models), add a few blur aa models as well", "code": "def resnetblur101d(pretrained=False, **kwargs):\n \n model_args = dict(\n block=Bottleneck, layers=[3, 4, 23, 3], aa_layer=BlurPool2d,\n stem_width=32, stem_type='deep', avg_down=True, **kwargs)\n return _create_resnet('resnetblur101d', pretrained, **model_args)\n\n\n@register_model", "url": "https://github.com/huggingface/pytorch-image-models.git", "language": "Python", "ast_errors": "@register_model", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 43, "n_words": 21, "vocab_size": 21, "complexity": 1, "nloc": 5, "token_counts": 60, "n_ast_nodes": 93, "n_identifiers": 15, "random_cut": "def resnetblur101d(pretrained=False, **kwargs):\n \n model_args = dict(\n block=Bottleneck, layers=[3, 4, 23, 3], aa_layer=BlurPool2d,\n stem_width=32, stem_type='deep', avg_down=True, **kwargs)\n return _create_resnet('resnetblur101d', pretrained, **model_args)\n\n\n@register_model" }, { "id": 189839, "commit_id": "48747a74d932cc36394c1b7a69994b51855be359", "repo": "manim", "path": "manim/utils/docbuild/manim_directive.py", "file_name": "manim_directive.py", "fun_name": "visit", "commit_message": "Refactored structure of documentation; added :doc:`FAQ section ` (#2732)\n\n* move tutorials -> tutorials_guide\r\n\r\n* change title in tutorials_guides\r\n\r\n* rename: a_deeper_look -> output_and_config\r\n\r\n* splitting Tutorials\r\n\r\n* reorder index (sidebar), move some top level sections elsewhere\r\n\r\n* rename some tutorials\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* replace recommonmark with rest for rendering md files\r\n\r\n* fixed broken references\r\n\r\n* fixed **all** warnings and errors during docbuild\r\n\r\n* faq: help, more structure for landing page\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix deprecation tests\r\n\r\n* prepare some sort of skeleton for installation faq\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* ensure that pip from poetry venv is used in pipeline\r\n\r\n* added myst-parser as dev dependency\r\n\r\n* remove fixed sphinx version from requirement.txt, don't re-install dependencies\r\n\r\n* 
move and improve versions and troubleshooting content to FAQ\r\n\r\n* resolve broken references\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* help blacken-docs\r\n\r\n* new: navigating the docs\r\n\r\n* make different versions question more prominent\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fixed order of tutorials\r\n\r\n* added explicit references to building blocks and quickstart tutorial\r\n\r\n* docs -> doc\r\n\r\n* change a page-reference to a paragraph-reference\r\n\r\n* pypi manimlib, split answer regarding chocolatey failing\r\n\r\n* added a note on #beginner-resources\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def visit(self, node, name=\"\"):\n self.visit_admonition(node, name)\n if not isinstance(node[0], nodes.title):\n node.insert(0, nodes.title(\"skip-manim\", \"Example Placeholder\"))\n\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 26, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 4, "token_counts": 48, "n_ast_nodes": 77, "n_identifiers": 9, "random_cut": "def visit(self, node, name=\"\"):\n self.visit_admonition(node, name)\n if not isinstance(node[0], nodes.title):\n node.inse" }, { "id": 19068, "commit_id": "964f5ab75098c55f028f8acfeeae05df35ea68d5", "repo": "mlflow", "path": "tests/models/test_default_evaluator.py", "file_name": "test_default_evaluator.py", "fun_name": "test_get_classifier_per_class_metrics", "commit_message": "Evaluation Default evaluator (#5092)\n\n* init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* rename module\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert black change\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* change module path\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* lazy load pyspark\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert export\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix curcit import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix conftest.py\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* Revert \"fix conftest.py\"\r\n\r\nThis reverts commit 
2ea29c62bfffc5461bf77f3da15b5c00f51de19b.\r\n\r\n* fix tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* default evaluator\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update hash algo\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comment\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix lint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add more tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix lint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update shap explainer\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* remove scikitplot dep\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add pr curve\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap.summary_plot\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* log explainer\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* improve explainer code\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update shap init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update explainer creating\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update predict_proba\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add multi-class metrics artifacts\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add log_loss metric\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* lazy load pyspark\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address ben comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* prevent show shap logo, add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* support spark model\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap version check\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update docs, loose classifier label limit\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* multiclass classifier merge metrics/plots\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* zfill feature name\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: 
Weichen Xu \r\n\r\n* add config max_num_classes_threshold_logging_roc_pr_curve_for_multiclass_classifier\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* improve label handling\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* black\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* increase plot dpi\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix test fixture\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* use matplot rc_context\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix shap import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor EvaluationDataset\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* limit user specify shap algos\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* clean\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update evaluation dataset\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* use svg fig\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert svg\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* curve dashline, legend display ap/roc, legend move out\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* linewidth 1\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* keyword arguments for evaluate, fix tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* mark abc.abstractmethod, kw args for ModelEvaluator methods\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu ", "code": "def test_get_classifier_per_class_metrics():\n y = [0, 1, 0, 1, 0, 1, 0, 1, 1, 0]\n y_pred = [0, 1, 1, 0, 1, 1, 0, 1, 1, 0]\n\n expected_metrics = {\n \"true_negatives\": 3,\n \"false_positives\": 2,\n \"false_negatives\": 1,\n \"true_positives\": 4,\n \"recall\": 0.8,\n \"precision\": 0.6666666666666666,\n \"f1_score\": 0.7272727272727272,\n }\n metrics = _get_classifier_per_class_metrics(y, y_pred)\n assert_dict_equal(metrics, expected_metrics, rtol=1e-3)\n\n", "url": "https://github.com/mlflow/mlflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 117, "n_words": 51, "vocab_size": 31, "complexity": 1, "nloc": 14, "token_counts": 108, "n_ast_nodes": 142, "n_identifiers": 8, "random_cut": "def test_get_classifier_per_class_metrics():\n y = [0, 1, 0, 1, 0, 1, 0, 1, 1, 0]\n y_pred = [0, 1, 1, 0, 1, 1, 0, 1, 1, 0]\n\n expected_metrics = {\n \"true_negatives\": 3,\n \"false_positives\": 2,\n \"false_negatives\": 1,\n \"true_positives\": 4,\n \"recall\": 0.8,\n \"precision\": 0.6666666666666666,\n \"f" }, { "id": 150846, "commit_id": "4b7e640f31f2a35ba3b73a3bfbbfb2882ecb7a81", "repo": "freqtrade", "path": "freqtrade/strategy/interface.py", "file_name": "interface.py", "fun_name": "load_freqAI_model", "commit_message": "reduce code duplication, optimize auto data download per tf", "code": "def load_freqAI_model(self) -> None:\n if self.config.get('freqai', {}).get('enabled', False):\n # Import here to avoid importing this if freqAI is disabled\n from freqtrade.freqai.data_kitchen import (download_all_data_for_training)\n from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver\n self.freqai = FreqaiModelResolver.load_freqaimodel(self.config)\n self.freqai_info = self.config[\"freqai\"]\n\n # download the desired data in dry/live\n if self.config.get('runmode') 
in (RunMode.DRY_RUN, RunMode.LIVE):\n logger.info(\n \"Downloading all training data for all pairs in whitelist and \"\n \"corr_pairlist, this may take a while if you do not have the \"\n \"data saved\"\n )\n # data_load_timerange = get_required_data_timerange(self.config)\n download_all_data_for_training(self.dp, self.config)\n\n else:\n # Gracious failures if freqAI is disabled but \"start\" is called.", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 309, "n_words": 91, "vocab_size": 69, "complexity": 3, "nloc": 17, "token_counts": 121, "n_ast_nodes": 180, "n_identifiers": 19, "random_cut": "def load_freqAI_model(self) -> None:\n if self.config.get('freqai', {}).get('enabled', False):\n # Import here to avoid importing this if freqAI is disabled\n from freqtrade.freqai.data_kitchen import (download_all_data_for_training)\n from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver\n self.freqai = FreqaiModelResolver.load_freqaimodel(self.config)\n self.freqai_info = self.config[\"freqai\"]\n\n # download the desired data in dry/live\n if self.config.get('runmode') in (RunMode.DRY_RUN, RunMode.LIVE):\n logger.info(\n \"Downloading all training data for all pairs in whitelist and \"\n \"corr_pairlist, this may take a while if you do not have the \"\n \"data saved\"\n " }, { "id": 4504, "commit_id": "55ae3f856acfa02f1611646ea3ad512941fd74f9", "repo": "airbyte", "path": "octavia-cli/unit_tests/test_apply/test_resources.py", "file_name": "test_resources.py", "fun_name": "test_catalog", "commit_message": "🐛 octavia-cli: propagate open api spec update (#11441)", "code": "def test_catalog(self, mocker, mock_api_client, local_configuration):\n mocker.patch.object(resources.Source, \"source_discover_schema_request_body\")\n source = resources.Source(mock_api_client, \"workspace_id\", local_configuration, \"bar.yaml\")\n source.api_instance = mocker.Mock()\n catalog = source.catalog\n assert catalog == source.api_instance.discover_schema_for_source.return_value.catalog\n source.api_instance.discover_schema_for_source.assert_called_with(\n source.source_discover_schema_request_body, _check_return_type=False\n )\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 86, "n_words": 27, "vocab_size": 24, "complexity": 1, "nloc": 9, "token_counts": 79, "n_ast_nodes": 123, "n_identifiers": 18, "random_cut": "def test_catalog(self, mocker, mock_api_client, local_configuration):\n mocker.patch.object(resources.Source, \"source_discover_schema_request_body\")\n source = resources.Source(mock_api_client, \"workspace_id\", l" }, { "id": 5128, "commit_id": "8c394b3734e2ebfd8738e372b85556e6b83ce9e9", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-github/unit_tests/test_stream.py", "file_name": "test_stream.py", "fun_name": "test_stream_commit_comment_reactions_incremental_read", "commit_message": "🐛 Source Github: add incremental for `repositories`, `workflows`, `pull_request_comment_reactions`, `issue_reactions`, `issue_comment_reactions`, `commit_comment_reactions` (#12294)\n\nSigned-off-by: Sergey Chvalyuk ", "code": "def test_stream_commit_comment_reactions_incremental_read():\n\n repository_args = {\"repositories\": [\"airbytehq/integration-test\"], \"page_size_for_large_streams\": 100}\n stream = CommitCommentReactions(**repository_args)\n\n responses.add(\n \"GET\",\n 
\"https://api.github.com/repos/airbytehq/integration-test/comments\",\n json=[\n {\"id\": 55538825, \"updated_at\": \"2021-01-01T15:00:00Z\"},\n {\"id\": 55538826, \"updated_at\": \"2021-01-01T16:00:00Z\"},\n ],\n )\n\n responses.add(\n \"GET\",\n \"https://api.github.com/repos/airbytehq/integration-test/comments/55538825/reactions\",\n json=[\n {\"id\": 154935429, \"created_at\": \"2022-01-01T15:00:00Z\"},\n {\"id\": 154935430, \"created_at\": \"2022-01-01T16:00:00Z\"},\n ],\n )\n\n responses.add(\n \"GET\",\n \"https://api.github.com/repos/airbytehq/integration-test/comments/55538826/reactions\",\n json=[{\"id\": 154935431, \"created_at\": \"2022-01-01T17:00:00Z\"}],\n )\n\n stream_state = {}\n records = read_incremental(stream, stream_state)\n\n assert stream_state == {\n \"airbytehq/integration-test\": {\n \"55538825\": {\"created_at\": \"2022-01-01T16:00:00Z\"},\n \"55538826\": {\"created_at\": \"2022-01-01T17:00:00Z\"},\n }\n }\n\n assert records == [\n {\"id\": 154935429, \"comment_id\": 55538825, \"created_at\": \"2022-01-01T15:00:00Z\", \"repository\": \"airbytehq/integration-test\"},\n {\"id\": 154935430, \"comment_id\": 55538825, \"created_at\": \"2022-01-01T16:00:00Z\", \"repository\": \"airbytehq/integration-test\"},\n {\"id\": 154935431, \"comment_id\": 55538826, \"created_at\": \"2022-01-01T17:00:00Z\", \"repository\": \"airbytehq/integration-test\"},\n ]\n\n responses.add(\n \"GET\",\n \"https://api.github.com/repos/airbytehq/integration-test/comments\",\n json=[\n {\"id\": 55538825, \"updated_at\": \"2021-01-01T15:00:00Z\"},\n {\"id\": 55538826, \"updated_at\": \"2021-01-01T16:00:00Z\"},\n {\"id\": 55538827, \"updated_at\": \"2022-02-01T15:00:00Z\"},\n ],\n )\n\n responses.add(\n \"GET\",\n \"https://api.github.com/repos/airbytehq/integration-test/comments/55538826/reactions\",\n json=[\n {\"id\": 154935431, \"created_at\": \"2022-01-01T17:00:00Z\"},\n {\"id\": 154935432, \"created_at\": \"2022-02-01T16:00:00Z\"},\n ],\n )\n\n responses.add(\n \"GET\",\n \"https://api.github.com/repos/airbytehq/integration-test/comments/55538827/reactions\",\n json=[{\"id\": 154935433, \"created_at\": \"2022-02-01T17:00:00Z\"}],\n )\n\n records = read_incremental(stream, stream_state)\n\n assert records == [\n {\"id\": 154935432, \"comment_id\": 55538826, \"created_at\": \"2022-02-01T16:00:00Z\", \"repository\": \"airbytehq/integration-test\"},\n {\"id\": 154935433, \"comment_id\": 55538827, \"created_at\": \"2022-02-01T17:00:00Z\", \"repository\": \"airbytehq/integration-test\"},\n ]\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 554, "n_words": 162, "vocab_size": 63, "complexity": 1, "nloc": 64, "token_counts": 361, "n_ast_nodes": 668, "n_identifiers": 10, "random_cut": "def test_stream_commit_comment_reactions_incremental_read():\n\n repository_args = {\"repositories\": [\"airbytehq/integration-test\"], \"page_size_for_large_streams\": 100}\n stream = CommitCommentReactions(**repository_args)\n\n responses.add(\n \"GET\",\n \"https://api.github.com/repos/airbytehq/integration-test/comments\",\n json=[\n {\"id\": 55538825, \"updated_at\": \"2021-01-01T15:00:00Z\"},\n {\"id\": 55538826, \"updated_at\": \"2021-01-01T16:00:00Z\"},\n ],\n )\n\n responses.add(\n \"GET\",\n \"https://api.github.com/repos/airbytehq/integration-test/comments/55538825/reactions\",\n json=[\n {\"id\": 154935429, \"created_at\": \"2022-01-01T15:00:00Z\"},\n {\"id\": 154935430, \"created_at\": 
\"2022-01-01T16:00:00Z\"},\n ],\n )\n\n responses.add(\n \"GET\",\n \"https://api.github.com/repos/airbytehq/integration-test/comments/55538826/reactions\",\n json=[{\"id\": 154935431, \"created_at\": \"2022-01-01T17:00:00Z\"}],\n )\n\n stream_state = {}\n records = read_incremental(stream, stream_state)\n\n assert stream_state == {\n \"airbytehq/integration-test\": {\n \"55538825\": {\"created_at\": \"2022-01-01T16:00:00Z\"},\n \"55538826\": {\"created_at\": \"2022-01-01T17:00:00Z\"},\n }\n }\n\n assert records == [\n {\"id\": 154935429, \"comment_id\": 55538825, \"created_at\": \"2022-01-01T15:00:00Z\", \"repository\": \"airbytehq/integration-test\"},\n {\"id\": 154935430, \"comment_id\": 55538825, \"created_at\": \"2022-01-01T16:00:00Z\", \"repository\": \"airbytehq/integration-test\"},\n {\"id\": 154935431, \"comment_id\": 55538826, \"created_at\": \"2022-01-01T17:00:00Z\", \"repository\": \"airbytehq/integration-test\"},\n ]\n\n responses.add(\n \"GET\",\n \"https://api.github.com/repos/airbytehq/integration-test/comments\",\n json=[\n {\"id\": 55538825, \"updated_at\": \"2021-01-01T15:00:00Z\"},\n {\"id\": 55538826, \"updated_at\": \"2021-01-01T16:00:00Z\"},\n {\"id\": 55538827, \"updated_at\": \"2022-02-01T15:00:00Z\"},\n ],\n )\n\n responses.add(\n \"GET\",\n \"https://api.github.com/repos/airbytehq/integration-test/comments/55538826/reactions\",\n json=[\n {\"id\": 154935431, \"created_at\": \"2022-01-01T17:00:00Z\"},\n {\"id\": 154935432, \"created_at\": \"2022-02-01T16:00:00Z\"},\n ],\n )\n\n responses.add(\n \"GET\",\n \"https://api.github.com/repos/airbytehq/integration-test/comments/55538827/reactions\",\n json=[{\"id\": 154935433, \"created_at\": \"2022-02-01T17:00:00Z\"}],\n )\n\n records = read_incremental(stream, stream_state)\n\n assert records == [\n {\"id\": 154935432, \"comment_id\": 55538826, \"created_at\": \"2022-02-01T16:00:00Z\", \"repository\": \"airbytehq/integration-test\"},\n {\"id\": 154935433, \"comment_id\": 55538827, \"created_at\": \"2022-02-01T17:00:00Z\", \"repository\": \"airbytehq/integration-test\"},\n ]\n" }, { "id": 261121, "commit_id": "9a76368c4e6a46088c9669b2ea74a426c105d252", "repo": "scikit-learn", "path": "sklearn/metrics/tests/test_pairwise.py", "file_name": "test_pairwise.py", "fun_name": "test_cosine_distances", "commit_message": "TST use global_dtype in sklearn/metrics/tests/test_pairwise.py (#22666)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: jeremie du boisberranger \r\nCo-authored-by: Olivier Grisel ", "code": "def test_cosine_distances():\n # Check the pairwise Cosine distances computation\n rng = np.random.RandomState(1337)\n x = np.abs(rng.rand(910))\n XA = np.vstack([x, x])\n D = cosine_distances(XA)\n assert_allclose(D, [[0.0, 0.0], [0.0, 0.0]], atol=1e-10)\n # check that all elements are in [0, 2]\n assert np.all(D >= 0.0)\n assert np.all(D <= 2.0)\n # check that diagonal elements are equal to 0\n assert_allclose(D[np.diag_indices_from(D)], [0.0, 0.0])\n\n XB = np.vstack([x, -x])\n D2 = cosine_distances(XB)\n # check that all elements are in [0, 2]\n assert np.all(D2 >= 0.0)\n assert np.all(D2 <= 2.0)\n # check that diagonal elements are equal to 0 and non diagonal to 2\n assert_allclose(D2, [[0.0, 2.0], [2.0, 0.0]])\n\n # check large random matrix\n X = np.abs(rng.rand(1000, 5000))\n D = cosine_distances(X)\n # check that diagonal elements are equal to 0\n assert_allclose(D[np.diag_indices_from(D)], [0.0] * D.shape[0])\n assert np.all(D >= 0.0)\n assert np.all(D <= 2.0)\n\n", "url": 
"https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 207, "n_words": 133, "vocab_size": 67, "complexity": 1, "nloc": 19, "token_counts": 256, "n_ast_nodes": 340, "n_identifiers": 20, "random_cut": "def test_cosine_distances():\n # Check the pairwise Cosine distances computation\n rng = np.random.RandomState(1337)\n x = np.abs(rng.rand(" }, { "id": 80025, "commit_id": "f8688985f6b88cfff719fb61943666d40bc8e55b", "repo": "wagtail", "path": "wagtail/admin/views/generic/mixins.py", "file_name": "mixins.py", "fun_name": "setup", "commit_message": "Extract `EditViewOptionalFeaturesMixin` from generic `EditView`", "code": "def setup(self, request, *args, **kwargs):\n # Need to set these here as they are used in get_object()\n self.request = request\n self.args = args\n self.kwargs = kwargs\n\n self.revision_enabled = self.model and issubclass(self.model, RevisionMixin)\n self.draftstate_enabled = self.model and issubclass(self.model, DraftStateMixin)\n\n # Set the object before super().setup() as LocaleMixin.setup() needs it\n self.object = self.get_object()\n super().setup(request, *args, **kwargs)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 116, "n_words": 54, "vocab_size": 43, "complexity": 3, "nloc": 8, "token_counts": 83, "n_ast_nodes": 130, "n_identifiers": 14, "random_cut": "def setup(self, request, *args, **kwargs):\n # Need to set these he" }, { "id": 186984, "commit_id": "d6b952d26e00256e4935f55a035f9284f9627bd2", "repo": "streamlink", "path": "tests/plugins/conftest.py", "file_name": "conftest.py", "fun_name": "pytest_collection_modifyitems", "commit_message": "tests.plugins: implement should_match_groups\n\nAdd parametrized tests for plugin URL capture groups via the newly added\nshould_match_groups attribute in PluginCanHandleUrl. Capture groups can\nbe tested either with dictionaries (named) or tuples (unnamed).\n\nCapture group URL fixtures automatically get appended to the regular\npositive URL fixtures.\n\n- Call register_assert_rewrite(\"tests.plugins\"), so that pytest properly\n prints diffs of given and expected values in assertion failures raised\n by parametrized tests in tests.plugins.\n- Implement should_match_groups logic in PluginCanHandleUrl class\n- Add short descriptions to each URL test fixtures list\n- Rename parametrized test_capture_groups test\n- Remove skipped parametrized tests from the collection, so that plugins\n without capture group tests or with only capture group tests don't\n unnecessarily emit a warning.", "code": "def pytest_collection_modifyitems(session, config, items): # pragma: no cover\n # remove empty parametrized tests\n session.items = list(filter(lambda item: not any(\n marker.name == \"skip\" and str(marker.kwargs.get(\"reason\", \"\")).startswith(\"got empty parameter set\")\n for marker in item.own_markers\n ), items))\n\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 57, "n_words": 34, "vocab_size": 32, "complexity": 3, "nloc": 5, "token_counts": 58, "n_ast_nodes": 98, "n_identifiers": 15, "random_cut": "def pytest_collection_modifyitems(session, config, items): # pragma: no cover\n # remove empty parametrized tests\n session.items = list(filter(lambda item: not any(\n marker.name == \"skip\" and str(marker.kwargs.get(\"reason\", \"\"))." 
}, { "id": 310816, "commit_id": "0cd3302ebc5951c9ecd00ab1e6cd9ae28173fab5", "repo": "core", "path": "homeassistant/components/advantage_air/climate.py", "file_name": "climate.py", "fun_name": "set_myzone", "commit_message": "Start depreciation of custom services in Advantage Air (#58777)\n\nCo-authored-by: J. Nick Koston ", "code": "async def set_myzone(self, **kwargs):\n \n _LOGGER.warning(\n \"The advantage_air.set_myzone service has been deprecated and will be removed in a future version, please use the select.select_option service on the MyZone entity\"\n )\n await self.async_change(\n {self.ac_key: {\"info\": {\"myZone\": self._zone[\"number\"]}}}\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 93, "n_words": 36, "vocab_size": 33, "complexity": 1, "nloc": 7, "token_counts": 41, "n_ast_nodes": 76, "n_identifiers": 8, "random_cut": "async def set_myzone(self, **kwargs):\n \n _LOGGER.warning(\n \"The advantage_air.set_myzone service has been deprecated and will be remov" }, { "id": 106885, "commit_id": "5b8b7f267cfaf76a2a39a727ef31a62b3909a093", "repo": "visdom", "path": "py/visdom/server/handlers/base_handlers.py", "file_name": "base_handlers.py", "fun_name": "write_error", "commit_message": "apply black py to all python files", "code": "def write_error(self, status_code, **kwargs):\n logging.error(\"ERROR: %s: %s\" % (status_code, kwargs))\n if \"exc_info\" in kwargs:\n logging.info(\n \"Traceback: {}\".format(traceback.format_exception(*kwargs[\"exc_info\"]))\n )\n if self.settings.get(\"debug\") and \"exc_info\" in kwargs:\n logging.error(\"rendering error page\")\n exc_info = kwargs[\"exc_info\"]\n # exc_info is a tuple consisting of:\n # 1. The class of the Exception\n # 2. The actual Exception that was thrown\n # 3. 
The traceback opbject\n try:\n params = {\n \"error\": exc_info[1],\n \"trace_info\": traceback.format_exception(*exc_info),\n \"request\": self.request.__dict__,\n }\n\n # TODO make an error.html page\n self.render(\"error.html\", **params)\n logging.error(\"rendering complete\")\n except Exception as e:\n logging.error(e)\n", "url": "https://github.com/fossasia/visdom.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 375, "n_words": 83, "vocab_size": 68, "complexity": 5, "nloc": 19, "token_counts": 130, "n_ast_nodes": 234, "n_identifiers": 19, "random_cut": "def write_error(self, status_code, **kwargs):\n logging.error(\"ERROR: %s: %s\" % (status_code, kwargs))\n if \"exc_info\" in kwargs:\n logging.info(\n \"Traceback: {}\".format(traceback.format_exception(*kwargs[\"exc_info\"]))\n )\n if self.settings.get(\"debug\") and \"exc_info\" in kwargs:\n logging.error(\"rendering error page\")\n " }, { "id": 10148, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "tests/unit/flow-async/test_asyncflow.py", "file_name": "test_asyncflow.py", "fun_name": "test_return_results_async_flow", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing 
container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt 
flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing 
(#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* 
test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "async def test_return_results_async_flow(return_results, protocol, flow_cls):\n with flow_cls(\n protocol=protocol, asyncio=True, return_results=return_results\n ).add() as f:", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": 
"", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 25, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 6, "token_counts": 65, "n_ast_nodes": 50, "n_identifiers": 7, "random_cut": "async def test_return_results_async_flow(return_results, protocol, flow_cls):\n with flow_cls(\n protocol=protocol, asyncio=True, return_res" }, { "id": 180370, "commit_id": "71bcfdbe929c83c10d761018042078041e956b81", "repo": "gradio", "path": "gradio/components.py", "file_name": "components.py", "fun_name": "serialize", "commit_message": "Callable blocks (#1437)\n\n* format backend\r\n\r\n* blocks callable\r\n\r\n* call blocks\r\n\r\n* format\r\n\r\n* fixed upload\r\n\r\n* fix mix\r\n\r\n* formatting\r\n\r\n* formatting\r\n\r\n* formatting\r\n\r\n* added serialization/deserialization for video\r\n\r\n* formatting\r\n\r\n* blocks\r\n\r\n* formatting\r\n\r\n* fix tests\r\n\r\n* formatting", "code": "def serialize(self, x, called_directly):\n data = processing_utils.encode_url_or_file_to_base64(x)\n return {\"name\": x, \"data\": data, \"is_example\": False}\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 31, "n_ast_nodes": 51, "n_identifiers": 7, "random_cut": "def serialize(self, x, called_directly):\n data = processing_utils.encode_u" }, { "id": 43348, "commit_id": "e477f4ba6cd15fabbfe5210c99947bcb70ddac4f", "repo": "airflow", "path": "tests/providers/amazon/aws/operators/test_appflow.py", "file_name": "test_appflow.py", "fun_name": "test_run_full", "commit_message": "Amazon appflow (#24057)\n\n* Add Amazon AppFlow hook.\r\n\r\n* Add Amazon AppFlow operators.\r\n\r\n* Add Amazon AppFlow examples.\r\n\r\n* Add Amazon Appflow docs.\r\n\r\n* Apply comments/docs patterns.\r\n\r\n* Removing the \"private\" attribute signal and more.\r\n\r\n* Fix task_ids for example_appflow.\r\n\r\n* Move datetime_to_epoch() to utils and more.\r\n\r\n* Fix the AppflowBaseOperator name.\r\n\r\n* Ignore AppflowBaseOperator during structure check.\r\n\r\n* test_short_circuit refactor.\r\n\r\n* Add get_airflow_version.\r\n\r\n* Update airflow/providers/amazon/aws/hooks/appflow.py\r\n\r\nCo-authored-by: Josh Fell <48934154+josh-fell@users.noreply.github.com>\r\n\r\n* Update airflow/providers/amazon/aws/operators/appflow.py\r\n\r\nCo-authored-by: Josh Fell <48934154+josh-fell@users.noreply.github.com>\r\n\r\n* Update airflow/providers/amazon/aws/operators/appflow.py\r\n\r\nCo-authored-by: Josh Fell <48934154+josh-fell@users.noreply.github.com>\r\n\r\n* Update airflow/providers/amazon/aws/operators/appflow.py\r\n\r\nCo-authored-by: Josh Fell <48934154+josh-fell@users.noreply.github.com>\r\n\r\n* Update airflow/providers/amazon/aws/operators/appflow.py\r\n\r\nCo-authored-by: Josh Fell <48934154+josh-fell@users.noreply.github.com>\r\n\r\n* Update airflow/providers/amazon/aws/operators/appflow.py\r\n\r\nCo-authored-by: Josh Fell <48934154+josh-fell@users.noreply.github.com>\r\n\r\n* Addressing Josh's requests.\r\n\r\n* Add cached_property to AppflowHook\r\n\r\n* Update airflow/providers/amazon/aws/hooks/appflow.py\r\n\r\nCo-authored-by: Josh Fell <48934154+josh-fell@users.noreply.github.com>\r\n\r\n* Update airflow/providers/amazon/aws/operators/appflow.py\r\n\r\nCo-authored-by: Josh Fell <48934154+josh-fell@users.noreply.github.com>\r\n\r\n* Update airflow/providers/amazon/aws/operators/appflow.py\r\n\r\nCo-authored-by: Josh Fell 
<48934154+josh-fell@users.noreply.github.com>\r\n\r\n* Update Josh's comment.\r\n\r\n* Update cached_property import.\r\n\r\n* Fix mypy.\r\n\r\nCo-authored-by: Josh Fell <48934154+josh-fell@users.noreply.github.com>", "code": "def test_run_full(appflow_conn, ctx):\n operator = AppflowRunFullOperator(**DUMP_COMMON_ARGS)\n operator.execute(ctx) # type: ignore\n run_assertions_base(appflow_conn, [])\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 21, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 27, "n_ast_nodes": 45, "n_identifiers": 8, "random_cut": "def test_run_full(appflow_conn, ctx):\n operator = AppflowRunFullOperator(**DUMP_COMMON_ARGS)\n operator.execute(ctx) # type: ignore\n run_assertions_base(appflow_conn, [])\n\n" }, { "id": 163493, "commit_id": "3743dbcb60a1f0a5a1a7e3d84ac27c10c6090a64", "repo": "pandas", "path": "pandas/plotting/_matplotlib/core.py", "file_name": "core.py", "fun_name": "_get_index_name", "commit_message": "BUG: Fix for xlabel/ylabel in barh plot (#45145)", "code": "def _get_index_name(self) -> str | None:\n if isinstance(self.data.index, ABCMultiIndex):\n name = self.data.index.names\n if com.any_not_none(*name):\n name = \",\".join([pprint_thing(x) for x in name])\n else:\n name = None\n else:\n name = self.data.index.name\n if name is not None:\n name = pprint_thing(name)\n\n # GH 45145, override the default axis label if one is provided.\n index_name = self._get_custom_index_name()\n if index_name is not None:\n name = pprint_thing(index_name)\n\n return name\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 214, "n_words": 62, "vocab_size": 38, "complexity": 6, "nloc": 15, "token_counts": 105, "n_ast_nodes": 171, "n_identifiers": 16, "random_cut": "def _get_index_name(self) -> str | None:\n if isinstance(self.data.index, ABCMultiIndex):\n name = self.data.index.names\n if com.any_not_none(*name):\n name = \",\".join([pprint_thing(x) for x in name])\n else:\n name = None\n else:\n name = self.data.index.name\n if name is not None:\n name = pprint_thing(name)\n\n # GH 4514" }, { "id": 310565, "commit_id": "12780a3173043cd96ce3da68880371f03c1750aa", "repo": "core", "path": "tests/components/webostv/test_media_player.py", "file_name": "test_media_player.py", "fun_name": "test_client_disconnected", "commit_message": "Add webostv 100% tests coverage for media player (#64723)", "code": "async def test_client_disconnected(hass, client, monkeypatch):\n \n await setup_webostv(hass)\n monkeypatch.setattr(client, \"is_connected\", Mock(return_value=False))\n monkeypatch.setattr(client, \"connect\", Mock(side_effect=asyncio.TimeoutError))\n\n async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=20))\n await hass.async_block_till_done()\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 37, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 6, "token_counts": 70, "n_ast_nodes": 116, "n_identifiers": 17, "random_cut": "async def test_client_disconnected(hass, client, monkeypatch):\n \n await setup_webostv(hass)\n monkeypatch.setattr(client, \"" }, { "id": 79784, "commit_id": "f6a92bf7d24eac1aec39c145e5f374c76937346d", "repo": "wagtail", "path": "wagtail/tests/test_form_data_utils.py", "file_name": "test_form_data_utils.py", 
"fun_name": "test_html_only", "commit_message": "Add new test assertions to WagtailPageTestCase\n\n- Add assertions, and move them to a new TestCase that allows use without forcing authentication for every test\n- Add routes and preview modes to RoutablePageTest to facilitate testing\n- Move assertion tests out of admin app\n- Add custom test assertions for pages\n- Use default value for exclude_csrf in assertPageIsEditable\n- Use publish action when posting in assertPageIsEditable for better coverage\n- Update assertPageIsEditable to always make both a GET and POST request", "code": "def test_html_only(self):\n # data should be extracted from the 'first' form by default\n result = querydict_from_html(self.html)\n self.assertEqual(list(result.lists()), self.personal_details)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 38, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 3, "token_counts": 30, "n_ast_nodes": 50, "n_identifiers": 9, "random_cut": "def test_html_only(self):\n # data should be extracted from the 'first' form by default\n result = querydict_from_html(self.html)\n self.assertEqual(list(result.lists()), self.personal_details)\n" }, { "id": 287954, "commit_id": "2667f0b792b1f936aeb5958cc40d5dee26350bf6", "repo": "core", "path": "tests/components/plugwise/conftest.py", "file_name": "conftest.py", "fun_name": "mock_smile_anna_3", "commit_message": "Bump plugwise to v0.21.3, add related new features (#76610)\n\nCo-authored-by: Franck Nijhof ", "code": "def mock_smile_anna_3() -> Generator[None, MagicMock, None]:\n \n chosen_env = \"m_anna_heatpump_idle\"\n with patch(\n \"homeassistant.components.plugwise.gateway.Smile\", autospec=True\n ) as smile_mock:\n smile = smile_mock.return_value\n\n smile.gateway_id = \"015ae9ea3f964e668e490fa39da3870b\"\n smile.heater_id = \"1cbf783bb11e4a7c8a6843dee3a86927\"\n smile.smile_version = \"4.0.15\"\n smile.smile_type = \"thermostat\"\n smile.smile_hostname = \"smile98765\"\n smile.smile_name = \"Anna\"\n\n smile.connect.return_value = True\n\n smile.notifications = _read_json(chosen_env, \"notifications\")\n smile.async_update.return_value = _read_json(chosen_env, \"all_data\")\n\n yield smile\n\n\n@pytest.fixture", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 146, "n_words": 51, "vocab_size": 39, "complexity": 1, "nloc": 17, "token_counts": 95, "n_ast_nodes": 180, "n_identifiers": 21, "random_cut": "def mock_smile_anna_3() -> Generator[None, MagicMock, None]:\n \n chosen_env = \"m_anna_heatpump_idle\"\n with patch(\n \"homeassistant.components.plugwise.gateway.Smile\", autospec=True\n ) as smile_mock:\n smile = smile_mock.return_value\n\n smile.gateway_id = \"015ae9ea3f964e668e490fa39da3870b\"\n smile.heater_id = \"1cbf783bb11e4a7c8a6843dee3a86927\"\n smile.smile_version = \"4.0.15\"\n smile.smile_type = \"thermostat\"\n smile.smile_hostname = \"smile98765\"\n smile.smile_name = \"Anna\"\n\n smile.connect.return_value = True\n\n smile.notifications = _read_json(chosen_env, \"notifications\")\n smile.async_update.return_value = _read_json(chosen_env, \"a" }, { "id": 86949, "commit_id": "7dedc5c46349e59a41e248d674f4dd0f372e5a48", "repo": "sentry", "path": "src/sentry/db/models/manager/base.py", "file_name": "base.py", "fun_name": "get_queryset", "commit_message": "test: Add mode to audit which models are touched by each 
test case (#39952)\n\nWhen a \"model manifest file path\" env var is set, write the audit to a\r\nJSON file at that path.\r\n\r\nAdd a set of triggers to BaseManager that execute provided callbacks\r\nwhen the model is queried, saved, or deleted, in order to define what\r\ncounts as \"touching\" the model.\r\n\r\nCo-authored-by: Zach Collins ", "code": "def get_queryset(self) -> BaseQuerySet:\n \n\n # TODO: This is a quick-and-dirty place to put the trigger hook that won't\n # work for all model classes, because some custom managers override\n # get_queryset without a `super` call.\n self._execute_triggers(ModelManagerTriggerCondition.QUERY)\n\n if hasattr(self, \"_hints\"):\n return self._queryset_class(self.model, using=self._db, hints=self._hints)\n return self._queryset_class(self.model, using=self._db)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 108, "n_words": 46, "vocab_size": 41, "complexity": 2, "nloc": 9, "token_counts": 60, "n_ast_nodes": 98, "n_identifiers": 13, "random_cut": "def get_queryset(self) -> BaseQuerySet:\n \n\n # TODO: This is a quick-and-dirty place to put the trigger hook that won't\n # work for all model classes, because some custom managers override\n # get_queryset without a `super" }, { "id": 13533, "commit_id": "d3feb668330f35b8e72b5f199ca63b3424afd3bb", "repo": "jina", "path": "tests/unit/serve/stream/test_stream.py", "file_name": "test_stream.py", "fun_name": "test_request_streamer", "commit_message": "feat: add option to return in order in client and streamer (#5404)", "code": "async def test_request_streamer(prefetch, num_requests, async_iterator, results_in_order):\n requests_handled = []\n results_handled = []\n\n request_ids = [random_identity() for _ in range(num_requests)]\n response_ids = []\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 33, "n_words": 22, "vocab_size": 17, "complexity": 6, "nloc": 36, "token_counts": 196, "n_ast_nodes": 59, "n_identifiers": 12, "random_cut": "async def test_request_streamer(prefetch, num_requests, async_iterator, results_in_order):\n requests_handled = []\n results_handled = []\n\n request_ids = [random_identity() for _ in range(num_requests)]\n response_ids = []\n" }, { "id": 55701, "commit_id": "c36348901182dc9b012dfbb5164676d7676242ca", "repo": "prefect", "path": "tests/blocks/test_storage.py", "file_name": "test_storage.py", "fun_name": "test_key_type_determines_file_name", "commit_message": "Add comment per feedback", "code": "async def test_key_type_determines_file_name(self, tmp_path, key_type):\n block = storage.FileStorageBlock(base_path=tmp_path, key_type=key_type)\n key = await block.write(b\"hello\")\n\n if key_type == \"hash\":\n assert key == stable_hash(b\"hello\")\n elif key_type == \"uuid\":\n assert uuid.UUID(key)\n elif key_type == \"timestamp\":\n # colons are not allowed in windows paths\n assert pendulum.parse(key.replace(\"_\", \":\"))\n\n assert (tmp_path / key).exists()\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 131, "n_words": 46, "vocab_size": 35, "complexity": 4, "nloc": 10, "token_counts": 87, "n_ast_nodes": 148, "n_identifiers": 17, "random_cut": "async def test_key_type_determines_file_name(self, tmp_path, key_type):\n block = storage.FileStorageBlock(base_path=tmp_path, 
key_type=key_type)\n key = await block.write(b\"hello\")\n\n if key_type == \"hash\":\n assert key == stable_hash(b\"hello\")\n elif key_type == \"uuid\":\n assert uuid.UUID(key)\n elif key_type == \"timestamp\":\n # colons are not allowed in windows paths\n assert pendulum.parse(key.replace(\"_\", \":\"))\n\n assert (tmp_path / key).exists()\n" }, { "id": 39838, "commit_id": "1fe99dc6677d9c598d9361d088fe3498afdfd61d", "repo": "dash", "path": "dash/_get_paths.py", "file_name": "_get_paths.py", "fun_name": "app_get_relative_path", "commit_message": "added tests", "code": "def app_get_relative_path(requests_pathname, path):\n if requests_pathname == \"/\" and path == \"\":\n return \"/\"\n if requests_pathname != \"/\" and path == \"\":\n return requests_pathname\n if not path.startswith(\"/\"):\n raise exceptions.UnsupportedRelativePath(\n .format(\n path\n )\n )\n return \"/\".join([requests_pathname.rstrip(\"/\"), path.lstrip(\"/\")])\n\n", "url": "https://github.com/plotly/dash.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 111, "n_words": 35, "vocab_size": 20, "complexity": 6, "nloc": 15, "token_counts": 71, "n_ast_nodes": 131, "n_identifiers": 10, "random_cut": "def app_get_relative_path(requests_pathname, path):\n if requests_pathname == \"/\" and path == \"\":\n return \"/\"\n if requests_pathname != \"/\" and path == \"\":\n return requests_pathname\n if not path.startswith(\"/\"):\n raise exceptions.UnsupportedRelativePath(\n .format(\n path\n " }, { "id": 220251, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ast.py", "file_name": "ast.py", "fun_name": "visit_ClassDef", "commit_message": "add python 3.10.4 for windows", "code": "def visit_ClassDef(self, node):\n self.maybe_newline()\n for deco in node.decorator_list:\n self.fill(\"@\")\n self.traverse(deco)\n self.fill(\"class \" + node.name)\n with self.delimit_if(\"(\", \")\", condition = node.bases or node.keywords):\n comma = False\n for e in node.bases:\n if comma:\n self.write(\", \")\n else:\n comma = True\n self.traverse(e)\n for e in node.keywords:\n if comma:\n self.write(\", \")\n else:\n comma = True\n self.traverse(e)\n\n with self.block():\n self._write_docstring_and_traverse_body(node)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 320, "n_words": 54, "vocab_size": 36, "complexity": 7, "nloc": 22, "token_counts": 131, "n_ast_nodes": 226, "n_identifiers": 18, "random_cut": "def visit_ClassDef(self, node):\n self.maybe_newline()\n for deco in node.decorator_list:\n self.fill(\"@\")\n self.traverse(deco)\n self.fill(\"class \" + node.name)\n with self.delimit_if(\"(\", \")\", condition = node.bases or node.keywords):\n comma = False\n for e in node.bases:\n if comma:\n self.write(\", \")\n else:\n comma = True\n self.traverse(e)\n for e in node.keywords:\n if comma:\n self.write(\", \")\n else:\n comma = True\n self.traverse(e)\n\n with self.block():\n self._write_docstring_and_traverse_body(node)\n" }, { "id": 65953, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/education/setup.py", "file_name": "setup.py", "fun_name": "create_academic_sessions", "commit_message": "style: format code with black", "code": "def create_academic_sessions():\n\tdata = [\n\t\t{\"doctype\": \"Academic Year\", \"academic_year_name\": \"2015-16\"},\n\t\t{\"doctype\": \"Academic Year\", 
\"academic_year_name\": \"2016-17\"},\n\t\t{\"doctype\": \"Academic Year\", \"academic_year_name\": \"2017-18\"},\n\t\t{\"doctype\": \"Academic Year\", \"academic_year_name\": \"2018-19\"},\n\t\t{\"doctype\": \"Academic Term\", \"academic_year\": \"2016-17\", \"term_name\": \"Semester 1\"},\n\t\t{\"doctype\": \"Academic Term\", \"academic_year\": \"2016-17\", \"term_name\": \"Semester 2\"},\n\t\t{\"doctype\": \"Academic Term\", \"academic_year\": \"2017-18\", \"term_name\": \"Semester 1\"},\n\t\t{\"doctype\": \"Academic Term\", \"academic_year\": \"2017-18\", \"term_name\": \"Semester 2\"},\n\t]\n\tinsert_record(data)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 47, "n_words": 59, "vocab_size": 23, "complexity": 1, "nloc": 12, "token_counts": 108, "n_ast_nodes": 226, "n_identifiers": 3, "random_cut": "def create_academic_sessions():\n\tdata = [\n\t\t{\"doctype\": \"Academic Year\", \"academic_year_name\": \"2015-16\"},\n\t\t{\"doctype\": \"Academic Year\", \"academic_year_name\": \"2016-17\"},\n\t\t{\"doctype\": \"Academic Year\", \"academic_year_name\": \"2017-18\"},\n\t\t{\"doctype\": \"Academic Year\", \"academic_year_name\": \"2018-19\"},\n\t\t{\"doctype\": \"Academic Term\", \"academic_year\": \"2016-17\", \"term_name\": \"Semester 1\"},\n\t\t{\"doctype\": \"Academic Term\", \"academic_year\": \"2016-17\", \"term_name\": \"Semester 2\"},\n\t\t{\"doctype\": \"Academic Term\", \"academic_" }, { "id": 281548, "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/stocks/options/options_controller.py", "file_name": "options_controller.py", "fun_name": "print_help", "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for how we handle Rich moving forward\r\n\r\n* remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia 
mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", "code": "def print_help(self):\n \n has_ticker_start = \"\" if self.ticker and self.selected_date else \"[dim]\"\n has_ticker_end = \"\" if self.ticker and self.selected_date else \"[/dim]\"\n help_text = f\n console.print(text=help_text, menu=\"Stocks - Options\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 62, "n_words": 27, "vocab_size": 19, "complexity": 5, "nloc": 31, "token_counts": 48, "n_ast_nodes": 114, "n_identifiers": 11, "random_cut": "def print_help(self):\n \n has_ticker_start = \"\" if self.ticker and self.selected_date else \"[dim]\"\n has_ticker_end = \"\" if self.ticker and self.selected_date else \"[/dim]\"\n help_text = f\n console.print(text=help_text, menu=\"Stocks - Options\")\n" }, { "id": 16218, "commit_id": "5cd051a26e83adff6899b4ad42dc5d51d91050e0", "repo": "ccxt", "path": "python/ccxt/async_support/btcturk.py", "file_name": "btcturk.py", "fun_name": "describe", "commit_message": "1.69.48\n\n[ci skip]", "code": "def describe(self):\n return self.deep_extend(super(btcturk, self).describe(), {\n 'id': 'btcturk',\n 'name': 'BTCTurk',\n 'countries': ['TR'], # Turkey\n 'rateLimit': 100,\n 'has': {\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'addMargin': False,\n 'cancelOrder': True,\n 'CORS': True,\n 'createOrder': True,\n 'createReduceOnlyOrder': False,\n 'fetchBalance': True,\n 'fetchBorrowRate': False,\n 'fetchBorrowRateHistory': False,\n 'fetchBorrowRates': False,\n 'fetchBorrowRatesPerSymbol': False,\n 'fetchFundingHistory': False,\n 'fetchFundingRate': False,\n 'fetchFundingRateHistory': False,\n 'fetchFundingRates': False,\n 'fetchIndexOHLCV': False,\n 'fetchIsolatedPositions': False,\n 'fetchLeverage': False,\n 'fetchMarkets': True,\n 'fetchMarkOHLCV': False,\n 'fetchMyTrades': True,\n 'fetchOHLCV': True,\n 'fetchOpenOrders': True,\n 'fetchOrderBook': True,\n 'fetchOrders': True,\n 'fetchPosition': False,\n 'fetchPositions': False,\n 'fetchPositionsRisk': False,\n 'fetchPremiumIndexOHLCV': False,\n 'fetchTicker': True,\n 'fetchTickers': True,\n 'fetchTrades': True,\n 'reduceMargin': False,\n 'setLeverage': False,\n 'setMarginMode': False,\n 'setPositionMode': False,\n },\n 'timeframes': {\n '1d': '1d',\n },\n 'urls': {\n 'logo': 'https://user-images.githubusercontent.com/51840849/87153926-efbef500-c2c0-11ea-9842-05b63612c4b9.jpg',\n 'api': {\n 'public': 'https://api.btcturk.com/api/v2',\n 'private': 'https://api.btcturk.com/api/v1',\n 'graph': 'https://graph-api.btcturk.com/v1',\n },\n 'www': 'https://www.btcturk.com',\n 'doc': 'https://github.com/BTCTrader/broker-api-docs',\n },\n 'api': {\n 'public': {\n 'get': {\n 'orderbook': 1,\n 'ticker': 0.1,\n 'trades': 1, # ?last=COUNT(max 50)\n 'server/exchangeinfo': 1,\n },\n },\n 
'private': {\n 'get': {\n 'users/balances': 1,\n 'openOrders': 1,\n 'allOrders': 1,\n 'users/transactions/trade': 1,\n },\n 'post': {\n 'order': 1,\n 'cancelOrder': 1,\n },\n 'delete': {\n 'order': 1,\n },\n },\n 'graph': {\n 'get': {\n 'ohlcs': 1,\n },\n },\n },\n 'fees': {\n 'trading': {\n 'maker': self.parse_number('0.0005'),\n 'taker': self.parse_number('0.0009'),\n },\n },\n 'exceptions': {\n 'exact': {\n 'FAILED_ORDER_WITH_OPEN_ORDERS': InsufficientFunds,\n 'FAILED_LIMIT_ORDER': InvalidOrder,\n 'FAILED_MARKET_ORDER': InvalidOrder,\n },\n },\n })\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1849, "n_words": 199, "vocab_size": 108, "complexity": 1, "nloc": 105, "token_counts": 395, "n_ast_nodes": 712, "n_identifiers": 8, "random_cut": "def describe(self):\n return self.deep_extend(super(btcturk, self).describe(), {\n 'id': 'btcturk',\n 'name': 'BTCTurk',\n 'countries': ['TR'], # Turkey\n 'rateLimit': 100,\n 'has': {\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'addMargin': False,\n 'cancelOrder': True,\n 'CORS': True,\n 'createOrder': True,\n 'createReduceOnlyOrder': False,\n 'fetchBalance': True,\n 'fetchBorrowRate': False,\n 'fetchBorrowRateHistory': False,\n 'fetchBorrowRates': False,\n 'fetchBorrowRatesPerSymbol': False,\n 'fetchFundingHistory': False,\n 'fetchFundingRate': False,\n 'fetchFundingRateHistory': False,\n 'fetchFundingRates': False,\n 'fetchIndexOHLCV': False,\n 'fetchIsolatedPositions': False,\n 'fetchLeverage': False,\n 'fetchMarkets': True,\n 'fetchMarkOHLCV': False,\n 'fetchMyTrades': True,\n 'fetchOHLCV': True,\n 'fetchOpenOrders': True,\n 'fetchOrderBook': True,\n 'fetchOrders': True,\n 'fetchPosition': False,\n 'fetchPositions': False,\n 'fetchPositionsRisk': False,\n 'fetchPremiumIndexOHLCV': False,\n 'fetchTicker': True,\n 'fetchTickers': True,\n 'fetchTrades': True,\n 'reduceMargin': False,\n 'setLeverage': False,\n 'setMarginMode': False,\n 'setPositionMode': False,\n },\n 'timeframes': {\n '1d': '1d',\n },\n 'urls': {\n 'logo': 'https://user-images.githubusercontent.com/51840849/87153926-efbef500-c2c0-11ea-9842-05b63612c4b9.jp" }, { "id": 91857, "commit_id": "689fae7f0804c5f820c3e4fc8b05b552a391e032", "repo": "sentry", "path": "src/sentry/relay/projectconfig_debounce_cache/redis.py", "file_name": "redis.py", "fun_name": "mark_task_done", "commit_message": "fix(projectconfig): Apply correct time limits to tasks (#35889)\n\nThe time limits did get mixed up somehow. This also thightens the\r\ntime limit of the build task a lot, because this only ever can build a\r\nsingle config that should be relatively short. 
Having it this short\r\nmeans it's 1/6th of the time limit in relay so relay can trigger a new\r\ntask in a new request and maybe that'll work beter.\r\n\r\nFinally this adds metrics around the debouncing keys to check if they\r\nmatch up correctly.", "code": "def mark_task_done(self, *, public_key, project_id, organization_id):\n key = self._get_redis_key(public_key, project_id, organization_id)\n client = self._get_redis_client(key)\n ret = client.delete(key)\n metrics.incr(\"relay.projectconfig_debounce_cache.task_done\", sample_rate=1)\n return ret\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 55, "n_words": 21, "vocab_size": 17, "complexity": 1, "nloc": 6, "token_counts": 53, "n_ast_nodes": 81, "n_identifiers": 14, "random_cut": "def mark_task_done(self, *, public_key, project_id, organization_id):\n key = self._get_redis_key(public_key, project_id, organization_id)\n client = self._get_redis_client(key)\n ret = client.delete(key)\n metrics.incr(\"relay.projectconfig_debounce_cache.task_done\", sample_rate=1)\n return ret\n" }, { "id": 249531, "commit_id": "b7272b73aa38dcb19c9b075514f963390358113d", "repo": "synapse", "path": "tests/rest/client/test_relations.py", "file_name": "test_relations.py", "fun_name": "test_repeated_paginate_relations", "commit_message": "Properly paginate forward in the /relations API. (#13840)\n\nThis fixes a bug where the `/relations` API with `dir=f` would\r\nskip the first item of each page (except the first page), causing\r\nincomplete data to be returned to the client.", "code": "def test_repeated_paginate_relations(self) -> None:\n \n\n expected_event_ids = []\n for idx in range(10):\n channel = self._send_relation(\n RelationTypes.ANNOTATION, \"m.reaction\", chr(ord(\"a\") + idx)\n )\n expected_event_ids.append(channel.json_body[\"event_id\"])\n\n prev_token: Optional[str] = \"\"\n found_event_ids: List[str] = []\n for _ in range(20):\n from_token = \"\"\n if prev_token:\n from_token = \"&from=\" + prev_token\n\n channel = self.make_request(\n \"GET\",\n f\"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}?limit=3{from_token}\",\n access_token=self.user_token,\n )\n self.assertEqual(200, channel.code, channel.json_body)\n\n found_event_ids.extend(e[\"event_id\"] for e in channel.json_body[\"chunk\"])\n next_batch = channel.json_body.get(\"next_batch\")\n\n self.assertNotEqual(prev_token, next_batch)\n prev_token = next_batch\n\n if not prev_token:\n break\n\n # We paginated backwards, so reverse\n found_event_ids.reverse()\n self.assertEqual(found_event_ids, expected_event_ids)\n\n # Test forward pagination.\n prev_token = \"\"\n found_event_ids = []\n for _ in range(20):\n from_token = \"\"\n if prev_token:\n from_token = \"&from=\" + prev_token\n\n channel = self.make_request(\n \"GET\",\n f\"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}?org.matrix.msc3715.dir=f&limit=3{from_token}\",\n access_token=self.user_token,\n )\n self.assertEqual(200, channel.code, channel.json_body)\n\n found_event_ids.extend(e[\"event_id\"] for e in channel.json_body[\"chunk\"])\n next_batch = channel.json_body.get(\"next_batch\")\n\n self.assertNotEqual(prev_token, next_batch)\n prev_token = next_batch\n\n if not prev_token:\n break\n\n self.assertEqual(found_event_ids, expected_event_ids)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 
648, "n_words": 132, "vocab_size": 61, "complexity": 10, "nloc": 49, "token_counts": 288, "n_ast_nodes": 516, "n_identifiers": 33, "random_cut": "def test_repeated_paginate_relations(self) -> None:\n \n\n expected_event_ids = []\n for idx in range(10):\n channel = self._send_relation(\n RelationTypes.ANNOTATION, \"m.reaction\", chr(ord(\"a\") + idx)\n )\n expected_event_ids.append(channel.json_body[\"event_id\"])\n\n prev_token: Optional[str] = \"\"\n found_event_ids: List[str] = []\n for _ in range(20):\n from_token = \"\"\n if prev_token:\n from_token = \"&from=\" + prev_token\n\n channel = self.make_request(\n \"GET\",\n f\"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}?limit=3{from_token}\",\n access_token=self.user_token,\n )\n self.assertEqual(200, channel.code, channel.json_body)\n\n " }, { "id": 204134, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/sitemaps/kml.py", "file_name": "kml.py", "fun_name": "location", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def location(self, obj):\n return reverse(\n \"django.contrib.gis.sitemaps.views.%s\" % self.geo_format,\n kwargs={\n \"label\": obj[0],\n \"model\": obj[1],\n \"field_name\": obj[2],\n },\n )\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 108, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 9, "token_counts": 43, "n_ast_nodes": 68, "n_identifiers": 6, "random_cut": "def location(self, obj):\n return reverse(\n \"django.contrib.gis.sitemaps.views." }, { "id": 183201, "commit_id": "a2da5546bdadbc20e5bfcd0c9914f138c087fa82", "repo": "textual", "path": "tests/test_focus.py", "file_name": "test_focus.py", "fun_name": "test_show_focus", "commit_message": "focus traversal", "code": "def test_show_focus():\n app = App()\n app.push_screen(Screen())\n app.screen.add_children(\n Focusable(id=\"foo\"),\n NonFocusable(id=\"bar\"),\n Focusable(Focusable(id=\"Paul\"), id=\"container1\"),\n NonFocusable(Focusable(id=\"Jessica\"), id=\"container2\"),\n Focusable(id=\"baz\"),\n )\n\n focused = [widget.id for widget in app.focus_chain]\n assert focused == [\"foo\", \"Paul\", \"baz\"]\n\n assert app.focused is None\n assert not app.has_class(\"-show-focus\")\n app.show_focus()\n assert app.has_class(\"-show-focus\")\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 102, "n_words": 38, "vocab_size": 32, "complexity": 2, "nloc": 16, "token_counts": 122, "n_ast_nodes": 211, "n_identifiers": 15, "random_cut": "def test_show_focus():\n app = App()\n app.push_screen(Screen())\n app.screen.add_children(\n Focusable(id=\"foo\"),\n NonFocusable(id=\"bar\"),\n Focusable(Focu" }, { "id": 114479, "commit_id": "c77ef976e2cae969da77bac6171a8f44e07553e6", "repo": "mindsdb", "path": "mindsdb/integrations/lightwood_handler/lightwood_handler/lightwood_handler.py", "file_name": "lightwood_handler.py", "fun_name": "select_query", "commit_message": "towards joining TS predictors", "code": "def select_query(self, stmt) -> pd.DataFrame:\n model = self._get_model(stmt)\n # if 'LATEST' in str(stmt.where):\n # stmt = self._get_latest_oby(stmt) # todo: it would be easy if I had access to the handler here, just query the handler to get the latest available date then proceed as usual\n # todo: with signatures as they stand, the way to do it is to actually fetch latest 
from model internal data, and emit forecast for that\n # TODO: check with max whether there is support for latest without joining. if so, this is a problem. if not, then it's actually fine.\n # todo: for now, will just ignore this possibility\n values = self._recur_get_conditionals(stmt.where.args, {})\n df = pd.DataFrame.from_dict(values)\n return self._call_predictor(df, model)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 182, "n_words": 115, "vocab_size": 81, "complexity": 1, "nloc": 5, "token_counts": 53, "n_ast_nodes": 87, "n_identifiers": 14, "random_cut": "def select_query(self, stmt) -> pd.DataFrame:\n model = self._get_model(stmt)\n # if 'LATEST' in str(stmt.where):\n # stmt = self._get_latest_oby(stmt) # todo: it would be easy if I had access to the handler here, just query the handler to get the latest available date then proceed as usual\n # todo: with signatures as they stand, the way to do it is to actually fetch latest from model internal data, and emit forecast for that\n # TODO: check with max whether there is support for latest without joining. if so, this is a problem. if not, then it's actually fine.\n # todo: for now, will just ignore this possibility\n values = self._recur_get_conditionals(stmt.where.args, {})\n df = pd.DataFrame.from_dict(values)\n return self._call_predictor(df, model)\n" }, { "id": 86524, "commit_id": "56fb605b0eb5d17a6c69fce01e396a68d69736af", "repo": "sentry", "path": "tests/sentry/nodestore/bigtable/test_backend.py", "file_name": "test_backend.py", "fun_name": "get_temporary_bigtable_nodestorage", "commit_message": "ref: upgrade protobuf (#39268)\n\nfor dependabot here: https://github.com/getsentry/getsentry/pull/8387", "code": "def get_temporary_bigtable_nodestorage() -> BigtableNodeStorage:\n if \"BIGTABLE_EMULATOR_HOST\" not in os.environ:\n pytest.skip(\n \"Bigtable is not available, set BIGTABLE_EMULATOR_HOST enironment variable to enable\"\n )\n\n ns = BigtableNodeStorage(project=\"test\")\n ns.bootstrap()\n\n try:\n yield ns\n finally:\n ns.store.destroy()\n\n\n@pytest.fixture(params=[MockedBigtableNodeStorage, BigtableNodeStorage])", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@pytest.fixture(params=[MockedBigtableNodeStorage, BigtableNodeStorage])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 84, "n_words": 32, "vocab_size": 30, "complexity": 3, "nloc": 11, "token_counts": 46, "n_ast_nodes": 105, "n_identifiers": 14, "random_cut": "def get_temporary_bigtable_nodestorage() -> BigtableNodeStorage:\n if \"BIGTABLE_EMULATOR_HOST\" not in os.environ:\n pytest.skip(\n \"Bigtable is not available, set BIGTABLE_EMULATOR_HOST enironment variable to enable\"\n )\n\n ns = BigtableNodeStorage(project=\"test\")\n ns.bootstrap()\n\n try:" }, { "id": 26806, "commit_id": "52adcd10d4e0a4d0026afc51b89a72bd0e53cc78", "repo": "saleor", "path": "saleor/plugins/webhook/tests/subscription_webhooks/test_create_deliveries_for_subscription.py", "file_name": "test_create_deliveries_for_subscription.py", "fun_name": "generate_expected_payload_for_gift_card", "commit_message": "New events related to gift card changes (#9588)\n\n* GiftCards webhook events\r\n\r\n* Changes after review.\r\n\r\n* GIFT_CARD_STATUS_CHANGED enum value fix\r\n\r\n* Fix tests coverage\r\n\r\n* Revert last commit\r\n\r\n* Graphql schema update", "code": "def generate_expected_payload_for_gift_card(gift_card, card_global_id):\n return 
json.dumps(\n {\n \"giftCard\": {\n \"id\": card_global_id,\n \"isActive\": gift_card.is_active,\n \"code\": gift_card.code,\n \"createdBy\": {\"email\": gift_card.created_by.email},\n },\n \"meta\": None,\n }\n )\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 134, "n_words": 22, "vocab_size": 21, "complexity": 1, "nloc": 12, "token_counts": 52, "n_ast_nodes": 88, "n_identifiers": 9, "random_cut": "def generate_expected_payload_for_gift_card(gift_card, card_global_id):\n return json.dumps(\n {\n \"giftCard\": {\n \"id\": card_global_id,\n \"isActive\": gift_card.is_active,\n \"code\": gift_card.code,\n \"createdBy\": {\"email\": gift_card.created_by.email},\n" }, { "id": 216515, "commit_id": "00ee5eed1d75417faaaa185e27947b268239698e", "repo": "salt", "path": "tests/integration/modules/test_cmdmod.py", "file_name": "test_cmdmod.py", "fun_name": "test_runas", "commit_message": "various changes and fixes needed to add PhotonOS into CICD.", "code": "def test_runas(self):\n \n with self._ensure_user_exists(self.runas_usr):\n out = self.run_function(\n \"cmd.run\", [\"env\"], runas=self.runas_usr, cwd=\"/tmp\"\n ).splitlines()\n self.assertIn(\"USER={}\".format(self.runas_usr), out)\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 72, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 6, "token_counts": 57, "n_ast_nodes": 100, "n_identifiers": 11, "random_cut": "def test_runas(self):\n \n with self._ensure_user_exists(self.runas_usr):\n out = self.run_function(\n \"cmd.run\", [\"env\"], runas=self.runas_usr, cwd=\"/tmp\"\n ).splitlines()\n self.assertIn(\"USER={}\".format(self.runas_usr), out)\n" }, { "id": 15661, "commit_id": "8830e8755680c44fe6c80ce954431da0fe89d3ba", "repo": "ccxt", "path": "python/ccxt/async_support/idex.py", "file_name": "idex.py", "fun_name": "create_order", "commit_message": "1.67.67\n\n[ci skip]", "code": "async def create_order(self, symbol, type, side, amount, price=None, params={}):\n # https://docs.idex.io/#create-order\n self.check_required_credentials()\n await self.load_markets()\n market = self.market(symbol)\n nonce = self.uuidv1()\n typeEnum = None\n stopLossTypeEnums = {\n 'stopLoss': 3,\n 'stopLossLimit': 4,\n 'takeProfit': 5,\n 'takeProfitLimit': 6,\n }\n stopPriceString = None\n if (type == 'stopLossLimit') or (type == 'takeProfitLimit') or ('stopPrice' in params):\n if not ('stopPrice' in params):\n raise BadRequest(self.id + ' stopPrice is a required parameter for ' + type + 'orders')\n stopPriceString = self.price_to_precision(symbol, params['stopPrice'])\n limitTypeEnums = {\n 'limit': 1,\n 'limitMaker': 2,\n }\n priceString = None\n typeLower = type.lower()\n limitOrder = typeLower.find('limit') >= 0\n if type in limitTypeEnums:\n typeEnum = limitTypeEnums[type]\n priceString = self.price_to_precision(symbol, price)\n elif type in stopLossTypeEnums:\n typeEnum = stopLossTypeEnums[type]\n priceString = self.price_to_precision(symbol, price)\n elif type == 'market':\n typeEnum = 0\n else:\n raise BadRequest(self.id + ' ' + type + ' is not a valid order type')\n amountEnum = 0 # base quantity\n if 'quoteOrderQuantity' in params:\n if type != 'market':\n raise NotSupported(self.id + ' quoteOrderQuantity is not supported for ' + type + ' orders, only supported for market orders')\n amountEnum = 1\n amount = self.safe_number(params, 'quoteOrderQuantity')\n 
sideEnum = 0 if (side == 'buy') else 1\n walletBytes = self.remove0x_prefix(self.walletAddress)\n network = self.safe_string(self.options, 'network', 'ETH')\n orderVersion = self.get_supported_mapping(network, {\n 'ETH': 1,\n 'BSC': 2,\n 'MATIC': 4,\n })\n amountString = self.amount_to_precision(symbol, amount)\n # https://docs.idex.io/#time-in-force\n timeInForceEnums = {\n 'gtc': 0,\n 'ioc': 2,\n 'fok': 3,\n }\n defaultTimeInForce = self.safe_string(self.options, 'defaultTimeInForce', 'gtc')\n timeInForce = self.safe_string(params, 'timeInForce', defaultTimeInForce)\n timeInForceEnum = None\n if timeInForce in timeInForceEnums:\n timeInForceEnum = timeInForceEnums[timeInForce]\n else:\n allOptions = list(timeInForceEnums.keys())\n asString = ', '.join(allOptions)\n raise BadRequest(self.id + ' ' + timeInForce + ' is not a valid timeInForce, please choose one of ' + asString)\n # https://docs.idex.io/#self-trade-prevention\n selfTradePreventionEnums = {\n 'dc': 0,\n 'co': 1,\n 'cn': 2,\n 'cb': 3,\n }\n defaultSelfTradePrevention = self.safe_string(self.options, 'defaultSelfTradePrevention', 'cn')\n selfTradePrevention = self.safe_string(params, 'selfTradePrevention', defaultSelfTradePrevention)\n selfTradePreventionEnum = None\n if selfTradePrevention in selfTradePreventionEnums:\n selfTradePreventionEnum = selfTradePreventionEnums[selfTradePrevention]\n else:\n allOptions = list(selfTradePreventionEnums.keys())\n asString = ', '.join(allOptions)\n raise BadRequest(self.id + ' ' + selfTradePrevention + ' is not a valid selfTradePrevention, please choose one of ' + asString)\n byteArray = [\n self.number_to_be(orderVersion, 1),\n self.base16_to_binary(nonce),\n self.base16_to_binary(walletBytes),\n self.encode(market['id']), # TODO: refactor to remove either encode or stringToBinary\n self.number_to_be(typeEnum, 1),\n self.number_to_be(sideEnum, 1),\n self.encode(amountString),\n self.number_to_be(amountEnum, 1),\n ]\n if limitOrder:\n encodedPrice = self.encode(priceString)\n byteArray.append(encodedPrice)\n if type in stopLossTypeEnums:\n encodedPrice = self.encode(stopPriceString or priceString)\n byteArray.append(encodedPrice)\n clientOrderId = self.safe_string(params, 'clientOrderId')\n if clientOrderId is not None:\n byteArray.append(self.encode(clientOrderId))\n after = [\n self.number_to_be(timeInForceEnum, 1),\n self.number_to_be(selfTradePreventionEnum, 1),\n self.number_to_be(0, 8), # unused\n ]\n allBytes = self.array_concat(byteArray, after)\n binary = self.binary_concat_array(allBytes)\n hash = self.hash(binary, 'keccak', 'hex')\n signature = self.sign_message_string(hash, self.privateKey)\n request = {\n 'parameters': {\n 'nonce': nonce,\n 'market': market['id'],\n 'side': side,\n 'type': type,\n 'wallet': self.walletAddress,\n 'selfTradePrevention': selfTradePrevention,\n },\n 'signature': signature,\n }\n if type != 'market':\n request['parameters']['timeInForce'] = timeInForce\n if limitOrder:\n request['parameters']['price'] = priceString\n if type in stopLossTypeEnums:\n request['parameters']['stopPrice'] = stopPriceString or priceString\n if amountEnum == 0:\n request['parameters']['quantity'] = amountString\n else:\n request['parameters']['quoteOrderQuantity'] = amountString\n if clientOrderId is not None:\n request['parameters']['clientOrderId'] = clientOrderId\n # {\n # market: 'DIL-ETH',\n # orderId: '7cdc8e90-eb7d-11ea-9e60-4118569f6e63',\n # wallet: '0x0AB991497116f7F5532a4c2f4f7B1784488628e1',\n # time: 1598873478650,\n # status: 'filled',\n # type: 'limit',\n # 
side: 'buy',\n # originalQuantity: '0.40000000',\n # executedQuantity: '0.40000000',\n # cumulativeQuoteQuantity: '0.03962396',\n # price: '1.00000000',\n # fills: [\n # {\n # fillId: '48582d10-b9bb-3c4b-94d3-e67537cf2472',\n # price: '0.09905990',\n # quantity: '0.40000000',\n # quoteQuantity: '0.03962396',\n # time: 1598873478650,\n # makerSide: 'sell',\n # sequence: 5053,\n # fee: '0.00080000',\n # feeAsset: 'DIL',\n # gas: '0.00857497',\n # liquidity: 'taker',\n # txStatus: 'pending'\n # }\n # ],\n # avgExecutionPrice: '0.09905990'\n # }\n # we don't use self.extend here because it is a signed endpoint\n response = await self.privatePostOrders(request)\n return self.parse_order(response, market)\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 2141, "n_words": 579, "vocab_size": 284, "complexity": 23, "nloc": 131, "token_counts": 868, "n_ast_nodes": 1484, "n_identifiers": 72, "random_cut": "async def create_order(self, symbol, type, side, amount, price=None, params={}):\n # https://docs.idex.io/#create-order\n self.check_required_credentials()\n await self.load_markets()\n market = self.market(symbol)\n nonce = self.uuidv1()\n typeEnum = None\n stopLossTypeEnums = {\n 'stopLoss': 3,\n 'stopLossLimit': 4,\n 'takeProfit': 5,\n 'takeProfitLimit': 6,\n }\n stopPriceString = None\n if (type == 'stopLossLimit') or (type == 'takeProfitLimit') or ('stopPrice' in params):\n if not ('stopPrice' in params):\n raise BadRequest(self.id + ' stopPrice is a required parameter for ' + type + 'orders')\n stopPriceString = self.price_to_precision(symbol, params['stopPrice'])\n limitTypeEnums = {\n 'limit': 1,\n 'limitMaker': 2,\n }\n priceString = None\n typeLower = type.lower()\n limitOrder = typeLower.find('limit') >= 0\n if type in limitTypeEnums:\n typeEnum = limitTypeEnums[type]\n priceString = self.price_to_precision(symbol, price)\n elif type in stopLossTypeEnums:\n typeEnum = stopLossTypeEnums[type]\n priceString = self.price_to_precision(symbol, price)\n elif type == 'market':\n typeEnum = 0\n else:\n raise BadRequest(self.id + ' ' + type + ' is not a valid order type')\n amountEnum = 0 # base quantity\n if 'quoteOrderQuantity' in params:\n if type != 'market':\n raise NotSupported(self.id + ' quoteOrderQuantity is not supported for ' + type + ' orders, only supported for market orders')\n amountEnum = 1\n amount = self.safe_number(params, 'quoteOrderQuantity')\n sideEnum = 0 if (side == 'buy') else 1\n walletBytes = self.remove0x_prefix(self.walletAddress)\n network = self.safe_string(self.options, 'network', 'ETH')\n orderVersion = self.get_supported_mapping(network, {\n 'ETH': 1,\n 'BSC': 2,\n 'MATIC': 4,\n })\n amountString = self.amount_to_precision(symbol, amount)\n # https://docs.idex.io/#time-in-force\n timeInForceEnums = {\n 'gtc': 0,\n 'ioc': 2,\n 'fok': 3,\n }\n defaultTimeInForce = self.safe_string(self.options, 'defaultTimeInForce', 'gtc')\n timeInForce = self.safe_string(params, 'timeInForce', defaultTimeInForce)\n timeInForceEnum = None\n if timeInForce in timeInForceEnums:\n timeInForceEnum = timeInForceEnums[timeInForce]\n else:\n allOptions = list(timeInForceEnums.keys())\n asString = ', '.join(allOptions)\n raise BadRequest(self.id + ' ' + timeInForce + ' is not a valid timeInForce, please choose one of ' + asString)\n # https://docs.idex.io/#self-trade-prevention\n selfTradePreventionEnums = {\n 'dc': 0,\n 'co': 1,\n 'cn': 2,\n 'cb': 3,\n }\n defaultSelfTradePrevention = 
self.safe_string(self.options, 'defaultSelfTradePrevention', 'cn')\n selfTradePrevention = self.safe_string(params, 'selfTradePrevention', defaultSelfTradePrevention)\n selfTradePreventionEnum = None\n if selfTradePrevention in selfTradePreventionEnums:\n selfTradePreventionEnum = selfTradePreventionEnums[selfTradePrevention]\n else:\n allOptions = list(selfTradePreventionEnums.keys())\n asString = ', '.join(allOptions)\n raise BadRequest(self.id + ' ' + selfTradePrevention + ' is not a valid selfTradePrevention, please choose one of ' + asString)\n byteArray = [\n self.number_to_be(orderVersion, 1),\n self.base16_to_binary(nonce),\n self.base16_to_binary(walletBytes),\n self.encode(market['id']), # TODO: refactor to remove either encode or stringToBinary\n self.number_to_be(typeEnum, 1),\n self.number_to_be(sideEnum, 1),\n self.encode(amountString),\n self.number_to_be(amountEnum, 1),\n ]\n if limitOrder:\n encodedPrice = self.encode(priceString)\n byteArray.append(encodedPrice)\n if type in stopLossTypeEnums:\n encodedPrice = self.encode(stopPriceString or priceString)\n byteArray.append(encodedPrice)\n clientOrderId = self.safe_string(params, 'clientOrderId')\n if clientOrderId is not None:\n byteArray.append(self.encode(clientOrderId))\n after = [\n self.number_to_be(timeInForceEnum, 1),\n self.number_to_be(selfTradePreventionEnum, 1),\n self.number_to_be(0, 8), # unused\n ]\n allBytes = self.array_concat(byteArray, after)\n binary = self.binary_concat_array(allBytes)\n hash = self.hash(binary, 'keccak', 'hex')\n signature = self.sign_message_string(hash, self.privateKey)\n request = {\n 'parameters': {\n 'nonce': nonce,\n 'market': market['id'],\n 'side': side,\n 'type': type,\n 'wallet': self.walletAddress,\n 'selfTradePrevention': selfTradePrevention,\n },\n 'signature': signature,\n }\n if type != 'market':\n request['parameters']['timeInForce'] = timeInForce\n if limitOrder:\n request['parameters']['price'] = priceString\n if type in stopLossTypeEnums:\n request['parameters']['stopPrice'] = stopPriceString or priceString\n if amountEnum == 0:\n request['parameters']['quantity'] = amountString\n else:\n request['parameters']['quoteOrderQuantity'] = amountString\n if clientOrderId is not None:\n request['parameters']['clientOrderId'] = clientOrderId\n # {\n # market: 'DIL-ETH',\n # orderId: '7cdc8e90-eb7d-11ea-9e60-4118569f6e63',\n # wallet: '0x0AB991497116f7F5532a4c2f4f7B1784488628e1',\n # time: 1598873478650,\n # status: 'filled',\n # type: 'limit',\n # side: 'buy',\n # originalQuantity: '0.40000000',\n # executedQuantity: '0.40000000',\n # cumulativeQuoteQuantity: '0.03962396',\n # price: '1.00000000',\n # fills: [\n # {\n # fillId: '48582d10-b9bb-3c4b-94d3-e67537cf2472',\n # price: '0.09905990',\n # quantity: '0.40000000',\n # quoteQuantity: '0.03962396',\n # time: 1598873478650,\n # makerSide: 'sell',\n # sequence: 5053,\n # " }, { "id": 204397, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/cache/backends/db.py", "file_name": "db.py", "fun_name": "has_key", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def has_key(self, key, version=None):\n key = self.make_and_validate_key(key, version=version)\n\n db = router.db_for_read(self.cache_model_class)\n connection = connections[db]\n quote_name = connection.ops.quote_name\n\n now = timezone.now().replace(microsecond=0, tzinfo=None)\n\n with connection.cursor() as cursor:\n cursor.execute(\n \"SELECT %s FROM %s WHERE %s = %%s and %s > %%s\"\n % (\n 
quote_name(\"cache_key\"),\n quote_name(self._table),\n quote_name(\"cache_key\"),\n quote_name(\"expires\"),\n ),\n [key, connection.ops.adapt_datetimefield_value(now)],\n )\n return cursor.fetchone() is not None\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 263, "n_words": 53, "vocab_size": 44, "complexity": 1, "nloc": 18, "token_counts": 127, "n_ast_nodes": 203, "n_identifiers": 23, "random_cut": "def has_key(self, key, version=None):\n key = self.make_and_validate_key(key, version=version)\n\n db = router.db_for_read(self.cache_model_class)\n connection = connections[db]\n quote_name = connection.ops.quote_name\n\n now = timezone.now().replace(microsecond=0, tzinfo=None)\n\n with connection.cursor() as cursor:\n curs" }, { "id": 90388, "commit_id": "0e063282196cd7cbe321d099e9581a54b799ea0a", "repo": "sentry", "path": "tests/sentry/api/endpoints/sentry_apps/test_interaction.py", "file_name": "test_interaction.py", "fun_name": "test_allows_logged_in_user_who_does_own_app", "commit_message": "ref(tests): Refactor API tests (#34896)", "code": "def test_allows_logged_in_user_who_does_own_app(self):\n self.get_success_response(\n self.published_app.slug,\n tsdbField=\"sentry_app_component_interacted\",\n componentType=\"issue-link\",\n status_code=status.HTTP_201_CREATED,\n )\n self.get_success_response(\n self.published_app.slug,\n tsdbField=\"sentry_app_viewed\",\n status_code=status.HTTP_201_CREATED,\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 117, "n_words": 13, "vocab_size": 9, "complexity": 1, "nloc": 12, "token_counts": 51, "n_ast_nodes": 80, "n_identifiers": 10, "random_cut": "def test_allows_logged_in_user_who_does_own_app(self):\n self.get_success_respo" }, { "id": 209491, "commit_id": "dd7a5c97d68c00d1d03ecf8ac27c6c7038525065", "repo": "scapy", "path": "scapy/layers/dns.py", "file_name": "dns.py", "fun_name": "make_reply", "commit_message": "Answering machines improvements (NBNS/DNS/LLMNR) (#3699)\n\n* Minor NBNS improvements\r\n\r\n* Improve Netbios/LLMNR/DNS answering machines\r\n\r\n* DNS_am: support IPv6\r\n\r\n* More customization of some answering machines", "code": "def make_reply(self, req):\n resp = AnsweringMachineUtils.reverse_packet(req)\n dns = req.getlayer(self.cls)\n if req.qd.qtype == 28:\n # AAAA\n if self.joker6 is False:\n return\n rdata = self.match.get(\n dns.qd.qname,\n self.joker or get_if_addr6(self.optsniff.get(\"iface\", conf.iface))\n )\n if isinstance(rdata, (tuple, list)):\n rdata = rdata[1]\n resp /= self.cls(id=dns.id, qr=1, qd=dns.qd,\n an=DNSRR(rrname=dns.qd.qname, ttl=10, rdata=rdata,\n type=28))\n else:\n if self.joker is False:\n return\n rdata = self.match.get(\n dns.qd.qname,\n self.joker or get_if_addr(self.optsniff.get(\"iface\", conf.iface))\n )\n if isinstance(rdata, (tuple, list)):\n # Fallback\n rdata = rdata[0]\n resp /= self.cls(id=dns.id, qr=1, qd=dns.qd,\n an=DNSRR(rrname=dns.qd.qname, ttl=10, rdata=rdata))\n return resp\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 462, "n_words": 79, "vocab_size": 43, "complexity": 8, "nloc": 27, "token_counts": 250, "n_ast_nodes": 374, "n_identifiers": 32, "random_cut": "def make_reply(self, req):\n resp = AnsweringMachineUtils.reverse_packet(req)\n dns = req.getlayer(self.cls)\n if req.qd.qtype == 
28:\n # AAAA\n if self.joker6 is False:\n return\n rdata = self.match.get(\n dns.qd.qname,\n self.joker or get_if_addr6(self.optsniff.get(\"iface\", conf.iface))\n )\n if isinstance(rdata, (tuple, list)):\n rdata = rdata[1]\n resp /= self.cls(id=dns.id, qr=1, qd=dns.qd,\n an=DNSRR(rrname=dns.qd.qname, ttl=10, rdata=rdata,\n type=28))\n else:\n if self" }, { "id": 137534, "commit_id": "7e17ba355741d38bece7e006772ea45607ef0927", "repo": "ray", "path": "rllib/core/rl_module/tf/tests/test_tf_rl_module.py", "file_name": "test_tf_rl_module.py", "fun_name": "test_forward_train", "commit_message": "[RLlib] TFRLModule and optimzer test (#31003)\n\nSigned-off-by: Kourosh Hakhamaneshi \r\nSigned-off-by: avnish \r\nCo-authored-by: Kourosh Hakhamaneshi ", "code": "def test_forward_train(self):\n\n bsize = 1024\n env = gym.make(\"CartPole-v1\")\n module = DiscreteBCTFModule.from_model_config(\n env.observation_space,\n env.action_space,\n model_config={\"hidden_dim\": 32},\n )\n\n obs_shape = env.observation_space.shape\n obs = tf.random.uniform((bsize,) + obs_shape)\n actions = tf.stack(\n [\n tf.convert_to_tensor(env.action_space.sample(), dtype=tf.float32)\n for _ in range(bsize)\n ]\n )\n with tf.GradientTape() as tape:\n output = module.forward_train({\"obs\": obs})\n loss = -tf.math.reduce_mean(output[\"action_dist\"].log_prob(actions))\n\n self.assertIsInstance(output, Mapping)\n self.assertIn(\"action_dist\", output)\n self.assertIsInstance(output[\"action_dist\"], tfp.distributions.Categorical)\n\n grads = tape.gradient(loss, module.trainable_variables())\n\n # check that all neural net parameters have gradients\n for grad in grads[\"policy\"]:\n self.assertIsNotNone(grad)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 293, "n_words": 71, "vocab_size": 60, "complexity": 3, "nloc": 25, "token_counts": 194, "n_ast_nodes": 317, "n_identifiers": 45, "random_cut": "def test_forward_train(self):\n\n bsize = 1024\n env = gym.make(\"CartPole-v1\")\n module = DiscreteBCTFModule.from_model_config(\n env.observation_space,\n env.action_space,\n model_config={\"hidden_dim\": 32},\n )\n\n obs_shape = env.observation_space.shape\n obs = tf.random.uniform((bsize,) + obs_shape)\n actions = tf.stack(\n [\n tf.convert_to_tensor(env.action_space.sample(), dtype=tf.float32)\n for _ in range(bsize)\n ]\n )\n with tf.GradientTape() as tape:\n output = module.forward_train({\"obs\": obs})\n loss = -tf.math.reduce_mean(output[\"action_dist\"].log_prob(actions))\n\n self.assertIsInstance(output, Mapping)\n self.assertIn(\"action_dist\", output)\n self.assertIsInstance(output[\"action_dist\"], tfp.distributions.Categorical)\n\n grads = tape.gradient(loss, module.trainable_variables())\n\n # check that all neural net parameters have gradients\n for grad in grads[\"policy\"]:\n self.assertIsNotNone(grad)\n" }, { "id": 260065, "commit_id": "48a0fe67432773dc6d571d5c6bacbe868921f022", "repo": "scikit-learn", "path": "sklearn/metrics/tests/test_dist_metrics.py", "file_name": "test_dist_metrics.py", "fun_name": "test_input_data_size", "commit_message": "MAINT float32 support for `DistanceMetric` (#22764)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Jérémie du Boisberranger ", "code": "def test_input_data_size():\n # Regression test for #6288\n # Previously, a metric requiring a particular input dimension would fail", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 4, "n_whitespaces": 23, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 7, "token_counts": 60, "n_ast_nodes": 10, "n_identifiers": 1, "random_cut": "def test_input_data_size():\n # Regression test for #6288\n # Previously, a " }, { "id": 69460, "commit_id": "27df455b9862396a192ce381f2e34f0d3cb94e5e", "repo": "erpnext", "path": "erpnext/accounts/doctype/journal_entry/journal_entry.py", "file_name": "journal_entry.py", "fun_name": "get_payment_entry", "commit_message": "perf: use `get_cached_value` instead of `db.get_value` in accounts module", "code": "def get_payment_entry(ref_doc, args):\n\tcost_center = ref_doc.get(\"cost_center\") or frappe.get_cached_value(\n\t\t\"Company\", ref_doc.company, \"cost_center\"\n\t)\n\texchange_rate = 1\n\tif args.get(\"party_account\"):\n\t\t# Modified to include the posting date for which the exchange rate is required.\n\t\t# Assumed to be the posting date in the reference document\n\t\texchange_rate = get_exchange_rate(\n\t\t\tref_doc.get(\"posting_date\") or ref_doc.get(\"transaction_date\"),\n\t\t\targs.get(\"party_account\"),\n\t\t\targs.get(\"party_account_currency\"),\n\t\t\tref_doc.company,\n\t\t\tref_doc.doctype,\n\t\t\tref_doc.name,\n\t\t)\n\n\tje = frappe.new_doc(\"Journal Entry\")\n\tje.update(\n\t\t{\"voucher_type\": \"Bank Entry\", \"company\": ref_doc.company, \"remark\": args.get(\"remarks\")}\n\t)\n\n\tparty_row = je.append(\n\t\t\"accounts\",\n\t\t{\n\t\t\t\"account\": args.get(\"party_account\"),\n\t\t\t\"party_type\": args.get(\"party_type\"),\n\t\t\t\"party\": ref_doc.get(args.get(\"party_type\").lower()),\n\t\t\t\"cost_center\": cost_center,\n\t\t\t\"account_type\": frappe.get_cached_value(\"Account\", args.get(\"party_account\"), \"account_type\"),\n\t\t\t\"account_currency\": args.get(\"party_account_currency\")\n\t\t\tor get_account_currency(args.get(\"party_account\")),\n\t\t\t\"balance\": get_balance_on(args.get(\"party_account\")),\n\t\t\t\"party_balance\": get_balance_on(party=args.get(\"party\"), party_type=args.get(\"party_type\")),\n\t\t\t\"exchange_rate\": exchange_rate,\n\t\t\targs.get(\"amount_field_party\"): args.get(\"amount\"),\n\t\t\t\"is_advance\": args.get(\"is_advance\"),\n\t\t\t\"reference_type\": ref_doc.doctype,\n\t\t\t\"reference_name\": ref_doc.name,\n\t\t},\n\t)\n\n\tbank_row = je.append(\"accounts\")\n\n\t# Make it bank_details\n\tbank_account = get_default_bank_cash_account(\n\t\tref_doc.company, \"Bank\", account=args.get(\"bank_account\")\n\t)\n\tif bank_account:\n\t\tbank_row.update(bank_account)\n\t\t# Modified to include the posting date for which the exchange rate is required.\n\t\t# Assumed to be the posting date of the reference date\n\t\tbank_row.exchange_rate = get_exchange_rate(\n\t\t\tref_doc.get(\"posting_date\") or ref_doc.get(\"transaction_date\"),\n\t\t\tbank_account[\"account\"],\n\t\t\tbank_account[\"account_currency\"],\n\t\t\tref_doc.company,\n\t\t)\n\n\tbank_row.cost_center = cost_center\n\n\tamount = args.get(\"debit_in_account_currency\") or args.get(\"amount\")\n\n\tif bank_row.account_currency == args.get(\"party_account_currency\"):\n\t\tbank_row.set(args.get(\"amount_field_bank\"), amount)\n\telse:\n\t\tbank_row.set(args.get(\"amount_field_bank\"), amount * exchange_rate)\n\n\t# Multi currency check again\n\tif party_row.account_currency != ref_doc.company_currency or (\n\t\tbank_row.account_currency and bank_row.account_currency != ref_doc.company_currency\n\t):\n\t\tje.multi_currency = 
1\n\n\tje.set_amounts_in_company_currency()\n\tje.set_total_debit_credit()\n\n\treturn je if args.get(\"journal_entry\") else je.as_dict()\n\n\n@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 136, "n_words": 206, "vocab_size": 128, "complexity": 13, "nloc": 62, "token_counts": 466, "n_ast_nodes": 832, "n_identifiers": 36, "random_cut": "def get_payment_entry(ref_doc, args):\n\tcost_center = ref_doc.get(\"cost_center\") or frappe.get_cached_value(\n\t\t\"Company\", ref_doc.company, \"cost_center\"\n\t)\n\texchange_rate = 1\n\tif args.get(\"party_account\"):\n\t\t# Modified to include the posting date for which the exchange rate is required.\n\t\t# Assumed to be the posting date in the reference document\n\t\texchange_rate = get_exchange_rate(\n\t\t\tref_doc.get(\"posting_date\") or ref_doc.get(\"transaction_date\"),\n\t\t\targs.get(\"party_account\"),\n\t\t\targs.get(\"party_account_currency\"),\n\t\t\tref_doc.company,\n\t\t\tref_doc.doctype,\n\t\t\tref_doc.name,\n\t\t)\n\n\tje = frappe.new_doc(\"Journal Entry\")\n\tje.update(\n\t\t{\"voucher_type\": \"Bank Entry\", \"company\": ref_doc.company, \"remark\": args.get(\"remarks\")}\n\t)\n\n\tparty_row = je.append(\n\t\t\"accounts\",\n\t\t{\n\t\t\t\"account\": args.get(\"party_account\"),\n\t\t\t\"party_type\": args.get(\"party_type\"),\n\t\t\t\"party\": ref_doc.get(args.get(\"party_type\").lower()),\n\t\t\t\"cost_center\": cost_center,\n\t\t\t\"account_type\": frappe.get_cached_value(\"Account\", args.get(\"party_account\"), \"account_type\"),\n\t\t\t\"account_currency\": args.get(\"party_account_currency\")\n\t\t\tor get_account_currency(args.get(\"party_account\")),\n\t\t\t\"balance\": get_balance_on(args.get(\"party_account\")),\n\t\t\t\"party_balance\": get_balance_on(party=args.get(\"party\"), party_type=args.get(\"party_type\")),\n\t\t\t\"exchange_rate\": exchange_rate,\n\t\t\targs.get(\"amount_field_party\"): args.get(\"amount\"),\n\t\t\t\"is_advance\": args.get(\"is_advance\"),\n\t\t\t\"reference_type\": ref_doc.doctype,\n\t\t\t\"reference_name\": ref_doc.name,\n\t\t},\n\t)\n\n\tbank_row = je.append(\"accounts\")\n\n\t# Make it bank_details\n\tbank_account = get_default_bank_cash_account(\n\t\tref_doc.company, \"Bank\", account=args.get(\"bank_account\")\n\t)\n\tif bank_account:\n\t\tbank_row.update(bank_account)\n\t\t# Modified to include the posting date for which the exchange rate is requir" }, { "id": 121544, "commit_id": "acb5e491aba81cedd2af7b80c6bccb03ecf2a3b5", "repo": "jax", "path": "jax/_src/prng.py", "file_name": "prng.py", "fun_name": "random_bits", "commit_message": "sketch: setup for new key array implementation based on eltypes\n\nCo-authored-by: Matthew Johnson ", "code": "def random_bits(keys, bit_width, shape):\n return random_bits_p.bind(keys, bit_width=bit_width, shape=shape)\n\nrandom_bits_p = core.Primitive('random_bits')\nbatching.defvectorized(random_bits_p)\n\n@random_bits_p.def_abstract_eval", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@random_bits_p.def_abstract_eval", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 10, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 2, "token_counts": 24, "n_ast_nodes": 66, "n_identifiers": 11, "random_cut": "def random_bits(keys, bit_width, shape):\n return random_bits_p.bind(keys, 
bit_width=bit_width, shape=shape)\n\nrandom_bits_p = core.Primitive('random_bits')\nbatching.defvectorized(random_bits_p)\n\n@random_bits_p.def_abst" }, { "id": 66769, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v13_0/move_branch_code_to_bank_account.py", "file_name": "move_branch_code_to_bank_account.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\n\tfrappe.reload_doc(\"accounts\", \"doctype\", \"bank_account\")\n\tfrappe.reload_doc(\"accounts\", \"doctype\", \"bank\")\n\n\tif frappe.db.has_column(\"Bank\", \"branch_code\") and frappe.db.has_column(\n\t\t\"Bank Account\", \"branch_code\"\n\t):\n\t\tfrappe.db.sql(\n\t\t\t\n\t\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 11, "n_words": 19, "vocab_size": 17, "complexity": 3, "nloc": 12, "token_counts": 55, "n_ast_nodes": 105, "n_identifiers": 6, "random_cut": "def execute():\n\n\tfrappe.reload_doc(\"accounts\", \"doctype\", \"bank_account\")\n\tfrappe.reload_doc(\"acco" }, { "id": 111530, "commit_id": "1f23c615d7a7326ca5a38a7d768b8b70caaa0e17", "repo": "spaCy", "path": "spacy/tests/pipeline/test_entity_linker.py", "file_name": "test_entity_linker.py", "fun_name": "test_abstract_kb_instantiation", "commit_message": "Refactor KB for easier customization (#11268)\n\n* Add implementation of batching + backwards compatibility fixes. Tests indicate issue with batch disambiguation for custom singular entity lookups.\r\n\r\n* Fix tests. Add distinction w.r.t. batch size.\r\n\r\n* Remove redundant and add new comments.\r\n\r\n* Adjust comments. Fix variable naming in EL prediction.\r\n\r\n* Fix mypy errors.\r\n\r\n* Remove KB entity type config option. Change return types of candidate retrieval functions to Iterable from Iterator. Fix various other issues.\r\n\r\n* Update spacy/pipeline/entity_linker.py\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Update spacy/pipeline/entity_linker.py\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Update spacy/kb_base.pyx\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Update spacy/kb_base.pyx\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Update spacy/pipeline/entity_linker.py\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Add error messages to NotImplementedErrors. Remove redundant comment.\r\n\r\n* Fix imports.\r\n\r\n* Remove redundant comments.\r\n\r\n* Rename KnowledgeBase to InMemoryLookupKB and BaseKnowledgeBase to KnowledgeBase.\r\n\r\n* Fix tests.\r\n\r\n* Update spacy/errors.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Move KB into subdirectory.\r\n\r\n* Adjust imports after KB move to dedicated subdirectory.\r\n\r\n* Fix config imports.\r\n\r\n* Move Candidate + retrieval functions to separate module. Fix other, small issues.\r\n\r\n* Fix docstrings and error message w.r.t. class names. 
Fix typing for candidate retrieval functions.\r\n\r\n* Update spacy/kb/kb_in_memory.pyx\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Update spacy/ml/models/entity_linker.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Fix typing.\r\n\r\n* Change typing of mentions to be Span instead of Union[Span, str].\r\n\r\n* Update docs.\r\n\r\n* Update EntityLinker and _architecture docs.\r\n\r\n* Update website/docs/api/entitylinker.md\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Adjust message for E1046.\r\n\r\n* Re-add section for Candidate in kb.md, add reference to dedicated page.\r\n\r\n* Update docs and docstrings.\r\n\r\n* Re-add section + reference for KnowledgeBase.get_alias_candidates() in docs.\r\n\r\n* Update spacy/kb/candidate.pyx\r\n\r\n* Update spacy/kb/kb_in_memory.pyx\r\n\r\n* Update spacy/pipeline/legacy/entity_linker.py\r\n\r\n* Remove canididate.md. Remove mistakenly added config snippet in entity_linker.py.\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\nCo-authored-by: Sofie Van Landeghem ", "code": "def test_abstract_kb_instantiation():\n \n with pytest.raises(TypeError):\n KnowledgeBase(None, 3)\n\n\n# fmt: off\n@pytest.mark.parametrize(\n \"meet_threshold,config\",\n [\n (False, {\"@architectures\": \"spacy.EntityLinker.v2\", \"tok2vec\": DEFAULT_TOK2VEC_MODEL}),\n (True, {\"@architectures\": \"spacy.EntityLinker.v2\", \"tok2vec\": DEFAULT_TOK2VEC_MODEL}),\n ],\n)\n# fmt: on", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"meet_threshold,config\",\n [\n (False, {\"@architectures\": \"spacy.EntityLinker.v2\", \"tok2vec\": DEFAULT_TOK2VEC_MODEL}),\n (True, {\"@architectures\": \"spacy.EntityLinker.v2\", \"tok2vec\": DEFAULT_TOK2VEC_MODEL}),\n ],\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 59, "n_words": 27, "vocab_size": 21, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 108, "n_identifiers": 8, "random_cut": "def test_abstract_kb_instantiation():\n \n with pytest.raises(TypeError):\n KnowledgeBase(None, 3)\n\n\n# fmt: off\n@pytest.mark.parametrize(\n \"meet_threshold,config\",\n [\n (False, {\"@architectures\": \"spacy.EntityLinker.v2\", \"tok2vec\": DEFAULT_TOK2VEC_MODEL}),\n (True, {\"@architectures\": \"spacy.EntityLinker.v2\", \"tok2vec\": DEFAUL" }, { "id": 300096, "commit_id": "a8aa0e1cca486ce5f8baf8e09b8c9bd24c47cfa1", "repo": "core", "path": "tests/components/recorder/test_purge.py", "file_name": "test_purge.py", "fun_name": "mock_use_sqlite", "commit_message": "Add Estimated Database Size to the recorder system health (#71463)", "code": "def mock_use_sqlite(request):\n \n with patch(\n \"homeassistant.components.recorder.core.Recorder.dialect_name\",\n return_value=SupportedDialect.SQLITE\n if request.param\n else SupportedDialect.MYSQL,\n ):\n yield\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 56, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 8, "token_counts": 28, "n_ast_nodes": 50, "n_identifiers": 8, "random_cut": "def mock_use_sqlite(request):\n \n with patch(\n \"homeassis" }, { "id": 135180, "commit_id": "cd031a021d659c0c1e27b9b37a9637aad8d46610", "repo": "ray", "path": "doc/source/tune/doc_code/trainable.py", "file_name": "trainable.py", "fun_name": "objective", "commit_message": "[Tune] [Doc] Tune checkpointing and Tuner restore docfix (#29411)", "code": "def objective(x, a, b):\n return a * (x ** 0.5) + b\n# 
__example_objective_end__\n# fmt: on\n\n# __function_api_report_intermediate_metrics_start__\nfrom ray import tune\nfrom ray.air import session\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 24, "n_words": 27, "vocab_size": 23, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 47, "n_identifiers": 8, "random_cut": "def objective(x, a, b):\n return a * (x ** 0.5) + b\n# __example_objective_end__\n# fmt: on\n\n" }, { "id": 303347, "commit_id": "842cc060f80a632032dacbe1e2eaa8ca6421eda0", "repo": "core", "path": "tests/components/zwave_js/conftest.py", "file_name": "conftest.py", "fun_name": "mock_addon_store_info", "commit_message": "Fix zwave_js addon info (#76044)\n\n* Add add-on store info command\r\n\r\n* Use add-on store info command in zwave_js\r\n\r\n* Fix init tests\r\n\r\n* Update tests\r\n\r\n* Fix method for addon store info\r\n\r\n* Fix response parsing\r\n\r\n* Fix store addon installed response parsing\r\n\r\n* Remove addon info log that can contain network keys\r\n\r\n* Add supervisor store addon info test\r\n\r\n* Default to version None if add-on not installed\r\n\r\nCo-authored-by: Mike Degatano \r\n\r\nCo-authored-by: Mike Degatano ", "code": "def mock_addon_store_info(addon_store_info_side_effect):\n \n with patch(\n \"homeassistant.components.zwave_js.addon.async_get_addon_store_info\",\n side_effect=addon_store_info_side_effect,\n ) as addon_store_info:\n addon_store_info.return_value = {\n \"installed\": None,\n \"state\": None,\n \"version\": \"1.0.0\",\n }\n yield addon_store_info\n\n\n@pytest.fixture(name=\"addon_running\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture(name=\"addon_running\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 98, "n_words": 22, "vocab_size": 21, "complexity": 1, "nloc": 11, "token_counts": 39, "n_ast_nodes": 91, "n_identifiers": 9, "random_cut": "def mock_addon_store_info(addon_store_info_side_effect):\n \n with patch(\n \"homeassistant.components.zwave_js.addon.async_get_addon_store_info\",\n side_effect=addon_store_info_side_effect,\n ) as addon_store_info:\n addon_store_info.return_value = {\n \"installed\": None,\n \"state\": None,\n \"version" }, { "id": 153172, "commit_id": "39fbc57e809c2422b250f0be58d076a22bd45031", "repo": "modin", "path": "modin/pandas/test/test_series.py", "file_name": "test_series.py", "fun_name": "test_max", "commit_message": "FEAT-#4035: Upgrade pandas support to 1.4 (#4036)\n\nCo-authored-by: Igoshev, Yaroslav \r\nCo-authored-by: Alexey Prutskov \r\nCo-authored-by: Rehan Durrani \r\nCo-authored-by: ienkovich \r\nCo-authored-by: Vasily Litvinov \r\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Devin Petersohn ", "code": "def test_max(data, skipna):\n eval_general(*create_test_series(data), lambda df: df.max(skipna=skipna))\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\n@pytest.mark.parametrize(\n \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\n@pytest.mark.parametrize(\n \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 18, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 2, "token_counts": 27, "n_ast_nodes": 99, 
"n_identifiers": 16, "random_cut": "def test_max(data, skipna):\n eval_gene" }, { "id": 304522, "commit_id": "f3e432c9c7735daddd2946773cc6572567821a5e", "repo": "core", "path": "homeassistant/components/bluetooth/scanner.py", "file_name": "scanner.py", "fun_name": "_async_reset_adapter", "commit_message": "Reduce bluetooth logging noise when an adapter is recovered (#77109)", "code": "async def _async_reset_adapter(self) -> None:\n \n # There is currently nothing the user can do to fix this\n # so we log at debug level. If we later come up with a repair\n # strategy, we will change this to raise a repair issue as well.\n _LOGGER.debug(\"%s: adapter stopped responding; executing reset\", self.name)\n result = await async_reset_adapter(self.adapter)\n _LOGGER.debug(\"%s: adapter reset result: %s\", self.name, result)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 112, "n_words": 63, "vocab_size": 53, "complexity": 1, "nloc": 5, "token_counts": 39, "n_ast_nodes": 71, "n_identifiers": 8, "random_cut": "async def _async_reset_adapter(self) -> None:\n \n # There is currently nothing the user can do to fix this\n # so we log at debug level. If we later come up with a repair\n # strategy, we will change this to raise a repair issue as well.\n _LOGGER.debug(\"%s: adapter stopped responding; executing reset\", sel" }, { "id": 41541, "commit_id": "6b61a26a462effaea1c80518e98185abb12174ed", "repo": "seaborn", "path": "seaborn/_core/plot.py", "file_name": "plot.py", "fun_name": "_variables", "commit_message": "Begin removal of data/layers as Plotter attributes", "code": "def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(c for c in layer[\"vars\"] if c not in variables)\n return variables\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 102, "n_words": 31, "vocab_size": 24, "complexity": 4, "nloc": 9, "token_counts": 79, "n_ast_nodes": 126, "n_identifiers": 14, "random_cut": "def _variables(self) -> list[str]:\n\n variables = (\n lis" }, { "id": 112017, "commit_id": "d63a2ea3979f590fdd1fdffba937d8ae2e58099e", "repo": "nni", "path": "examples/nas/oneshot/spos/search.py", "file_name": "search.py", "fun_name": "test_acc", "commit_message": "Support weight loading in SPOS (#4595)", "code": "def test_acc(model, criterion, log_freq, loader):\n logger.info(\"Start testing...\")\n model.eval()\n meters = AverageMeterGroup()\n start_time = time.time()\n with torch.no_grad():\n for step, (inputs, targets) in enumerate(loader):\n inputs, targets = inputs.to('cuda'), targets.to('cuda')\n logits = model(inputs)\n loss = criterion(logits, targets)\n metrics = accuracy(logits, targets)\n metrics[\"loss\"] = loss.item()\n meters.update(metrics)\n if step % log_freq == 0 or step + 1 == len(loader):\n logger.info(\"Valid Step [%d/%d] time %.3fs acc1 %.4f acc5 %.4f loss %.4f\",\n step + 1, len(loader), time.time() - start_time,\n meters.acc1.avg, meters.acc5.avg, meters.loss.avg)\n return meters.acc1.avg\n\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 247, "n_words": 77, "vocab_size": 62, "complexity": 4, "nloc": 18, "token_counts": 173, 
"n_ast_nodes": 278, "n_identifiers": 29, "random_cut": "def test_acc(model, criterion, log_freq, loader):\n logger.info(\"Start testing...\")\n model.eval()\n meters = AverageMeterGroup()\n start_time = time.time()\n with torch.no_grad():\n for step, (inputs, targets) in enumerate(loader):\n inputs, targets = inputs.to('cuda'), targets.to('cuda')\n logits = model(inputs)\n loss = criterion(logits, targets)\n metrics = accuracy(logits, targets)\n metrics[\"loss\"] = loss.item()\n meters.update(me" }, { "id": 195541, "commit_id": "07ba788f1c3a370fb39ecc3189dd58323caa20e6", "repo": "ParlAI", "path": "tests/test_tga.py", "file_name": "test_tga.py", "fun_name": "test_token_level_loss_logging", "commit_message": "[TGA] Factual Nucleus Sampling (#4890)\n\n* factual nucleus\r\n\r\n* docstring\r\n\r\n* remove extranneous change\r\n\r\n* address comments", "code": "def test_token_level_loss_logging(self):\n \n inference_types = [\n 'beam',\n 'greedy',\n 'topk',\n 'nucleus',\n 'factual_nucleus',\n 'delayedbeam',\n ]\n gold_data = {\n 'beam': {\n 'text_token_info': [\n ('__start__', {\"token_logprob\": 0.0, \"token_rank\": 0}),\n ('5', {\"token_logprob\": math.log(0.999), \"token_rank\": 0}),\n ('__end__', {\"token_logprob\": math.log(0.999), \"token_rank\": 0}),\n ],\n 'extra_args': ['--beam-size', '3'],\n },\n 'greedy': {\n 'text_token_info': [\n ('__start__', {\"token_logprob\": 0.0, \"token_rank\": 0}),\n ('5', {\"token_logprob\": math.log(0.999), \"token_rank\": 0}),\n ('__end__', {\"token_logprob\": math.log(0.999), \"token_rank\": 0}),\n ],\n 'extra_args': [],\n },\n # sampling based token selection will produce non-deterministic output, so we can't do data regression\n 'topk': {'extra_args': ['--topk', '2']},\n 'topk_multiple_beams': {'extra_args': ['--topk', '2', '--beam-size', '5']},\n # sampling based token selection will produce non-deterministic output, so we can't do data regression\n 'nucleus': {'extra_args': ['--topp', '0.3']},\n 'nucleus_multiple_beams': {\n 'extra_args': ['--topp', '0.3', '--beam-size', '5']\n },\n 'factual_nucleus': {'extra_args': ['--topp', '0.3']},\n 'factual_nucleus_multiple_beams': {\n 'extra_args': ['--topp', '0.3', '--beam-size', '5']\n },\n # sampling based token selection will produce non-deterministic output, so we can't do data regression\n 'delayedbeam': {'extra_args': ['--topk', '2', '--beam-delay', '2']},\n }\n\n for inference_type in inference_types:\n args = [\n '--model-file',\n 'zoo:unittest/transformer_generator2/model',\n '--inference',\n inference_type,\n '--truncate',\n '1024',\n '-v',\n ] + gold_data[inference_type]['extra_args']\n\n pp = ParlaiParser(True, True)\n agent = create_agent(pp.parse_args(args), True)\n obs = {'text': '5', 'episode_done': False}\n agent.observe(obs)\n act = agent.act()\n\n if 'text_token_info' in gold_data[inference_type]:\n for i, tok_data in enumerate(act['text_token_info']):\n assert (\n gold_data[inference_type]['text_token_info'][i][0]\n == tok_data[0]\n ), f\"failed token prediction for inference type {inference_type} at token {gold_data[inference_type]['text_token_info'][i][0]}\"\n assert math.isclose(\n gold_data[inference_type]['text_token_info'][i][1][\n \"token_logprob\"\n ],\n tok_data[1][\"token_logprob\"],\n abs_tol=1e-3,\n ), f\"failed token log-probability prediction for inference type {inference_type} at token {gold_data[inference_type]['text_token_info'][i][0]}\"\n assert math.isclose(\n gold_data[inference_type]['text_token_info'][i][1][\n \"token_rank\"\n ],\n tok_data[1][\"token_rank\"],\n ), 
f\"failed token rank prediction for inference type {inference_type} at token {gold_data[inference_type]['text_token_info'][i][0]}\"\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1346, "n_words": 245, "vocab_size": 120, "complexity": 4, "nloc": 72, "token_counts": 470, "n_ast_nodes": 877, "n_identifiers": 21, "random_cut": "def test_token_level_loss_logging(self):\n \n inference_types = [\n 'beam',\n 'greedy',\n 'topk',\n 'nucleus',\n 'factual_nucleus',\n 'delayedbeam',\n ]\n gold_data = {\n 'beam': {\n 'text_token_info': [\n ('__start__', {\"token_logprob\": 0.0, \"token_rank\": 0}),\n ('5', {\"token_logprob\": math.log(0.999), \"token_rank\": 0}),\n ('__end__', {\"token_logprob\": math.log(0.999), \"token_rank\": 0}),\n ],\n 'extra_args': ['--beam-size', '3'],\n },\n 'greedy': {\n 'text_token_info': [\n ('__start__', {\"token_logprob\": 0.0, \"token_rank\": 0}),\n ('5', {\"token_logprob\": math.log(0.999), \"token_rank\": 0}),\n ('__end__', {\"token_logprob\": math.log(0.999), \"token_rank\": 0}),\n ],\n 'extra_args': [],\n },\n # sampling based token selection will produce non-deterministic output, so we can't do data regression\n 'topk': {'extra_args': ['--topk', '2']},\n 'topk_multiple_beams': {'extra_args': ['--topk', '2', '--beam-size', '5']},\n # sampling based token selection will produce non-deterministic output, so we can't do data regression\n 'nucleus': {'extra_args': ['--topp', '0.3']},\n 'nucleus_multiple_beams': {\n 'extra_args': ['--topp', '0.3', '--beam-size', '5'" }, { "id": 256759, "commit_id": "dde9d592715452d420e22c5abaf2a99f995ed49d", "repo": "haystack", "path": "test/test_document_classifier.py", "file_name": "test_document_classifier.py", "fun_name": "test_zero_shot_document_classifier", "commit_message": "fix pip backtracking issue (#2281)\n\n* fix pip backtracking issue\r\n\r\n* restrict azure-core version\r\n\r\n* Remove the trailing comma\r\n\r\n* Add skip_magic_trailing_comma in pyproject.toml for pydoc compatibility\r\n\r\n* Pin pydoc-markdown _again_\r\n\r\nCo-authored-by: Sara Zan \r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def test_zero_shot_document_classifier(zero_shot_document_classifier):\n assert isinstance(zero_shot_document_classifier, BaseDocumentClassifier)\n\n docs = [\n Document(\n content= * 700, # extra long text to check truncation\n meta={\"name\": \"0\"},\n id=\"1\",\n ),\n Document(content=, meta={\"name\": \"1\"}, id=\"2\"),\n ]\n results = zero_shot_document_classifier.predict(documents=docs)\n expected_labels = [\"positive\", \"negative\"]\n for i, doc in enumerate(results):\n assert doc.to_dict()[\"meta\"][\"classification\"][\"label\"] == expected_labels[i]\n\n\n@pytest.mark.slow", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "@pytest.mark.slow", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 123, "n_words": 45, "vocab_size": 41, "complexity": 2, "nloc": 14, "token_counts": 104, "n_ast_nodes": 189, "n_identifiers": 20, "random_cut": "def test_zero_shot_document_classifier(zero_shot_document_classifier):\n assert isinstance(zero_shot_document_classifie" }, { "id": 120023, "commit_id": "1246b6fc737f74ff00e8d681fb348c21b0c3626e", "repo": "jax", "path": "jax/_src/public_test_util.py", "file_name": "public_test_util.py", "fun_name": "numerical_jvp", "commit_message": "Separate jax.test_util implementations into public and 
private sources.\n\nEventually the private functionality will no longer be exported via the jax.test_util submodule.\n\nPiperOrigin-RevId: 439415485", "code": "def numerical_jvp(f, primals, tangents, eps=EPS):\n delta = scalar_mul(tangents, eps)\n f_pos = f(*add(primals, delta))\n f_neg = f(*sub(primals, delta))\n return scalar_mul(safe_sub(f_pos, f_neg), 0.5 / eps)\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 26, "n_words": 23, "vocab_size": 19, "complexity": 1, "nloc": 5, "token_counts": 61, "n_ast_nodes": 88, "n_identifiers": 13, "random_cut": "def numerical_jvp(f, primals, tangents, eps=EPS):\n delta = scalar_mul(tangents, eps)\n f_pos = f(*add(primals, delta))\n f_neg = f(*sub(pri" }, { "id": 28369, "commit_id": "6c0a67da7b5c90b123960443e030854225f51a53", "repo": "saleor", "path": "saleor/checkout/utils.py", "file_name": "utils.py", "fun_name": "_append_line_to_delete", "commit_message": "Multiple copies of a single variant in the same Checkout/Order (#10095)\n\n* CHANGELOG.md update\r\n\r\n* Handle same variant in multiple lines in CheckoutLineAdd/Update\r\n\r\n* Handle same variant in multiple lines in CheckoutLineAdd/Update\r\n\r\n* Review changes\r\n\r\n* Add checkout with same variant in multiple lines to populatedb data\r\n\r\n* Improve handling lines with unavailable variants\r\n\r\n* Create new line on liensAdd when variant already in multiple lines\r\n\r\n* CHANGELOG.md update\r\n\r\n* Checkout limits calculation update (#10300)\r\n\r\n* Handle same variant in multiple lines in checkout limits calculation\r\n\r\n* Performence improvements\r\n\r\n* Same variant in multiple lines for order (#10299)\r\n\r\n* Handle same variant in multiple lines in DraftOrderCreate/OrderCreate/OrderLinesCreate mutations\r\n\r\n* Rebase fixes\r\n\r\n* Formating fixes\r\n\r\n* Update mutation description\r\n\r\n* Review upgrades\r\n\r\n* Review changes", "code": "def _append_line_to_delete(to_delete, line_data, line):\n quantity = line_data.quantity\n if line_data.quantity_to_update:\n if quantity <= 0:\n to_delete.append(line)\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 37, "n_words": 14, "vocab_size": 12, "complexity": 3, "nloc": 5, "token_counts": 30, "n_ast_nodes": 47, "n_identifiers": 7, "random_cut": "def _append_line_to_delete(to_delete, line_data, line):\n quantity =" }, { "id": 77798, "commit_id": "69e2f523aacb970c7335848a271bdc9dd4108671", "repo": "wagtail", "path": "wagtail/models/__init__.py", "file_name": "__init__.py", "fun_name": "get_content_type", "commit_message": "Extract revision methods in `Page` into `RevisionMixin`", "code": "def get_content_type(self):\n return ContentType.objects.get_for_model(self, for_concrete_model=False)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 28, "n_identifiers": 6, "random_cut": "def get_content_type(self):\n return ContentType.objects." 
}, { "id": 22954, "commit_id": "a323fce66dd68a881cf599526185b52ab5df356b", "repo": "PaddleOCR", "path": "tools/program.py", "file_name": "program.py", "fun_name": "load_config", "commit_message": "vqa code integrated into ppocr training system", "code": "def load_config(file_path):\n \n _, ext = os.path.splitext(file_path)\n assert ext in ['.yml', '.yaml'], \"only support yaml files for now\"\n config = yaml.load(open(file_path, 'rb'), Loader=yaml.Loader)\n return config\n\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 39, "n_words": 24, "vocab_size": 21, "complexity": 1, "nloc": 5, "token_counts": 49, "n_ast_nodes": 84, "n_identifiers": 12, "random_cut": "def load_config(file_path):\n \n _, ext = os.path.splitext(file_path)\n assert ext in ['.yml', '.yaml'], \"only support yaml files for now\"\n config = yaml.load(open(file_path, 'rb'), Loader=yaml.Loader)\n return config\n\n" }, { "id": 45603, "commit_id": "6c37e47cf69083326c0ee535e5fb950c5dfa4c4a", "repo": "airflow", "path": "kubernetes_tests/test_kubernetes_pod_operator.py", "file_name": "test_kubernetes_pod_operator.py", "fun_name": "create_context", "commit_message": "Add map_index label to mapped KubernetesPodOperator (#21916)", "code": "def create_context(task):\n dag = DAG(dag_id=\"dag\")\n tzinfo = pendulum.timezone(\"Europe/Amsterdam\")\n execution_date = timezone.datetime(2016, 1, 1, 1, 0, 0, tzinfo=tzinfo)\n dag_run = DagRun(dag_id=dag.dag_id, execution_date=execution_date)\n task_instance = TaskInstance(task=task)\n task_instance.dag_run = dag_run\n task_instance.dag_id = dag.dag_id\n task_instance.xcom_push = mock.Mock()\n return {\n \"dag\": dag,\n \"ts\": execution_date.isoformat(),\n \"task\": task,\n \"ti\": task_instance,\n \"task_instance\": task_instance,\n }\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 110, "n_words": 46, "vocab_size": 34, "complexity": 1, "nloc": 16, "token_counts": 113, "n_ast_nodes": 182, "n_identifiers": 18, "random_cut": "def create_context(task):\n dag = DAG(dag_id=\"dag\")\n tzinfo = pendulum.timezone(\"Europe/Amsterdam\")\n execution_date = timezone.datetime(2016, 1, 1, 1, 0, 0, t" }, { "id": 216983, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/encodings/idna.py", "file_name": "idna.py", "fun_name": "nameprep", "commit_message": "add python 3.10.4 for windows", "code": "def nameprep(label):\n # Map\n newlabel = []\n for c in label:\n if stringprep.in_table_b1(c):\n # Map to nothing\n continue\n newlabel.append(stringprep.map_table_b2(c))\n label = \"\".join(newlabel)\n\n # Normalize\n label = unicodedata.normalize(\"NFKC\", label)\n\n # Prohibit\n for c in label:\n if stringprep.in_table_c12(c) or \\\n stringprep.in_table_c22(c) or \\\n stringprep.in_table_c3(c) or \\\n stringprep.in_table_c4(c) or \\\n stringprep.in_table_c5(c) or \\\n stringprep.in_table_c6(c) or \\\n stringprep.in_table_c7(c) or \\\n stringprep.in_table_c8(c) or \\\n stringprep.in_table_c9(c):\n raise UnicodeError(\"Invalid character %r\" % c)\n\n # Check bidi\n RandAL = [stringprep.in_table_d1(x) for x in label]\n for c in RandAL:\n if c:\n # There is a RandAL char in the string. 
Must perform further\n # tests:\n # 1) The characters in section 5.8 MUST be prohibited.\n # This is table C.8, which was already checked\n # 2) If a string contains any RandALCat character, the string\n # MUST NOT contain any LCat character.\n if any(stringprep.in_table_d2(x) for x in label):\n raise UnicodeError(\"Violation of BIDI requirement 2\")\n\n # 3) If a string contains any RandALCat character, a\n # RandALCat character MUST be the first character of the\n # string, and a RandALCat character MUST be the last\n # character of the string.\n if not RandAL[0] or not RandAL[-1]:\n raise UnicodeError(\"Violation of BIDI requirement 3\")\n\n return label\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 532, "n_words": 194, "vocab_size": 101, "complexity": 20, "nloc": 27, "token_counts": 199, "n_ast_nodes": 325, "n_identifiers": 26, "random_cut": "def nameprep(label):\n # Map\n newlabel = []\n for c in label:\n if stringprep.in_table_b1(c):\n # Map to nothing\n continue\n newlabel.append(stringprep.map_table_b2(c))\n label = \"\".join(newlabel)\n\n # Normalize\n label = unicodedata.normalize(\"NFKC\", label)\n\n # Prohibit\n for c in label:\n if stringprep.in_table_c12(c) or \\\n stringprep.in_table_c22(c) or \\\n stringprep.in_table_c3(c) or \\\n " }, { "id": 122442, "commit_id": "532cd7ed74ea2a282b0c626b410308bdd626cfe3", "repo": "jax", "path": "benchmarks/api_benchmark.py", "file_name": "api_benchmark.py", "fun_name": "bench_pjit_check_aval_sharding", "commit_message": "Skip the benchmarks properly via state.skip_with_error when enough devices are not present.\n\nPiperOrigin-RevId: 485931295", "code": "def bench_pjit_check_aval_sharding(state):\n mesh = create_mesh((4, 2), ('x', 'y'), state)\n if mesh is None:\n return\n s = sharding.MeshPspecSharding(mesh, pxla.PartitionSpec('x', 'y'))\n aval = jax.ShapedArray((8, 2), np.int32)\n\n while state:\n pjit_lib.pjit_check_aval_sharding([s] * 100, [aval] * 100, 'benchmark', False)\n\n\n@google_benchmark.register\n@google_benchmark.option.unit(google_benchmark.kMillisecond)", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@google_benchmark.register\n@google_benchmark.option.unit(google_benchmark.kMillisecond)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 44, "n_words": 36, "vocab_size": 30, "complexity": 3, "nloc": 8, "token_counts": 85, "n_ast_nodes": 158, "n_identifiers": 21, "random_cut": "def bench_pjit_check_aval_sharding(state):\n mesh = create_mesh((4, 2), ('x', 'y'), state)\n if mesh is None:\n return\n s = sharding.MeshPspecSharding(me" }, { "id": 22746, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "primelib/primelib.py", "file_name": "primelib.py", "fun_name": "fib", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def fib(n):\n \n\n # precondition\n assert isinstance(n, int) and (n >= 0), \"'n' must been an int and >= 0\"\n\n tmp = 0\n fib1 = 1\n ans = 1 # this will be return\n\n for i in range(n - 1):\n tmp = ans\n ans += fib1\n fib1 = tmp\n\n return ans\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 96, "n_words": 50, "vocab_size": 34, "complexity": 3, "nloc": 10, "token_counts": 51, "n_ast_nodes": 86, "n_identifiers": 9, "random_cut": "def fib(n):\n \n\n # precondit" }, { "id": 192628, "commit_id": 
"1ac6e8b91b980b052324f77828a5ef4a6715dd66", "repo": "vision", "path": "torchvision/prototype/datasets/_builtin/eurosat.py", "file_name": "eurosat.py", "fun_name": "_info", "commit_message": "Refactor and simplify prototype datasets (#5778)\n\n* refactor prototype datasets to inherit from IterDataPipe (#5448)\r\n\r\n* refactor prototype datasets to inherit from IterDataPipe\r\n\r\n* depend on new architecture\r\n\r\n* fix missing file detection\r\n\r\n* remove unrelated file\r\n\r\n* reinstante decorator for mock registering\r\n\r\n* options -> config\r\n\r\n* remove passing of info to mock data functions\r\n\r\n* refactor categories file generation\r\n\r\n* fix imagenet\r\n\r\n* fix prototype datasets data loading tests (#5711)\r\n\r\n* reenable serialization test\r\n\r\n* cleanup\r\n\r\n* fix dill test\r\n\r\n* trigger CI\r\n\r\n* patch DILL_AVAILABLE for pickle serialization\r\n\r\n* revert CI changes\r\n\r\n* remove dill test and traversable test\r\n\r\n* add data loader test\r\n\r\n* parametrize over only_datapipe\r\n\r\n* draw one sample rather than exhaust data loader\r\n\r\n* cleanup\r\n\r\n* trigger CI\r\n\r\n* migrate VOC prototype dataset (#5743)\r\n\r\n* migrate VOC prototype dataset\r\n\r\n* cleanup\r\n\r\n* revert unrelated mock data changes\r\n\r\n* remove categories annotations\r\n\r\n* move properties to constructor\r\n\r\n* readd homepage\r\n\r\n* migrate CIFAR prototype datasets (#5751)\r\n\r\n* migrate country211 prototype dataset (#5753)\r\n\r\n* migrate CLEVR prototype datsaet (#5752)\r\n\r\n* migrate coco prototype (#5473)\r\n\r\n* migrate coco prototype\r\n\r\n* revert unrelated change\r\n\r\n* add kwargs to super constructor call\r\n\r\n* remove unneeded changes\r\n\r\n* fix docstring position\r\n\r\n* make kwargs explicit\r\n\r\n* add dependencies to docstring\r\n\r\n* fix missing dependency message\r\n\r\n* Migrate PCAM prototype dataset (#5745)\r\n\r\n* Port PCAM\r\n\r\n* skip_integrity_check\r\n\r\n* Update torchvision/prototype/datasets/_builtin/pcam.py\r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* Address comments\r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* Migrate DTD prototype dataset (#5757)\r\n\r\n* Migrate DTD prototype dataset\r\n\r\n* Docstring\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Philip Meier \r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* Migrate GTSRB prototype dataset (#5746)\r\n\r\n* Migrate GTSRB prototype dataset\r\n\r\n* ufmt\r\n\r\n* Address comments\r\n\r\n* Apparently mypy doesn't know that __len__ returns ints. 
How cute.\r\n\r\n* why is the CI not triggered??\r\n\r\n* Update torchvision/prototype/datasets/_builtin/gtsrb.py\r\n\r\nCo-authored-by: Philip Meier \r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* migrate CelebA prototype dataset (#5750)\r\n\r\n* migrate CelebA prototype dataset\r\n\r\n* inline split_id\r\n\r\n* Migrate Food101 prototype dataset (#5758)\r\n\r\n* Migrate Food101 dataset\r\n\r\n* Added length\r\n\r\n* Update torchvision/prototype/datasets/_builtin/food101.py\r\n\r\nCo-authored-by: Philip Meier \r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* Migrate Fer2013 prototype dataset (#5759)\r\n\r\n* Migrate Fer2013 prototype dataset\r\n\r\n* Update torchvision/prototype/datasets/_builtin/fer2013.py\r\n\r\nCo-authored-by: Philip Meier \r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* Migrate EuroSAT prototype dataset (#5760)\r\n\r\n* Migrate Semeion prototype dataset (#5761)\r\n\r\n* migrate caltech prototype datasets (#5749)\r\n\r\n* migrate caltech prototype datasets\r\n\r\n* resolve third party dependencies\r\n\r\n* Migrate Oxford Pets prototype dataset (#5764)\r\n\r\n* Migrate Oxford Pets prototype dataset\r\n\r\n* Update torchvision/prototype/datasets/_builtin/oxford_iiit_pet.py\r\n\r\nCo-authored-by: Philip Meier \r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* migrate mnist prototype datasets (#5480)\r\n\r\n* migrate MNIST prototype datasets\r\n\r\n* Update torchvision/prototype/datasets/_builtin/mnist.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Migrate Stanford Cars prototype dataset (#5767)\r\n\r\n* Migrate Stanford Cars prototype dataset\r\n\r\n* Address comments\r\n\r\n* fix category file generation (#5770)\r\n\r\n* fix category file generation\r\n\r\n* revert unrelated change\r\n\r\n* revert unrelated change\r\n\r\n* migrate cub200 prototype dataset (#5765)\r\n\r\n* migrate cub200 prototype dataset\r\n\r\n* address comments\r\n\r\n* fix category-file-generation\r\n\r\n* Migrate USPS prototype dataset (#5771)\r\n\r\n* migrate SBD prototype dataset (#5772)\r\n\r\n* migrate SBD prototype dataset\r\n\r\n* reuse categories\r\n\r\n* Migrate SVHN prototype dataset (#5769)\r\n\r\n* add test to enforce __len__ is working on prototype datasets (#5742)\r\n\r\n* reactivate special dataset tests\r\n\r\n* add missing annotation\r\n\r\n* Cleanup prototype dataset implementation (#5774)\r\n\r\n* Remove Dataset2 class\r\n\r\n* Move read_categories_file out of DatasetInfo\r\n\r\n* Remove FrozenBunch and FrozenMapping\r\n\r\n* Remove test_prototype_datasets_api.py and move missing dep test somewhere else\r\n\r\n* ufmt\r\n\r\n* Let read_categories_file accept names instead of paths\r\n\r\n* Mypy\r\n\r\n* flake8\r\n\r\n* fix category file reading\r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* update prototype dataset README (#5777)\r\n\r\n* update prototype dataset README\r\n\r\n* fix header level\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\nCo-authored-by: Nicolas Hug ", "code": "def _info() -> Dict[str, Any]:\n return dict(\n categories=(\n \"AnnualCrop\",\n \"Forest\",\n \"HerbaceousVegetation\",\n \"Highway\",\n \"Industrial,\" \"Pasture\",\n \"PermanentCrop\",\n \"Residential\",\n \"River\",\n \"SeaLake\",\n )\n )\n\n\n@register_dataset(NAME)", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "@register_dataset(NAME)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 138, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 14, 
"token_counts": 38, "n_ast_nodes": 79, "n_identifiers": 8, "random_cut": "def _info() -> Dict[str, Any]:\n return dict(\n categories=(\n \"AnnualCrop\",\n " }, { "id": 286400, "commit_id": "09f753da1c2a2f03c41fe6a3ca2eb79f6ea58995", "repo": "OpenBBTerminal", "path": "openbb_terminal/cryptocurrency/overview/overview_controller.py", "file_name": "overview_controller.py", "fun_name": "call_derivatives", "commit_message": "More Fixes to Crypto + key sort (#3244)\n\n* fix #3095 - autocomplete and command working + key sort\r\n\r\n* fix #3056\r\n\r\n* fix [Bug] bugs #3048\r\n\r\n* fix [Bug] bug #3017\r\n\r\n* sort -> sortby, not ascend, tests\r\n\r\n* fix my goof ups\r\n\r\nCo-authored-by: james ", "code": "def call_derivatives(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"derivatives\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n\n parser.add_argument(\n \"-l\",\n \"--limit\",\n dest=\"limit\",\n type=check_positive,\n help=\"display N number records\",\n default=15,\n )\n\n parser.add_argument(\n \"-s\",\n \"--sortby\",\n dest=\"sortby\",\n type=str,\n help=\"Sort by given column. Default: Rank\",\n default=\"Rank\",\n choices=pycoingecko_model.DERIVATIVES_FILTERS,\n )\n\n parser.add_argument(\n \"--descend\",\n action=\"store_true\",\n help=\"Flag to sort in descending order (lowest first)\",\n dest=\"descend\",\n default=False,\n )\n\n ns_parser = self.parse_known_args_and_warn(\n parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED\n )\n if ns_parser:\n pycoingecko_view.display_derivatives(\n limit=ns_parser.limit,\n sortby=ns_parser.sortby,\n ascend=not ns_parser.descend,\n export=ns_parser.export,\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 485, "n_words": 66, "vocab_size": 58, "complexity": 2, "nloc": 50, "token_counts": 157, "n_ast_nodes": 250, "n_identifiers": 32, "random_cut": "def call_derivatives(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"derivatives\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n\n parser.add_argument(\n \"-l\",\n \"--limit\",\n dest=\"limit\",\n type=check_positive,\n help=\"display N number records\",\n default=15,\n )\n\n parser.add_argument(\n \"-s\",\n \"--sortby\",\n dest=\"sortby\",\n type=str,\n help=\"Sort by given column. 
Default: Rank\",\n default=\"Rank\",\n choices=pycoingecko_model.DERIVATIVES_FILTERS,\n )\n\n parser.add_argument(\n \"--descend\",\n action=\"store_true\",\n " }, { "id": 131271, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/test_autoscaler.py", "file_name": "test_autoscaler.py", "fun_name": "testAutoscalerConfigValidationFailNotFatal", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def testAutoscalerConfigValidationFailNotFatal(self):\n invalid_config = {**SMALL_CLUSTER, \"invalid_property_12345\": \"test\"}\n # First check that this config is actually invalid\n with pytest.raises(ValidationError):\n validate_config(invalid_config)\n config_path = self.write_config(invalid_config)\n self.provider = MockProvider()\n runner = MockProcessRunner()\n autoscaler = MockAutoscaler(\n config_path,\n LoadMetrics(),\n MockNodeInfoStub(),\n max_failures=0,\n process_runner=runner,\n update_interval_s=0,\n )\n assert len(self.provider.non_terminated_nodes({})) == 0\n autoscaler.update()\n self.waitForNodes(2)\n autoscaler.update()\n self.waitForNodes(2)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 213, "n_words": 46, "vocab_size": 40, "complexity": 1, "nloc": 20, "token_counts": 111, "n_ast_nodes": 185, "n_identifiers": 25, "random_cut": "def testAutoscalerConfigValidationFailNotFatal(self):\n invalid_config = {**SMALL_CLUSTER, \"inv" }, { "id": 80288, "commit_id": "dea53a0dba692b5271c661b60de8f1b63748cf1f", "repo": "awx", "path": "awx/api/views/root.py", "file_name": "root.py", "fun_name": "get", "commit_message": "Creates end point and serializer for receptor mesh", "code": "def get(self, request, format=None):\n \n data = OrderedDict()\n data['ping'] = reverse('api:api_v2_ping_view', request=request)\n data['instances'] = reverse('api:instance_list', request=request)\n data['instance_groups'] = reverse('api:instance_group_list', request=request)\n data['config'] = reverse('api:api_v2_config_view', request=request)\n data['settings'] = reverse('api:setting_category_list', request=request)\n data['me'] = reverse('api:user_me_list', request=request)\n data['dashboard'] = reverse('api:dashboard_view', request=request)\n data['organizations'] = reverse('api:organization_list', request=request)\n data['users'] = reverse('api:user_list', request=request)\n data['execution_environments'] = reverse('api:execution_environment_list', request=request)\n data['projects'] = reverse('api:project_list', request=request)\n data['project_updates'] = reverse('api:project_update_list', request=request)\n data['teams'] = reverse('api:team_list', request=request)\n data['credentials'] = reverse('api:credential_list', request=request)\n data['credential_types'] = reverse('api:credential_type_list', request=request)\n data['credential_input_sources'] = reverse('api:credential_input_source_list', request=request)\n data['applications'] = reverse('api:o_auth2_application_list', request=request)\n data['tokens'] = reverse('api:o_auth2_token_list', request=request)\n data['metrics'] = reverse('api:metrics_view', request=request)\n data['inventory'] = reverse('api:inventory_list', request=request)\n data['inventory_sources'] = reverse('api:inventory_source_list', request=request)\n data['inventory_updates'] = reverse('api:inventory_update_list', request=request)\n data['groups'] = 
reverse('api:group_list', request=request)\n data['hosts'] = reverse('api:host_list', request=request)\n data['job_templates'] = reverse('api:job_template_list', request=request)\n data['jobs'] = reverse('api:job_list', request=request)\n data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)\n data['system_job_templates'] = reverse('api:system_job_template_list', request=request)\n data['system_jobs'] = reverse('api:system_job_list', request=request)\n data['schedules'] = reverse('api:schedule_list', request=request)\n data['roles'] = reverse('api:role_list', request=request)\n data['notification_templates'] = reverse('api:notification_template_list', request=request)\n data['notifications'] = reverse('api:notification_list', request=request)\n data['labels'] = reverse('api:label_list', request=request)\n data['unified_job_templates'] = reverse('api:unified_job_template_list', request=request)\n data['unified_jobs'] = reverse('api:unified_job_list', request=request)\n data['activity_stream'] = reverse('api:activity_stream_list', request=request)\n data['workflow_job_templates'] = reverse('api:workflow_job_template_list', request=request)\n data['workflow_jobs'] = reverse('api:workflow_job_list', request=request)\n data['workflow_approvals'] = reverse('api:workflow_approval_list', request=request)\n data['workflow_job_template_nodes'] = reverse('api:workflow_job_template_node_list', request=request)\n data['workflow_job_nodes'] = reverse('api:workflow_job_node_list', request=request)\n data['mesh_visualizer_view'] = reverse('api:mesh_visualizer_view', request=request)\n return Response(data)\n\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 503, "n_words": 181, "vocab_size": 96, "complexity": 1, "nloc": 46, "token_counts": 581, "n_ast_nodes": 1026, "n_identifiers": 8, "random_cut": "def get(self, request, format=None):\n \n data = OrderedDict()\n data['ping'] = reverse('api:api_v2_ping_view', request=request)\n data['instances'] = reverse('api:instance_list', request=request)\n data['instance_groups'] = reverse('api:instance_group_list', request=request)\n data['config'] = reverse('api:api_v2_config_view', request=request)\n data['settings'] = reverse('api:setting_category_list', request=request)\n data['me'] = reverse('api:user_me_list', request=request)\n data['dashboard'] = reverse('api:dashboard_view', request=request)\n data['organizations'] = reverse('api:organization_list', request=request)\n data['users'] = reverse('api:user_list', request=request)\n data['execution_environments'] = reverse('api:execution_environment_list', request=request)\n data['projects'] = reverse('api:project_list', request=request)\n data['project_updates'] = reverse('api:project_update_list', request=request)\n data['teams'] = reverse('api:team_list', request=request)\n data['credentials'] = reverse('api:credential_list', request=request)\n data['credential_types'] = reverse('api:credential_type_list', request=request)\n data['credential_input_sources'] = reverse('api:credential_input_source_list', request=request)\n data['applications'] = reverse('api:o_auth2_application_list', request=request)\n data['tokens'] = reverse('api:o_auth2_token_list', request=request)\n data['metrics'] = reverse('api:metrics_view', request=request)\n data['inventory'] = reverse('api:inventory_list', request=request)\n data['inventory_sources'] = reverse('api:inventory_source_list', request=request)\n 
data['inventory_updates'] = reverse('api:inventory_update_list', request=request)\n data['groups'] = reverse('api:group_list', request=request)\n data['hosts'] = reverse('api:host_list', request=request)\n data['job_templates'] = reverse('api:job_template_list', request=request)\n data['jobs'] = reverse('api:job_list', request=request)\n data['ad_hoc_commands'] = reverse('api:ad_hoc_command_list', request=request)\n data['system_job_templates'] = reverse('api:system_job_template_list', request=request)\n data['system_jobs'] = reverse('api:system_job_list', request=request)\n data['schedules'] = reverse('api:schedule_list', request=request)\n data['roles'] = reverse('api:role_list', request=request)\n data['notification_templates'] = reverse('api:notification_template_list', request=request)\n d" }, { "id": 305338, "commit_id": "cfa838b27aa08822bb1d46fcac50b72237a33505", "repo": "core", "path": "homeassistant/components/bmw_connected_drive/lock.py", "file_name": "lock.py", "fun_name": "_handle_coordinator_update", "commit_message": "Small refactoring of BMW lock entity (#77451)\n\n* Refactor entity_description\r\n\r\n* Fix default attrs not always shown\r\n\r\n* Simplify further\r\n\r\nCo-authored-by: @emontnemery\r\n\r\nCo-authored-by: rikroe ", "code": "def _handle_coordinator_update(self) -> None:\n \n _LOGGER.debug(\"Updating lock data of %s\", self.vehicle.name)\n # Set default attributes\n self._attr_extra_state_attributes = self._attrs\n\n # Only update the HA state machine if the vehicle reliably reports its lock state\n if self.door_lock_state_available:\n self._attr_is_locked = self.vehicle.doors_and_windows.door_lock_state in {\n LockState.LOCKED,\n LockState.SECURED,\n }\n self._attr_extra_state_attributes[\n \"door_lock_state\"\n ] = self.vehicle.doors_and_windows.door_lock_state.value\n\n super()._handle_coordinator_update()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 186, "n_words": 48, "vocab_size": 41, "complexity": 2, "nloc": 13, "token_counts": 77, "n_ast_nodes": 128, "n_identifiers": 17, "random_cut": "def _handle_coordinator_update(self) -> None:\n \n _LOGGER.debug(\"Updating lock data of %s\", self.vehicle.name)\n # Set default attributes\n self._attr_extra_state_attributes = self._attr" }, { "id": 121471, "commit_id": "88f2b5e86d36fbc9e0dcfba3eab5b56975148c20", "repo": "jax", "path": "jax/_src/callback.py", "file_name": "callback.py", "fun_name": "pure_callback_lowering", "commit_message": "Add functionality for \"pure\" callbacks\n\nAlso avoids using CPP dispatch path when host callbacks are involved\n\nPiperOrigin-RevId: 467270949", "code": "def pure_callback_lowering(ctx, *args, callback, **params):\n\n if ctx.module_context.platform == \"TPU\" and jaxlib.version < (0, 3, 15):\n raise NotImplementedError(\"Pure callbacks on TPU not supported. 
\"\n \"Please upgrade to a jaxlib >= 0.3.15.\")\n if isinstance(ctx.module_context.axis_context,\n (mlir.SPMDAxisContext, mlir.ShardingContext)):\n raise NotImplementedError(\"Sharding for pure callback not implemented.\")\n\n def _callback(*flat_args):\n return tuple(pure_callback_p.impl(*flat_args, callback=callback, **params))\n\n result, _, keepalive = mlir.emit_python_callback(\n ctx, _callback, None, list(args), ctx.avals_in, ctx.avals_out, False,\n sharding=None)\n ctx.module_context.add_keepalive(keepalive)\n return result\n\nmlir.register_lowering(pure_callback_p, pure_callback_lowering)\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 132, "n_words": 65, "vocab_size": 60, "complexity": 4, "nloc": 13, "token_counts": 112, "n_ast_nodes": 216, "n_identifiers": 30, "random_cut": "def pure_callback_lowering(ctx, *args, callback, **params):\n\n if ctx.module_context.platform == \"TPU\" and jaxlib.version < (0, 3, 15):\n raise NotImplementedError(\"Pure callbacks on TPU not supported. \"\n \"Please upgrade to a jaxlib >= 0.3.15.\")\n if isinstance(ctx.module_context.axis_context,\n (mlir.SPMDAxisContext, mlir.ShardingContext)):\n raise NotImplementedError(\"Sharding for pure callback not implemented.\")\n\n def _callback(*flat_args):\n return tuple(pure_callback_p.impl(*flat_args, callback=callback, **params))\n\n result, _, keepalive = mlir.emit_python_callback(\n ctx, _callback, None, list(args), ctx.avals_in, ctx.avals_out, False,\n sharding=None)\n ctx.module_context.add_keepalive(keepalive)\n return result\n\nmlir.register_lowering(pure_callback_p, pure_callback_lowering)\n\n" }, { "id": 176222, "commit_id": "149177564df37d991e10d3eb397fecfcc829a414", "repo": "networkx", "path": "networkx/tests/test_lazy_imports.py", "file_name": "test_lazy_imports.py", "fun_name": "test_lazy_attach", "commit_message": "Initial setup of lazy_import functions. (#4909)\n\n* Initial setup of lazy_import functions.\r\n\r\nStill needs:\r\n- way to handle pytest.importorskip --> soln is not to load delayed modules into sys.modules\r\n- Loader class instead of monkey patch on SourceFileLoader\r\n- Way to identify a lazily-loaded-not-yet-used module --> now create an instance of the Delayed Module class.\r\n\r\n* fix importorskip with new module class\r\n\r\n* Remove lazy_importorskip. Don't add Delayed reporting module to sys.modules\r\n\r\n* make tests work for pypy\r\n\r\n* refactor to include changes from skimage.lazy\r\n\r\n* fix test handling of types.ModuleType\r\n\r\n* Change name from nx.lazy_imports.load to nx.lazy_import\r\n\r\n* fix tests to use new name. 
keep `attach` name as is.", "code": "def test_lazy_attach():\n name = \"mymod\"\n submods = [\"mysubmodule\", \"anothersubmodule\"]\n myall = {\"not_real_submod\": [\"some_var_or_func\"]}\n\n locls = {\n \"attach\": lazy.attach,\n \"name\": name,\n \"submods\": submods,\n \"myall\": myall,\n }\n s = \"__getattr__, __lazy_dir__, __all__ = attach(name, submods, myall)\"\n\n exec(s, {}, locls)\n expected = {\n \"attach\": lazy.attach,\n \"name\": name,\n \"submods\": submods,\n \"myall\": myall,\n \"__getattr__\": None,\n \"__lazy_dir__\": None,\n \"__all__\": None,\n }\n assert locls.keys() == expected.keys()\n for k, v in expected.items():\n if v is not None:\n assert locls[k] == v\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 200, "n_words": 73, "vocab_size": 50, "complexity": 3, "nloc": 25, "token_counts": 127, "n_ast_nodes": 220, "n_identifiers": 14, "random_cut": "def test_lazy_attach():\n name = \"mymod\"\n submods = [\"mysubmodule\", \"anothersubmodule\"]\n myall = {\"not_real_submod\": [\"some_var_or_func\"]}\n\n locls = {\n \"attach\": lazy.attach,\n \"name\": name,\n \"submods\": submods,\n \"myall\": myall,\n }\n s = \"__getattr__, __lazy_dir__, __all__ = attach(name, submods, myall)\"\n\n exec(s, {}, locls)\n expected = {\n \"attach\": lazy.attach,\n \"name\": name,\n \"submods\": submods" }, { "id": 212842, "commit_id": "b3680477c755277192715b343e9cd4254de7c45e", "repo": "PySimpleGUI", "path": "PySimpleGUI.py", "file_name": "PySimpleGUI.py", "fun_name": "_user_bind_callback", "commit_message": "Added propagate parameter to the Element.bind and Window.bind methods. Indicates whether tkinter should propagate the event to the corresponding element/window or stop with the user callback", "code": "def _user_bind_callback(self, bind_string, event, propagate=True):\n \n key_suffix = self.user_bind_dict.get(bind_string, '')\n self.user_bind_event = event\n if self.Type == ELEM_TYPE_GRAPH:\n self._update_position_for_returned_values(event)\n if self.Key is not None:\n if isinstance(self.Key, str):\n key = self.Key + str(key_suffix)\n else:\n key = (self.Key, key_suffix) # old way (pre 2021) was to make a brand new tuple\n # key = self.Key + (key_suffix,) # in 2021 tried this. It will break existing applications though - if key is a tuple, add one more item\n else:\n key = bind_string\n\n self._generic_callback_handler(force_key_to_be=key)\n\n return 'break' if propagate is not True else None\n\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 236, "n_words": 88, "vocab_size": 65, "complexity": 5, "nloc": 14, "token_counts": 105, "n_ast_nodes": 169, "n_identifiers": 18, "random_cut": "def _user_bind_callback(self, bind_string, event, propagate=True):\n \n key_suffix = self.user_bind_dict.get(bind_string, '')\n self.user_bind_event = event\n if self.Type == ELEM_TYPE_GRAPH:\n self._update_position_for_returned_valu" }, { "id": 138632, "commit_id": "bb4e5cb70a53a50654211136e5bff26dfdfc25a7", "repo": "ray", "path": "rllib/execution/train_ops.py", "file_name": "train_ops.py", "fun_name": "multi_gpu_train_one_step", "commit_message": "[RLlib] CQL: training iteration function. 
(#24166)", "code": "def multi_gpu_train_one_step(trainer, train_batch) -> Dict:\n \n config = trainer.config\n workers = trainer.workers\n local_worker = workers.local_worker()\n num_sgd_iter = config.get(\"num_sgd_iter\", 1)\n sgd_minibatch_size = config.get(\"sgd_minibatch_size\", config[\"train_batch_size\"])\n\n # Determine the number of devices (GPUs or 1 CPU) we use.\n num_devices = int(math.ceil(config[\"num_gpus\"] or 1))\n\n # Make sure total batch size is dividable by the number of devices.\n # Batch size per tower.\n per_device_batch_size = sgd_minibatch_size // num_devices\n # Total batch size.\n batch_size = per_device_batch_size * num_devices\n assert batch_size % num_devices == 0\n assert batch_size >= num_devices, \"Batch size too small!\"\n\n # Handle everything as if multi-agent.\n train_batch = train_batch.as_multi_agent()\n\n # Load data into GPUs.\n load_timer = trainer._timers[LOAD_BATCH_TIMER]\n with load_timer:\n num_loaded_samples = {}\n for policy_id, batch in train_batch.policy_batches.items():\n # Not a policy-to-train.\n if not local_worker.is_policy_to_train(policy_id, train_batch):\n continue\n\n # Decompress SampleBatch, in case some columns are compressed.\n batch.decompress_if_needed()\n\n # Load the entire train batch into the Policy's only buffer\n # (idx=0). Policies only have >1 buffers, if we are training\n # asynchronously.\n num_loaded_samples[policy_id] = local_worker.policy_map[\n policy_id\n ].load_batch_into_buffer(batch, buffer_index=0)\n\n # Execute minibatch SGD on loaded data.\n learn_timer = trainer._timers[LEARN_ON_BATCH_TIMER]\n with learn_timer:\n # Use LearnerInfoBuilder as a unified way to build the final\n # results dict from `learn_on_loaded_batch` call(s).\n # This makes sure results dicts always have the same structure\n # no matter the setup (multi-GPU, multi-agent, minibatch SGD,\n # tf vs torch).\n learner_info_builder = LearnerInfoBuilder(num_devices=num_devices)\n\n for policy_id, samples_per_device in num_loaded_samples.items():\n policy = local_worker.policy_map[policy_id]\n num_batches = max(1, int(samples_per_device) // int(per_device_batch_size))\n logger.debug(\"== sgd epochs for {} ==\".format(policy_id))\n for _ in range(num_sgd_iter):\n permutation = np.random.permutation(num_batches)\n for batch_index in range(num_batches):\n # Learn on the pre-loaded data in the buffer.\n # Note: For minibatch SGD, the data is an offset into\n # the pre-loaded entire train batch.\n results = policy.learn_on_loaded_batch(\n permutation[batch_index] * per_device_batch_size, buffer_index=0\n )\n\n learner_info_builder.add_learn_on_batch_results(results, policy_id)\n\n # Tower reduce and finalize results.\n learner_info = learner_info_builder.finalize()\n\n load_timer.push_units_processed(train_batch.count)\n learn_timer.push_units_processed(train_batch.count)\n\n # TODO: Move this into Trainer's `training_iteration` method for\n # better transparency.\n trainer._counters[NUM_ENV_STEPS_TRAINED] += train_batch.count\n trainer._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps()\n\n return learner_info\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 823, "n_words": 315, "vocab_size": 207, "complexity": 7, "nloc": 60, "token_counts": 317, "n_ast_nodes": 544, "n_identifiers": 58, "random_cut": "def multi_gpu_train_one_step(trainer, train_batch) -> Dict:\n \n config = trainer.config\n workers = trainer.workers\n local_worker = 
workers.local_worker()\n num_sgd_iter = config.get(\"num_sgd_iter\", 1)\n sgd_minibatch_size = config.get(\"sgd_minibatch_size\", config[\"train_batch_size\"])\n\n # Determine the number of devices (GPUs or 1 CPU) we use.\n num_devices = int(math." }, { "id": 97138, "commit_id": "c41d106cdf3f0aa7b368901aae64481b8f185809", "repo": "sentry", "path": "src/sentry/api/endpoints/organization_member_details.py", "file_name": "organization_member_details.py", "fun_name": "_serialize_member", "commit_message": "ref(endpoints): Pass OrganizationMember through `convert_args()` (#32718)", "code": "def _serialize_member(self, member, request, allowed_roles=None):\n context = serialize(member, serializer=OrganizationMemberWithTeamsSerializer())\n\n if request.access.has_scope(\"member:admin\"):\n context[\"invite_link\"] = member.get_invite_link()\n context[\"user\"] = serialize(member.user, request.user, DetailedUserSerializer())\n\n context[\"isOnlyOwner\"] = member.is_only_owner()\n context[\"roles\"] = serialize(\n roles.get_all(), serializer=RoleSerializer(), allowed_roles=allowed_roles\n )\n\n return context\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 105, "n_words": 31, "vocab_size": 26, "complexity": 2, "nloc": 10, "token_counts": 99, "n_ast_nodes": 161, "n_identifiers": 18, "random_cut": "def _serialize_member(self, member, request, allowed_roles=None):\n context = serialize(member, serializer=OrganizationMemberWithTeamsSerializer())\n\n if request.access.has_scope(\"member:admin\"):\n context[\"invite_link\"] = member.get_invite_link()\n context[\"user\"] = serialize(member.user, request.user, DetailedUserSerializer())\n\n context[\"isOnlyOwner\"] = member.is_only_owner()\n context[\"roles\"] = serialize(\n roles.get_all(), serializer=RoleSerializer(), allowed_roles=allowed_roles\n )\n\n return context\n" }, { "id": 203054, "commit_id": "827bc0704761f2c985539d98165420d4fcc0d682", "repo": "django", "path": "django/contrib/admindocs/utils.py", "file_name": "utils.py", "fun_name": "_find_groups", "commit_message": "Refs #28135 -- Refactored out _find_groups()/_get_group_start_end() hooks in admindocs.", "code": "def _find_groups(pattern, group_matcher):\n prev_end = None\n for match in group_matcher.finditer(pattern):\n if indices := _get_group_start_end(match.start(0), match.end(0), pattern):\n start, end = indices\n if prev_end and start > prev_end or not prev_end:\n yield start, end, match\n prev_end = end\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 96, "n_words": 36, "vocab_size": 26, "complexity": 6, "nloc": 8, "token_counts": 66, "n_ast_nodes": 102, "n_identifiers": 10, "random_cut": "def _find_groups(pattern, group_matcher):\n prev_end = None\n for match in group_matcher.finditer(pattern):\n if indices := _get_group_start_end(match.start(0), match.end(0), pattern):\n start, end = indices\n if prev_end and start > prev" }, { "id": 219679, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "logical_xor", "commit_message": "add python 3.10.4 for windows", "code": "def logical_xor(self, other, context=None):\n \n if context is None:\n context = getcontext()\n\n other = _convert_other(other, raiseit=True)\n\n if not self._islogical() or not other._islogical():\n return 
context._raise_error(InvalidOperation)\n\n # fill to context.prec\n (opa, opb) = self._fill_logical(context, self._int, other._int)\n\n # make the operation, and clean starting zeroes\n result = \"\".join([str(int(a)^int(b)) for a,b in zip(opa,opb)])\n return _dec_from_triple(0, result.lstrip('0') or '0', 0)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 139, "n_words": 54, "vocab_size": 45, "complexity": 6, "nloc": 9, "token_counts": 122, "n_ast_nodes": 197, "n_identifiers": 23, "random_cut": "def logical_xor(self, other, context=None):\n \n if context is None:\n context = getcontext()\n\n other = _convert_other(other, raiseit=True)\n\n if not self._islogical() or not other._islogical():\n return context._raise_error(InvalidOperation)\n\n # fill to context.prec\n (opa, opb) = self._fill_logical(context, self._int, other._int)\n\n # make the operation, and clean starting zeroes\n result = \"\".join([str(int(a)^int(b)) for a,b in zip(opa,opb)])\n return _dec_from_tripl" }, { "id": 147034, "commit_id": "e02577adb7d04180f59ffe13294ac0c727db1a23", "repo": "ray", "path": "python/ray/serve/handle.py", "file_name": "handle.py", "fun_name": "__repr__", "commit_message": "[Pipeline] Add and use RayServeLazyHandle for DAG deployment args (#23256)", "code": "def __repr__(self):\n return f\"{self.__class__.__name__}\" f\"(deployment='{self.deployment_name}')\"\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 35, "n_identifiers": 5, "random_cut": "def __repr__(self):\n return f\"{self.__class__.__name__}\" f\"(deployment='{self." 
}, { "id": 7703, "commit_id": "03b4ab273abd7e22a56bb550b56f3d667200abf9", "repo": "ludwig", "path": "tests/integration_tests/test_ray.py", "file_name": "test_ray.py", "fun_name": "test_train_gpu_load_cpu", "commit_message": "Encoder refactor V2 (#2370)\n\n* Added base files and some initial code\r\n\r\n* More files created, fleshing out binary feature and corresponding encoders\r\n\r\n* Added more schema infra\r\n\r\n* Registered all feature encoders\r\n\r\n* Separated feature utils infra\r\n\r\n* Added all preprocessing classes\r\n\r\n* Filled out rest of schema configs\r\n\r\n* Fixed preproc dataclass\r\n\r\n* Fixed small errors blocking import\r\n\r\n* Tests should be passing\r\n\r\n* Deleted unnecesssary files and removed commented out code\r\n\r\n* fixed flake8\r\n\r\n* Fixed most tests\r\n\r\n* fixed pattern validation\r\n\r\n* Fixed missing val strategies and solved custom encoder update issue\r\n\r\n* Removed preprocessing from features due to schema SSOT\r\n\r\n* fix flake 8\r\n\r\n* Started encoder schema work\r\n\r\n* Parallel CNN Encoder\r\n\r\n* StackedCNN Encoder\r\n\r\n* Added image encoders\r\n\r\n* Finished sequence encoders\r\n\r\n* Partway through text encoders\r\n\r\n* Added text encoders\r\n\r\n* Bag Encoders\r\n\r\n* Binary and Date Encoders\r\n\r\n* category, date, h3, and set encoders\r\n\r\n* Wired up encoder schemas\r\n\r\n* Switched input feature encoder schema definitions\r\n\r\n* Fixed handful of issues\r\n\r\n* Fix schema issues\r\n\r\n* Refactored a bunch of test configs\r\n\r\n* Small changes\r\n\r\n* Removed default param from register_encoder\r\n\r\n* Schema working now, working on refactoring\r\n\r\n* Finished decoder schemas\r\n\r\n* Removed default param from register_decoder\r\n\r\n* Added some default params to output features and more decoder work\r\n\r\n* Refactored all input feature encoder/decoder referencing\r\n\r\n* Refactored pretty much all the tests\r\n\r\n* Added back constants\r\n\r\n* Solved gbm issue\r\n\r\n* Fixed save_load test\r\n\r\n* various fixes\r\n\r\n* Fixed import issue\r\n\r\n* Flake 8 and various fixes\r\n\r\n* Solved more failed tests\r\n\r\n* Refactored missed tests\r\n\r\n* Removed commented lines\r\n\r\n* Added init file for decoders schema\r\n\r\n* Fixed failing tests\r\n\r\n* Fixed hyperopt shared params test\r\n\r\n* Added backwards compatability logic and test\r\n\r\n* Flake 8\r\n\r\n* removed comment\r\n\r\n* Added base files and some initial code\r\n\r\n* More files created, fleshing out binary feature and corresponding encoders\r\n\r\n* Added more schema infra\r\n\r\n* Registered all feature encoders\r\n\r\n* Separated feature utils infra\r\n\r\n* Added all preprocessing classes\r\n\r\n* Filled out rest of schema configs\r\n\r\n* Fixed preproc dataclass\r\n\r\n* Fixed small errors blocking import\r\n\r\n* Tests should be passing\r\n\r\n* Deleted unnecesssary files and removed commented out code\r\n\r\n* fixed flake8\r\n\r\n* Fixed most tests\r\n\r\n* fixed pattern validation\r\n\r\n* Fixed missing val strategies and solved custom encoder update issue\r\n\r\n* Removed preprocessing from features due to schema SSOT\r\n\r\n* fix flake 8\r\n\r\n* Started encoder schema work\r\n\r\n* Parallel CNN Encoder\r\n\r\n* StackedCNN Encoder\r\n\r\n* Added image encoders\r\n\r\n* Finished sequence encoders\r\n\r\n* Partway through text encoders\r\n\r\n* Added text encoders\r\n\r\n* Bag Encoders\r\n\r\n* Binary and Date Encoders\r\n\r\n* category, date, h3, and set encoders\r\n\r\n* Wired up encoder schemas\r\n\r\n* Switched input 
feature encoder schema definitions\r\n\r\n* Fixed handful of issues\r\n\r\n* Fix schema issues\r\n\r\n* Refactored a bunch of test configs\r\n\r\n* Small changes\r\n\r\n* Removed default param from register_encoder\r\n\r\n* Schema working now, working on refactoring\r\n\r\n* Finished decoder schemas\r\n\r\n* Removed default param from register_decoder\r\n\r\n* Added some default params to output features and more decoder work\r\n\r\n* Refactored all input feature encoder/decoder referencing\r\n\r\n* Refactored pretty much all the tests\r\n\r\n* Added back constants\r\n\r\n* Solved gbm issue\r\n\r\n* Fixed save_load test\r\n\r\n* various fixes\r\n\r\n* Fixed import issue\r\n\r\n* Flake 8 and various fixes\r\n\r\n* Solved more failed tests\r\n\r\n* Refactored missed tests\r\n\r\n* Removed commented lines\r\n\r\n* Added init file for decoders schema\r\n\r\n* Fixed failing tests\r\n\r\n* Fixed hyperopt shared params test\r\n\r\n* Added backwards compatability logic and test\r\n\r\n* Flake 8\r\n\r\n* removed comment\r\n\r\n* Skipping CTRL Encoder test since it's blasting memory\r\n\r\n* Fixed audio_feature test\r\n\r\n* Addressed failing tests\r\n\r\n* Fixed backwards compatability\r\n\r\n* Fixed more failing tests\r\n\r\n* Flake 8\r\n\r\n* Fixed more tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Refactored default logic for all features\r\n\r\n* Fixed H3 weighted_sum encoder wrong type\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix import issue\r\n\r\n* Mark slow HF tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed defaults tests\r\n\r\n* Pin Ray nightly version\r\n\r\n* fix link\r\n\r\n* pin torch to 07/26\r\n\r\n* cleanup\r\n\r\n* upgrade ray pinned version to enable parquet partition filtering\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* downgrade Ray to ensure TensorDtypes are not inferred during Ray Dataset <=> Dask conversions\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Removed custom encoder decoder helper method\r\n\r\n* unpin torch\r\n\r\n* Flake 8\r\n\r\n* Daniel feedback\r\n\r\n* Small fixes\r\n\r\n* Fixed default weights init\r\n\r\n* Added test with encoder dependencies for global defaults\r\n\r\n* Fixed Arnav's test\r\n\r\n* Addressed Arnav's feedback\r\n\r\n* Address nit\r\n\r\n* Addressed feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Address nit\r\n\r\n* Fix test\r\n\r\n* Initial feedback refactor\r\n\r\n* More refactoring\r\n\r\n* Added vocab field to all text_encoder configs\r\n\r\n* More refactoring\r\n\r\n* Fixed more tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix audio feature test, also s/logging/logger.\r\n\r\n* param names should start with lowercase s/N/n\r\n\r\n* Re-added schema utils used in encoder refactor.\r\n\r\n* Removes unused overwrite_defaults()\r\n\r\n* Oops, name is passed to feature as a kwarg not a member of the feature config. Why? Probably should change that.\r\n\r\n* Change lowercase default back to True. 
Fixes test_strings_utils\r\n\r\n* Set feature validation error with output size 1.\r\n\r\n* MLP mixer encoder needs num_channels.\r\n\r\n* Use schema.dump instead of .__dict__ to convert marshmallow dataclass to dict\r\n\r\n* (x,) in python is a tuple with a single element x. Watch out for this when defining schemas.\r\n\r\n* Construct features by using build_single_input/output to share code for deserializing feature configs. Also changes ECD to BaseModel, IMO its confusing to import ECD to use a class method from BaseModel.\r\n\r\n* Fix test_trainer_utils, adds convenience method BaseFeature.load_from_dictionary\r\n\r\n* Use feature load_from_dictionary instead of BaseModel in feature tests.\r\n\r\n* Populate encoder and decoder types in shared test fixtures, fixes error expectations in test_validate_config_combiner.py\r\n\r\n* Fixes test_validate_config_misc.py by ensuring only one option of OneOf allows None, because OneOf fails validation if more than one condition match.\r\n\r\n* Updates test_defaults.py\r\n\r\n* Adds type, column, proc_column to feature schemas. Revert feature tests by passing in config dict again.\r\n\r\n* decorate feature base classes with @dataclass, fixes failure building input features in trainer.\r\n\r\n* Implement _serialize for PreprocessingDataclassField.\r\n\r\n* use type(feature) to get schema class.\r\n\r\n* Fix test_trainer_utils.py\r\n\r\n* audio_feature requires embedding_size, but passthrough encoder does not have this property. Technically, passthrough encoder is not supported for audio features.\r\n\r\n* Wow, apparently the order of elements in the oneOf affects which error message we get from jsonschema.\r\n\r\n* Get default encoders from feature schema.\r\n\r\n* Get encoder defaults from schema in config_utils.py\r\n\r\n* Make number feature allow decoders without clip property\r\n\r\n* s/list/List\r\n\r\n* Adds reduce_output to h3 encoder.\r\n\r\n* Moves decoder params into nested decoder.\r\n\r\n* Update processing parameters with computed_fill_value.\r\n\r\n* Removes test code.\r\n\r\n* Adds input_size to decoder base because some features assume decoders have an input_size\r\n\r\n* dense encoder not supported for bag features, changed to embed.\r\n\r\n* Adds input_size param to dense encoder schema, since its a required parameter of dense encoder.\r\n\r\n* Fixes vector feature input_size in encoder metadata.\r\n\r\n* Fixes test reducers, set sequence reduce mode in output feature base.\r\n\r\n* Don't nest encoder parameters in decoder\r\n\r\n* Fixes test_torchscript, get num_classes from encoder config.\r\n\r\n* Audio feature padding is float, not int.\r\n\r\n* Adds temp check for threshold to fix GBM tests.\r\n\r\n* Adds missing value strategy drop_row for vector feature in test.\r\n\r\n* Drop row should work even if computed_fill_value is an empty string\r\n\r\n* Removes duplicated TOP_K constant.\r\n\r\n* Consolidated set_default_values\r\n\r\n* Removes commented-out defaults.\r\n\r\n* Remove load_config from OutputFeature, it isn't doing anything here.\r\n\r\n* Removes comment.\r\n\r\n* Fix type annotations for input/output feature constructors.\r\n\r\n* Fixes output feature dependencies being ignored.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Adds test for construction of output features with dependencies.\r\n\r\n* Encoder/Decoder config now lives on encoder/decoder object\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more 
information, see https://pre-commit.ci\r\n\r\n* Fixes decoder params to match their respective classes. Moves fc_stack params and threshold back to output feature.\r\n\r\n* Make clip property of number output feature again.\r\n\r\n* Adds threshold property to set feature schema, use this property instead of storing it in the decoder.\r\n\r\n* input_size in output_feature instead of decoder.\r\n\r\n* Made vector_size property of vector_feature.\r\n\r\n* Fixed gbm tests\r\n\r\n* Fixed flake 8\r\n\r\n* Re-adds num_classes as member of category output feature.\r\n\r\n* Makes vocab_size match vocab used in preprocessing.\r\n\r\n* num_classes in CategoryOutputFeature.\r\n\r\n* Moves num_classes from decoder to category output feature.\r\n\r\n* Fixes test_model_training_options. Copies fc_layer keys into decoder if they are present on output features.\r\n\r\n* Adds field descriptors for fc_layers params in BaseOutputFeatureConfig.\r\n\r\nCo-authored-by: connor-mccorm \r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: connor-mccorm <97468934+connor-mccorm@users.noreply.github.com>\r\nCo-authored-by: Geoffrey Angus \r\nCo-authored-by: Arnav Garg \r\nCo-authored-by: Daniel Treiman ", "code": "def test_train_gpu_load_cpu():\n input_features = [\n category_feature(encoder={\"vocab_size\": 2}, reduce_input=\"sum\"),\n number_feature(normalization=\"zscore\"),\n ]\n output_features = [\n binary_feature(),\n ]\n run_test_with_features(input_features, output_features, run_fn=_run_train_gpu_load_cpu, num_gpus=1)\n\n\n@pytest.mark.distributed\n@pytest.mark.parametrize(\n \"method, balance\",\n [\n (\"oversample_minority\", 0.25),\n (\"oversample_minority\", 0.5),\n (\"oversample_minority\", 0.75),\n (\"undersample_majority\", 0.25),\n (\"undersample_majority\", 0.5),\n (\"undersample_majority\", 0.75),\n ],\n)", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "@pytest.mark.distributed\n@pytest.mark.parametrize(\n \"method, balance\",\n [\n (\"oversample_minority\", 0.25),\n (\"oversample_minority\", 0.5),\n (\"oversample_minority\", 0.75),\n (\"undersample_majority\", 0.25),\n (\"undersample_majority\", 0.5),\n (\"undersample_majority\", 0.75),\n ],\n)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 121, "n_words": 38, "vocab_size": 27, "complexity": 1, "nloc": 9, "token_counts": 52, "n_ast_nodes": 169, "n_identifiers": 17, "random_cut": "def test_train_gpu_load_cpu():\n input_features = [\n category_feature(encoder={\"vocab_size\": 2}, reduce_input=\"sum\"),\n number_feature(normalization=\"zscore\"),\n ]\n output_features = [\n binary_feature(),\n ]\n run_test_with_features(input_features, output_features, run_fn=_run_train_gpu_load_cpu, num_gpus=1)\n\n\n@pytest.mark" }, { "id": 206299, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/template/response.py", "file_name": "response.py", "fun_name": "__iter__", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def __iter__(self):\n if not self._is_rendered:\n raise ContentNotRenderedError(\n \"The response content must be rendered before it can be iterated over.\"\n )\n return super().__iter__()\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 72, "n_words": 22, "vocab_size": 21, "complexity": 2, "nloc": 6, "token_counts": 24, "n_ast_nodes": 43, "n_identifiers": 5, "random_cut": "def 
__iter__(self):\n if not self._is_rendered:\n raise ContentNotRenderedError(\n \"The response content must be rendered before it can be iterated over.\"\n )\n return super().__iter__()\n" }, { "id": 74682, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/tests/tests.py", "file_name": "tests.py", "fun_name": "test_bad_slugurl", "commit_message": "Reformat with black", "code": "def test_bad_slugurl(self):\n # no 'request' object in context\n result = slugurl(template.Context({}), \"bad-slug-doesnt-exist\")\n self.assertIsNone(result)\n\n # 'request' object in context, but no 'site' attribute\n result = slugurl(\n context=template.Context({\"request\": HttpRequest()}),\n slug=\"bad-slug-doesnt-exist\",\n )\n self.assertIsNone(result)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 100, "n_words": 30, "vocab_size": 22, "complexity": 1, "nloc": 8, "token_counts": 55, "n_ast_nodes": 96, "n_identifiers": 10, "random_cut": "def test_bad_slugurl(self):\n # no 'request' object in context\n result = slugurl(template.Context({}), \"bad-slug-doesnt-exist\")\n self.assertIsNone(result)\n\n # 'request' object in context, but no 'site' attribute\n result = slu" }, { "id": 250089, "commit_id": "3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b", "repo": "synapse", "path": "tests/storage/test_account_data.py", "file_name": "test_account_data.py", "fun_name": "test_invalid_data", "commit_message": "Require types in tests.storage. (#14646)\n\nAdds missing type hints to `tests.storage` package\r\nand does not allow untyped definitions.", "code": "def test_invalid_data(self) -> None:\n \n # Add some data and ensure it is there.\n self._update_ignore_list(\"@other:test\")\n self.assert_ignored(self.user, {\"@other:test\"})\n self.assert_ignorers(\"@other:test\", {self.user})\n\n # No ignored_users key.\n self.get_success(\n self.store.add_account_data_for_user(\n self.user,\n AccountDataTypes.IGNORED_USER_LIST,\n {},\n )\n )\n\n # No one ignores the user now.\n self.assert_ignored(self.user, set())\n self.assert_ignorers(\"@other:test\", set())\n\n # Add some data and ensure it is there.\n self._update_ignore_list(\"@other:test\")\n self.assert_ignored(self.user, {\"@other:test\"})\n self.assert_ignorers(\"@other:test\", {self.user})\n\n # Invalid data.\n self.get_success(\n self.store.add_account_data_for_user(\n self.user,\n AccountDataTypes.IGNORED_USER_LIST,\n {\"ignored_users\": \"unexpected\"},\n )\n )\n\n # No one ignores the user now.\n self.assert_ignored(self.user, set())\n self.assert_ignorers(\"@other:test\", set())\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 357, "n_words": 76, "vocab_size": 37, "complexity": 1, "nloc": 26, "token_counts": 161, "n_ast_nodes": 276, "n_identifiers": 12, "random_cut": "def test_invalid_data(self) -> None:\n \n # Add some data and ensure it is there.\n self._update_ignore_list(\"@other:test\")\n self.assert_ignored(self.user, {\"@other:test\"})\n self.assert_ignorers(\"@other:test\", {self.user})\n\n # No ignored_users key.\n self.get_success(\n self.store.add_account_data_for_user(\n self.user,\n AccountDataTypes.IGNORED_USER_LIST,\n {},\n )\n )\n\n # No one ignores the user now.\n self.assert_ignored(self.user, set())\n self.assert_ignorers(\"@other:test\", set())\n\n # Add some data and ensure it is there.\n 
self._update_ignore_list(\"@other:test\")\n self.assert_ignored(self.user, {\"@other:test\"})\n self.assert_ignorers(\"@other:test\", {self.user})\n\n # Invalid data.\n self.get_success(\n self.store.add_account_data_for_user(\n self.user,\n AccountDataTypes.IGNORED_USER_LIST,\n {\"ignored_users\": \"unexpected\"},\n )\n )\n\n # No one ignores the user now.\n sel" }, { "id": 65155, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/bank_clearance_summary/bank_clearance_summary.py", "file_name": "bank_clearance_summary.py", "fun_name": "get_conditions", "commit_message": "style: format code with black", "code": "def get_conditions(filters):\n\tconditions = \"\"\n\n\tif filters.get(\"from_date\"):\n\t\tconditions += \" and posting_date>=%(from_date)s\"\n\tif filters.get(\"to_date\"):\n\t\tconditions += \" and posting_date<=%(to_date)s\"\n\n\treturn conditions\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 14, "n_words": 21, "vocab_size": 14, "complexity": 3, "nloc": 7, "token_counts": 32, "n_ast_nodes": 63, "n_identifiers": 4, "random_cut": "def get_conditions(filters):\n\tconditions = \"\"\n\n\tif filters.get(\"from_date\"):\n\t\tconditions += \" and posting_date>=%(from_date)s\"\n\tif filters.get(\"to_date\"):\n\t\tconditions += \" and posting_date<=%(to_date)s" }, { "id": 108353, "commit_id": "4ffe491d514e89c4daab4f5630de84ac31d8bd12", "repo": "matplotlib", "path": "lib/matplotlib/cm.py", "file_name": "cm.py", "fun_name": "register_cmap", "commit_message": "STY: Fix typos in colormap\n\nPR 22298 got merged with typos before PR 22777, which introduced the\ncodespell pre-commit hook, which would have caught them. 
This simply\nfixes those typos.", "code": "def register_cmap(name=None, cmap=None, *, override_builtin=False):\n \n _api.check_isinstance((str, None), name=name)\n if name is None:\n try:\n name = cmap.name\n except AttributeError as err:\n raise ValueError(\"Arguments must include a name or a \"\n \"Colormap\") from err\n # override_builtin is allowed here for backward compatibility\n # this is just a shim to enable that to work privately in\n # the global ColormapRegistry\n _colormaps._allow_override_builtin = override_builtin\n _colormaps.register(cmap, name=name, force=override_builtin)\n _colormaps._allow_override_builtin = False\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 157, "n_words": 66, "vocab_size": 53, "complexity": 3, "nloc": 11, "token_counts": 81, "n_ast_nodes": 133, "n_identifiers": 14, "random_cut": "def register_cmap(name=None, cmap=None, *, override_builtin=False):\n \n _api.check_isinstance((str, None), name=name)\n if name is None:\n try:\n name = cmap.name\n except AttributeError as err:\n raise ValueError(\"Arguments must include a name or a \"\n \"Colormap\") from err\n # override_builtin is allowed here for backward compatibility\n # this is just a shim to enable that to work privately in\n # the global ColormapRegistry\n _colormaps._allow_override_builtin = override_builtin\n _colormaps.register(cmap, name=name, force=over" }, { "id": 215592, "commit_id": "43277294a3454e5dcd9079e005f747bf880801f6", "repo": "salt", "path": "salt/transport/tcp.py", "file_name": "tcp.py", "fun_name": "_connect", "commit_message": "Test fix", "code": "def _connect(self):\n \n while True:\n if self._closing:\n break\n try:\n kwargs = {}\n if self.source_ip or self.source_port:\n if salt.ext.tornado.version_info >= (4, 5):\n ### source_ip and source_port are supported only in Tornado >= 4.5\n # See http://www.tornadoweb.org/en/stable/releases/v4.5.0.html\n # Otherwise will just ignore these args\n kwargs = {\n \"source_ip\": self.source_ip,\n \"source_port\": self.source_port,\n }\n else:\n log.warning(\n \"If you need a certain source IP/port, consider upgrading\"\n \" Tornado >= 4.5\"\n )\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n self._stream = yield self._tcp_client.connect(\n self.host, self.port, ssl_options=self.opts.get(\"ssl\"), **kwargs\n )\n self._connecting_future.set_result(True)\n break\n except Exception as exc: # pylint: disable=broad-except\n log.warning(\n \"TCP Message Client encountered an exception while connecting to\"\n \" %s:%s: %r, will reconnect in %d seconds\",\n self.host,\n self.port,\n exc,\n self.backoff,\n )\n yield salt.ext.tornado.gen.sleep(self.backoff)\n # self._connecting_future.set_exception(exc)\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 792, "n_words": 112, "vocab_size": 90, "complexity": 7, "nloc": 33, "token_counts": 168, "n_ast_nodes": 284, "n_identifiers": 31, "random_cut": "def _connect(self):\n \n while True:\n if self._closing:\n break\n try:\n " }, { "id": 145251, "commit_id": "36a31cb6fde95d490c81c6de5d9f911b4cac8af2", "repo": "ray", "path": "dashboard/modules/usage_stats/usage_stats_head.py", "file_name": "usage_stats_head.py", "fun_name": "_report_usage", "commit_message": "[Usage Stats] Implement usage stats report \"Turned off by default\". 
(#22249)\n\nThis is the second PR to implement usage stats on Ray. Please refer to the file usage_lib.py for more details.\r\n\r\nThe full specification is here https://docs.google.com/document/d/1ZT-l9YbGHh-iWRUC91jS-ssQ5Qe2UQ43Lsoc1edCalc/edit#heading=h.17dss3b9evbj.\r\n\r\nThis adds a dashboard module to enable usage stats. **Usage stats report is turned off by default** after this PR. We can control the report (enablement, report period, and URL. Note that URL is strictly for testing) using the env variable. \r\n\r\n## NOTE\r\nThis requires us to add `requests` to the default library. `requests` must be okay to be included because\r\n1. it is extremely lightweight. It is implemented only with built-in libs.\r\n2. It is really stable. The project basically claims they are \"deprecated\", meaning no new features will be added there.\r\n\r\ncc @edoakes @richardliaw for the approval\r\n\r\nFor the HTTP request, I was alternatively considered httpx, but it was not as lightweight as `requests`. So I decided to implement async requests using the thread pool.", "code": "async def _report_usage(self):\n if not ray_usage_lib._usage_stats_enabled():\n return\n\n \n try:\n data = ray_usage_lib.generate_report_data(\n self.cluster_metadata,\n self.total_success,\n self.total_failed,\n self.seq_no,\n )\n error = None\n try:\n await self.client.report_usage_data_async(\n ray_usage_lib._usage_stats_report_url(), data\n )\n except Exception as e:\n logger.info(f\"Usage report request failed. {e}\")\n error = str(e)\n self.total_failed += 1\n else:\n self.total_success += 1\n finally:\n self.seq_no += 1\n\n data = ray_usage_lib.generate_write_data(data, error)\n await self.client.write_usage_data_async(data, self.session_dir)\n\n except Exception as e:\n logger.exception(e)\n logger.info(f\"Usage report failed: {e}\")\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 408, "n_words": 64, "vocab_size": 44, "complexity": 5, "nloc": 34, "token_counts": 138, "n_ast_nodes": 236, "n_identifiers": 23, "random_cut": "async def _report_usage(self):\n if not ray_usage_lib._usage_stats_enabled():\n return\n\n \n try:\n data = ray_usage_lib.generate_report_data(\n self.cluster_metadata,\n self.total_success,\n self.total_failed,\n self.seq_no,\n )\n error = None\n try:\n await self.client.report_usage_data_async(\n ray_usage_lib._usage_stats_report_url(), data\n )\n except Exception as e:\n logger.info(f\"Usage report request failed. 
{e}\")\n error = str(e)\n self.total_failed += 1\n else:\n self.total_success += 1\n finally:\n self.seq_no += 1\n\n data = ray_usage_lib.generate_write_data(data, error)\n await self.client.write_usage_data_async(data, self.session_dir)\n\n except Exception as e:\n logger.exception(e)\n logger.info(f\"Usage report failed: {e}\")\n" }, { "id": 110638, "commit_id": "b4e9e3131cdd7f1ad33ea06e21e7d3e51762af91", "repo": "matplotlib", "path": "lib/matplotlib/backends/backend_gtk4.py", "file_name": "backend_gtk4.py", "fun_name": "key_release_event", "commit_message": "Separately track modifier keys for mouse events.\n\nWhether the event modifiers are directly available on enter/leave events\ndepends on the backend, but all are handled here (except possibly for\nmacos, which I haven't checked).", "code": "def key_release_event(self, controller, keyval, keycode, state):\n KeyEvent(\n \"key_release_event\", self, self._get_key(keyval, keycode, state),\n *self._mpl_coords(),\n )._process()\n return True\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 58, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 6, "token_counts": 44, "n_ast_nodes": 64, "n_identifiers": 10, "random_cut": "def key_release_event(self, controller, keyval, keycode, state):\n KeyEvent(\n \"key_release_event\", self, self._get_key(keyval, keycode, state),\n *self._mpl_coords(),\n )._process()\n ret" }, { "id": 161603, "commit_id": "1b3d0ae1de9528af651ff01069e010bf176b5549", "repo": "rich", "path": "tests/test_win32_console.py", "file_name": "test_win32_console.py", "fun_name": "test_hide_cursor", "commit_message": "Using write to write to stdout file on legacy windows", "code": "def test_hide_cursor(_, SetConsoleCursorInfo, win32_handle):\n term = LegacyWindowsTerm(sys.stdout)\n term.hide_cursor()\n\n call_args = SetConsoleCursorInfo.call_args_list\n\n assert len(call_args) == 1\n\n args, kwargs = call_args[0]\n assert kwargs[\"cursor_info\"].bVisible == 0\n assert kwargs[\"cursor_info\"].dwSize == 100\n", "url": "https://github.com/Textualize/rich.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 75, "n_words": 27, "vocab_size": 21, "complexity": 1, "nloc": 8, "token_counts": 60, "n_ast_nodes": 96, "n_identifiers": 16, "random_cut": "def test_hide_cursor(_, SetConsoleCursorInfo, win32_handle):\n term = LegacyWindowsTerm(sys.stdout)\n term.hide_cursor()\n\n call_args = SetConsoleCursorInfo.call_args_list\n\n assert len(call_args) == 1\n\n args, kwargs = call_args[0]\n assert kwargs[\"cursor_" }, { "id": 44271, "commit_id": "cb73053211367e2c2dd76d5279cdc7dc7b190124", "repo": "airflow", "path": "tests/core/test_providers_manager.py", "file_name": "test_providers_manager.py", "fun_name": "test_hooks", "commit_message": "Add optional features in providers. (#21074)\n\nSome features in providers can be optional, depending on the\r\npresence of some libraries. Since Providers Manager tries\r\nto import the right classes that are exposed via providers it\r\nshould not - in this case - log warning message for those\r\noptional features. 
Previously, all ImportErrors were turned into\r\ndebug log but now we only turn them in debug log when creator\r\nof the provider deliberately raised\r\nan AirflowOptionalProviderFeatureException.\r\n\r\nInstructions on how to raise such exception in the way to keep\r\nbackwards compatibility were updated in proider's documentation.\r\n\r\nFixes: #20709", "code": "def test_hooks(self):\n with pytest.warns(expected_warning=None) as warning_records:\n with self._caplog.at_level(logging.WARNING):\n provider_manager = ProvidersManager()\n connections_list = list(provider_manager.hooks.keys())\n assert len(connections_list) > 60\n assert [] == [w.message for w in warning_records.list if \"hook-class-names\" in str(w.message)]\n assert len(self._caplog.records) == 0\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 110, "n_words": 34, "vocab_size": 28, "complexity": 3, "nloc": 8, "token_counts": 88, "n_ast_nodes": 147, "n_identifiers": 21, "random_cut": "def test_hooks(self):\n with pytest.warns(expected_warning=None) as warning_records:\n " }, { "id": 187236, "commit_id": "d09112ab1f6db6aa605650fe1ff6a3028344f90d", "repo": "streamlink", "path": "tests/test_api_validate.py", "file_name": "test_api_validate.py", "fun_name": "test_nested", "commit_message": "plugin.api.validate: rewrite tests\n\nCompletely rewrite tests using pytest, with full coverage", "code": "def test_nested(self):\n dictionary = {\"foo\": {\"bar\": {\"baz\": \"qux\"}}}\n assert validate.validate(validate.get((\"foo\", \"bar\", \"baz\")), dictionary) == \"qux\"\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 28, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 3, "token_counts": 42, "n_ast_nodes": 79, "n_identifiers": 5, "random_cut": "def test_nested(self):\n dictionary = {\"foo\": {\"bar\": {\"baz\": \"qux\"}}}\n assert validate.validate(vali" }, { "id": 284404, "commit_id": "4d62b262a2221bb894098cd51f0f8541dad3cf3c", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/core/log/generation/test_settings.py", "file_name": "test_settings.py", "fun_name": "test_log_settings", "commit_message": "Add Logger Tests (#1707)\n\n* Added openbb_terminal/logger tests\r\n\r\n* Added tests for path_tracking\r\n\r\n* Just in case lol\r\n\r\n* Finished test_directories\r\n\r\n* Added more tests\r\n\r\n* Fixed flake8 error\r\n\r\n* Fixed flake8 error\r\n\r\n* Coverage for test_expired_files\r\n\r\n* Added handling for no boto3\r\n\r\n* Added test mock\r\n\r\n* Added test mock\r\n\r\n* Added fix\r\n\r\n* Test fix\r\n\r\n* Fix for windows\r\n\r\n* Added int\r\n\r\n* More tests\r\n\r\n* Added fixes\r\n\r\n* Fixed black issues\r\n\r\n* Tests : update fixture examples\r\n\r\n* Tests : core/log/generation\r\n\r\n* Core : log/generation\r\n\r\n* replaced conftest delete with temp_path\r\n\r\n* Fixed mocks\r\n\r\n* refactored get settings to use tmp_path\r\n\r\n* reverted codespell\r\n\r\n* Making random faillign test linux only\r\n\r\n* Swapped to fixture\r\n\r\n* Fixed conftest\r\n\r\n* Logs collection : add docstring + clean lines\r\n\r\n* Tests : fix windows specific issue\r\n\r\n* Tests : skip unsound tests which changes logger state\r\n\r\n* Black\r\n\r\n* Linting\r\n\r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: didierlopes.eth ", "code": "def test_log_settings(tmp_path):\n directory = tmp_path\n frequency = 
\"MOCK_FREQUENCY\"\n handler_list = \"MOCK_HANDLER_LIST\"\n rolling_clock = \"MOCK_ROLLING_CLOCK\"\n verbosity = 20\n\n log_settings = LogSettings(\n directory=directory,\n frequency=frequency,\n handler_list=handler_list,\n rolling_clock=rolling_clock,\n verbosity=verbosity,\n )\n\n assert log_settings.directory == directory\n assert log_settings.frequency == frequency\n assert log_settings.handler_list == handler_list\n assert log_settings.rolling_clock == rolling_clock\n assert log_settings.verbosity == verbosity\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 116, "n_words": 46, "vocab_size": 28, "complexity": 1, "nloc": 18, "token_counts": 75, "n_ast_nodes": 119, "n_identifiers": 9, "random_cut": "def test_log_settings(tmp_path):\n directory = tmp_path\n frequency = \"MOCK_FREQUENCY\"\n handler_list = \"MOCK_HANDLER_LIST\"\n rolling_clock = \"MOCK_ROLLING_CLOCK\"\n verbosity = 20\n\n log_settings = LogSettings(\n directory=directory,\n frequency=frequency,\n handler_list=handler_list,\n rolling_clock=rolling_clock,\n verbosity=verbosity,\n )\n\n assert log_settings.directory == directory\n assert log_settings.frequency == frequency\n assert log_settings.handler_li" }, { "id": 144951, "commit_id": "9a7979d9a2b476d87c8386746b443de56db1cea9", "repo": "ray", "path": "python/ray/util/client/server/proxier.py", "file_name": "proxier.py", "fun_name": "GetObject", "commit_message": "[Client] chunked get requests (#22100)\n\nWhy are these changes needed?\r\nSwitches GetObject from unary-unary to unary-streaming so that large objects can be streamed across multiple messages (currently hardcoded to 64MiB chunks). This will allow users to retrieve objects larger than 2GiB from a remote cluster. If the transfer is interrupted by a recoverable gRPC error (i.e. temporary disconnect), then the request will be retried starting from the first chunk that hasn't been received yet.\r\n\r\nProto changes\r\nGetRequest's now have the field start_chunk_id, to indicate which chunk to start from (useful if the we have to retry a request after already receiving some chunks). GetResponses now have a chunk_id (0 indexed chunk of the serialized object), total_chunks (total number of chunks, used in async transfers to determine when all chunks have been received), and total_size (the total size of the object in bytes, used to raise user warnings if the object being retrieved is very large).\r\n\r\nServer changes\r\nMainly just updating GetObject logic to yield chunks instead of returning\r\n\r\nClient changes\r\nAt the moment, objects can be retrieved directly from the raylet servicer (ray.get) or asynchronously over the datapath (await some_remote_func.remote()). 
In both cases, the request will error if the chunk isn't valid (server side error) or if a chunk is received out of order (shouldn't happen in practice, since gRPC guarantees that messages in a stream either arrive in order or not at all).\r\n\r\nray.get is fairly straightforward, and changes are mainly to accommodate yielding from the stub instead of taking the value directly.\r\n\r\nawait some_remote_func.remote() is similar, but to keep things consistent with other async handling collecting the chunks is handled by a ChunkCollector, which wraps around the original callback.", "code": "def GetObject(self, request, context=None):\n yield from self._call_inner_function(request, context, \"GetObject\")\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 15, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 35, "n_identifiers": 5, "random_cut": "def GetObject(self, request, context=None):\n yi" }, { "id": 199449, "commit_id": "fc7b460a21e340d4104e67c86d973765c9b4071b", "repo": "sympy", "path": "sympy/functions/elementary/piecewise.py", "file_name": "piecewise.py", "fun_name": "_piecewise_simplify_equal_to_next_segment", "commit_message": "Refactor Piecewise simplifcation/evaluation", "code": "def _piecewise_simplify_equal_to_next_segment(args):\n \n prevexpr = None\n for i, (expr, cond) in reversed(list(enumerate(args))):\n if prevexpr is not None:\n if isinstance(cond, And):\n eqs, other = sift(cond.args,\n lambda i: isinstance(i, Eq), binary=True)\n elif isinstance(cond, Eq):\n eqs, other = [cond], []\n else:\n eqs = other = []\n _prevexpr = prevexpr\n _expr = expr\n if eqs and not other:\n eqs = list(ordered(eqs))\n for e in eqs:\n # allow 2 args to collapse into 1 for any e\n # otherwise limit simplification to only simple-arg\n # Eq instances\n if len(args) == 2 or _blessed(e):\n _prevexpr = _prevexpr.subs(*e.args)\n _expr = _expr.subs(*e.args)\n # Did it evaluate to the same?\n if _prevexpr == _expr:\n # Set the expression for the Not equal section to the same\n # as the next. 
These will be merged when creating the new\n # Piecewise\n args[i] = args[i].func(args[i + 1][0], cond)\n else:\n # Update the expression that we compare against\n prevexpr = expr\n else:\n prevexpr = expr\n return args\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 610, "n_words": 154, "vocab_size": 93, "complexity": 11, "nloc": 26, "token_counts": 195, "n_ast_nodes": 316, "n_identifiers": 24, "random_cut": "def _piecewise_simplify_equal_to_next_segment(args):\n \n prevexpr = None\n for i, (expr, cond) in reversed(list(enumerate(args))):\n if prevexpr is not None:\n if isinstance(cond, And):\n eqs, other = sift(cond.args,\n " }, { "id": 53344, "commit_id": "be671cbecee46c621dc08ed47bb520f795b34a42", "repo": "prefect", "path": "tests/test_flow_runners.py", "file_name": "test_flow_runners.py", "fun_name": "test_runner_type", "commit_message": "Kubernetes flow runner (PrefectHQ/orion#780)\n\nAdd a Kubernetes flow runner", "code": "def test_runner_type(restart_policy):\n assert KubernetesFlowRunner().typename == \"kubernetes\"\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 24, "n_identifiers": 4, "random_cut": "def test_runner_type(restart_policy):\n assert KubernetesFlowRunner().typename == \"kubernetes\"\n" }, { "id": 178836, "commit_id": "70b7eee9555c8d5599d096eaf600521475b001d9", "repo": "Nuitka", "path": "nuitka/tools/testing/Common.py", "file_name": "Common.py", "fun_name": "decideFilenameVersionSkip", "commit_message": "Python3.7+: Added support for get_resource_reader to our loader\n\n* This allows to avoid a useless file copy to a temporary file\n in case a \"importlib.resources.path\" is used.\n\n* Also fixed a few typos in tests.\n\n* And avoid compiling the meta path based loader separately, so it\n can use compiled code helpers easily.", "code": "def decideFilenameVersionSkip(filename):\n \n\n # This will make many decisions with immediate returns.\n # pylint: disable=too-many-branches,too-many-return-statements\n\n assert type(filename) is str, repr(filename)\n\n # Skip runner scripts by default.\n if filename.startswith(\"run_\"):\n return False\n\n if filename.endswith(\".j2\"):\n filename = filename[:-3]\n\n # Skip tests that require Python 2.7 at least.\n if filename.endswith(\"27.py\") and _python_version < (2, 7):\n return False\n\n # Skip tests that require Python 2 at maximum.\n if filename.endswith(\"_2.py\") and _python_version >= (3,):\n return False\n\n # Skip tests that require Python 3.7 at maximum.\n if filename.endswith(\"_37.py\") and _python_version >= (3, 8):\n return False\n\n # Skip tests that require Python 3.2 at least.\n if filename.endswith(\"32.py\") and _python_version < (3, 2):\n return False\n\n # Skip tests that require Python 3.3 at least.\n if filename.endswith(\"33.py\") and _python_version < (3, 3):\n return False\n\n # Skip tests that require Python 3.4 at least.\n if filename.endswith(\"34.py\") and _python_version < (3, 4):\n return False\n\n # Skip tests that require Python 3.5 at least.\n if filename.endswith(\"35.py\") and _python_version < (3, 5):\n return False\n\n # Skip tests that require Python 3.6 at least.\n if filename.endswith(\"36.py\") and _python_version < (3, 6):\n return False\n\n # Skip tests that require Python 3.7 at least.\n if 
filename.endswith(\"37.py\") and _python_version < (3, 7):\n return False\n\n # Skip tests that require Python 3.8 at least.\n if filename.endswith(\"38.py\") and _python_version < (3, 8):\n return False\n\n # Skip tests that require Python 3.9 at least.\n if filename.endswith(\"39.py\") and _python_version < (3, 9):\n return False\n\n # Skip tests that require Python 3.10 at least.\n if filename.endswith(\"310.py\") and _python_version < (3, 10):\n return False\n\n return True\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 445, "n_words": 251, "vocab_size": 78, "complexity": 27, "nloc": 31, "token_counts": 261, "n_ast_nodes": 442, "n_identifiers": 8, "random_cut": "def decideFilenameVersionSkip(filename):\n \n\n # This will make many decisions with immediate returns.\n # pylint: disable=too-many-branches,too-many-return-statements\n\n assert type(filename) is str, repr(filename)\n\n # Skip runner scripts by default.\n if filename.startswith(\"run_\"):\n return False\n\n if filename.endswith(\".j2\"):\n filename = filename[:-3]\n\n # Skip tests that require Python 2.7 at least.\n if filename.ends" }, { "id": 165531, "commit_id": "a72fa1b400234d3a05342f17c3c0b1e3993a6bd8", "repo": "pandas", "path": "pandas/tests/io/formats/test_to_csv.py", "file_name": "test_to_csv.py", "fun_name": "test_to_csv_default_encoding", "commit_message": "CLN/DOC: typos (#46328)\n\n* fix typos\r\n\r\n* fix typo\r\n\r\n* fix typo\r\n\r\n* fix typo", "code": "def test_to_csv_default_encoding(self):\n # GH17097\n df = DataFrame({\"col\": [\"AAAAA\", \"ÄÄÄÄÄ\", \"ßßßßß\", \"聞聞聞聞聞\"]})\n\n with tm.ensure_clean(\"test.csv\") as path:\n # the default to_csv encoding is uft-8.\n df.to_csv(path)\n tm.assert_frame_equal(pd.read_csv(path, index_col=0), df)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 79, "n_words": 26, "vocab_size": 25, "complexity": 1, "nloc": 5, "token_counts": 56, "n_ast_nodes": 102, "n_identifiers": 12, "random_cut": "def test_to_csv_default_encoding(self):\n # GH17097\n df = DataFrame({\"col\": [\"AAAAA\", \"ÄÄÄÄÄ\", \"ßßßßß\", \"聞聞聞聞聞\"]})\n\n with tm.ensure_clean(\"test.csv\") as path:\n # the default to_csv encoding is uft-8.\n df.to_csv(path)\n tm.assert_frame_equal(pd.read_csv(path, " }, { "id": 194458, "commit_id": "1830123ba3edf7290b7c6cb1c6f406ccf1d0e5d4", "repo": "kivy", "path": "kivy/uix/widget.py", "file_name": "widget.py", "fun_name": "on_motion", "commit_message": "Feature: EventManagerBase (#7658)\n\n* Added EventManagerBase class and event_managers attribute to WindowBase class.\r\n* Added on_motion event to Widget class.\r\n* Updated post_dispatch_input in EventLoopBase to skip non-touch events.\r\n* Using type ids in MouseMotionEventProvider.\r\n* Added on_motion method to Widget subclasses.\r\n* Updated Widget.on_motion method to dispatch to filtered widgets if 'pos' is not in me.profile.\r\n* Changed motion_filter property in Widget to store key to list values.\r\n* Updated Widget.on_motion to not dispatch event to children if widget is disabled.\r\n* Widget: Using flags to control dispatching in on_motion method.\r\n* Widget: Don't dispatch on_motion to children if only self is registered.\r\n* Widget: Removed collision on disabled check from on_motion method.\r\n* Widget: Added docstrings for motion_filter and related methods.\r\n* EventManager: Moved motion event flags to 
eventmanager/__init__.py module.\r\n* ScreenManager: Overrode the on_motion method.\r\n* WindowBase: Using attributes event_managers and event_managers_dict.\r\n* WindowBase: Added doc for register_event_manager and unregister_event_manager methods.\r\n* Widget: Improved default dispatch to stop after the last registered widgets.\r\n* EventManagerBase: Added initial docs class and module.\r\n* Widget: Added experimental warnings to motion_filter property and to on_motion and (un)register_for_motion_event methods.\r\n* WindowBase: Added docs for event_managers and event_managers_dict attributes.\r\n* MotionEvent: Added type_id and flags to push_attrs list.\r\n* EventManagerBase: Added versionadded tag on all flags.\r\n* EventManagerBase: Use dispatch modes instead of flags.", "code": "def on_motion(self, etype, me):\n \n if self.disabled or me.dispatch_mode == MODE_DONT_DISPATCH:\n return\n if me.type_id not in self.motion_filter:\n return\n filtered = self.motion_filter[me.type_id]\n if filtered[0] is self and len(filtered) == 1:\n return\n if me.dispatch_mode == MODE_DEFAULT_DISPATCH:\n last_filtered = filtered[-1]\n for widget in self.children[:]:\n if widget.dispatch('on_motion', etype, me):\n return True\n if widget is last_filtered:\n return\n if me.dispatch_mode == MODE_FILTERED_DISPATCH:\n widgets = filtered[1:] if filtered[0] is self else filtered[:]\n for widget in widgets:\n if widget.dispatch('on_motion', etype, me):\n return True\n\n #\n # Default event handlers\n #", "url": "https://github.com/kivy/kivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 317, "n_words": 80, "vocab_size": 43, "complexity": 14, "nloc": 20, "token_counts": 148, "n_ast_nodes": 233, "n_identifiers": 18, "random_cut": "def on_motion(self, etype, me):\n \n if self.disabled or me.dispatch_mode == MODE_DONT_DISPATCH:\n return\n if me.type_id not in self.motion_filter:\n return\n filtered = self.motion_filter[me.type_id]\n if filtered[0] is self and len(filtered) == 1:\n return\n if me.dispatch_mode == MODE_DEFAULT_DISPATCH:\n last_filtered = filtered[-1]\n for widget in self.children[:]:\n if widget.dispatch('on_motion', etype, me):\n return True\n if widget is last_filtered:\n " }, { "id": 279999, "commit_id": "38b618ad90d669c85cccee521ad73cc0630cf750", "repo": "keras", "path": "keras/optimizers/__init__.py", "file_name": "__init__.py", "fun_name": "convert_to_legacy_optimizer", "commit_message": "Add general `weight_decay` support in optimizer.\n\nWe still keep adamw optimizer in case people want an explicit adamw. 
We can delete it in a followup cl.\n\nPiperOrigin-RevId: 477043911", "code": "def convert_to_legacy_optimizer(optimizer):\n \n if not isinstance(optimizer, optimizer_experimental.Optimizer):\n raise ValueError(\n \"`convert_to_legacy_optimizer` should only be called \"\n \"on instances of `tf.keras.optimizers.Optimizer`, but \"\n f\"received {optimizer} of type {type(optimizer)}.\"\n )\n optimizer_name = optimizer.__class__.__name__.lower()\n config = optimizer.get_config()\n # Remove fields that only exist in experimental optimizer.\n keys_to_remove = [\n \"weight_decay\",\n \"use_ema\",\n \"ema_momentum\",\n \"ema_overwrite_frequency\",\n \"jit_compile\",\n \"is_legacy_optimizer\",\n ]\n for key in keys_to_remove:\n config.pop(key, None)\n legacy_optimizer_config = {\n \"class_name\": optimizer_name,\n \"config\": config,\n }\n return deserialize(legacy_optimizer_config, use_legacy_optimizer=True)\n\n\n@keras_export(\"keras.optimizers.get\")", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.optimizers.get\")", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 211, "n_words": 69, "vocab_size": 62, "complexity": 3, "nloc": 24, "token_counts": 93, "n_ast_nodes": 186, "n_identifiers": 20, "random_cut": "def convert_to_legacy_optimizer(optimizer):\n \n if not isinstance(optimizer, optimizer_experimental.Optimizer):\n raise ValueError(\n \"`convert_to_legacy_optimizer` should only be called \"\n \"on instances of `tf.keras.optimizers.Optimizer`, but \"\n f\"received {optimizer} of type {type(optimizer)}.\"\n )\n optimizer_name = optimizer.__class__.__name__.lower()\n config = optimizer.get_config()\n # Remove fields that only exist in experimental optimizer.\n keys_to_remove = [\n \"weight_decay\",\n \"use_ema\",\n \"ema_momentum\",\n \"ema_overwrite_frequency\",\n \"jit_compile\",\n \"is_legacy_optimizer\",\n ]\n for key in keys_to_remove:\n con" }, { "id": 301585, "commit_id": "24c34c0ef0e38d37cd94e4806d1d0a5b715cfe7d", "repo": "core", "path": "homeassistant/components/sensibo/climate.py", "file_name": "climate.py", "fun_name": "max_temp", "commit_message": "Strict typing Sensibo (#72454)", "code": "def max_temp(self) -> float:\n \n max_temp: int = self.device_data.temp_list[-1]\n return max_temp\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 23, "n_ast_nodes": 39, "n_identifiers": 6, "random_cut": "def max_temp(self) -> float:\n \n max_temp: int = self.device_data.temp_list[-1" }, { "id": 125597, "commit_id": "b048c6f659c314a8b0f902efc389ac96aa4c15c9", "repo": "ray", "path": "python/ray/data/tests/test_dataset.py", "file_name": "test_dataset.py", "fun_name": "test_iter_batches_local_shuffle", "commit_message": "[data] set iter_batches default batch_size #26869 \n\nWhy are these changes needed?\r\nConsumers (e.g. Train) may expect generated batches to be of the same size. Prior to this change, the default behavior would be for each batch to be one block, which may be of different sizes.\r\n\r\nChanges\r\nSet default batch_size to 256. 
This was chosen to be a sensible default for training workloads, which is intentionally different from the existing default batch_size value for Dataset.map_batches.\r\nUpdate docs for Dataset.iter_batches, Dataset.map_batches, and DatasetPipeline.iter_batches to be consistent.\r\nUpdated tests and examples to explicitly pass in batch_size=None as these tests were intentionally testing block iteration, and there are other tests that test explicit batch sizes.", "code": "def test_iter_batches_local_shuffle(shutdown_only, pipelined, ds_format):\n # Input validation.\n # Batch size must be given for local shuffle.\n with pytest.raises(ValueError):\n list(\n ray.data.range(100).iter_batches(\n batch_size=None, local_shuffle_buffer_size=10\n )\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 83, "n_words": 24, "vocab_size": 22, "complexity": 5, "nloc": 161, "token_counts": 884, "n_ast_nodes": 64, "n_identifiers": 14, "random_cut": "def test_iter_batches_local_shuffle(shutdown_only, pipelined, ds_format):" }, { "id": 164069, "commit_id": "f46df091df3afea25a273f491d1f6b2c7d20b32c", "repo": "pandas", "path": "pandas/tests/io/excel/test_readers.py", "file_name": "test_readers.py", "fun_name": "test_engine_used", "commit_message": "TST: Remove unused fixtures (#45692)\n\n* TST: Remove unused fixtures\r\n\r\n* Undo a removed fixture\r\n\r\n* Add back other fixtures\r\n\r\n* Undo a file\r\n\r\n* Try undoing this?\r\n\r\n* Revert \"Try undoing this?\"\r\n\r\nThis reverts commit 0e56cb04f5e8cb1f7b2ac4c5e6191485bb2fe1ab.", "code": "def test_engine_used(self, read_ext, engine):\n expected_defaults = {\n \"xlsx\": \"openpyxl\",\n \"xlsm\": \"openpyxl\",\n \"xlsb\": \"pyxlsb\",\n \"xls\": \"xlrd\",\n \"ods\": \"odf\",\n }\n\n with pd.ExcelFile(\"test1\" + read_ext) as excel:\n result = excel.engine\n\n if engine is not None:\n expected = engine\n else:\n expected = expected_defaults[read_ext[1:]]\n assert result == expected\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 172, "n_words": 43, "vocab_size": 35, "complexity": 2, "nloc": 15, "token_counts": 75, "n_ast_nodes": 136, "n_identifiers": 10, "random_cut": "def test_engine_used(self, read_ext, engine):\n expected_defaults = {\n \"xlsx\": \"openpyxl\",\n \"xlsm\": \"openpyxl\",\n \"xlsb\": \"pyxlsb\",\n \"xls\": \"xlrd\",\n \"ods\": \"odf\",\n }\n\n with pd.ExcelFile(\"test1\" + read_ext) as excel:\n result = excel.engine\n\n if engine is not None:\n expected = engine\n else:\n expected = expected_defaults[read_ext[1:]]\n assert result == expected\n" }, { "id": 274964, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/mixed_precision/autocast_variable_test.py", "file_name": "autocast_variable_test.py", "fun_name": "test_op_attribute", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_op_attribute(self, distribution):\n with distribution.scope():\n x = get_var(0.0, tf.float32)\n x = autocast_variable.create_autocast_variable(x)\n\n # Variable.op raises an AttributeError in Eager mode and is an op in graph\n # mode. Variable.assign(...).op is None in Eager mode and an op in Graph\n # mode or a tf.function. 
We test this is also true of AutoCastVariable.\n if tf.executing_eagerly():\n with self.assertRaises(AttributeError):\n x.op # pylint: disable=pointless-statement\n self.assertIsNone(x.assign(1.0).op)\n self.assertIsNone(x.assign_add(1.0).op)\n self.assertIsNone(x.assign_sub(1.0).op)\n else:\n self.assertIsNotNone(x.op)\n self.assertIsNotNone(x.assign(1.0).op)\n self.assertIsNotNone(x.assign_add(1.0).op)\n self.assertIsNotNone(x.assign_sub(1.0).op)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 291, "n_words": 68, "vocab_size": 50, "complexity": 2, "nloc": 18, "token_counts": 161, "n_ast_nodes": 232, "n_identifiers": 19, "random_cut": "def test_op_attribute(self, distribution):\n with distribution.scope():\n x = get_var(0.0, tf.float32)\n x = autocast_variable.create_autocast_variable(x)\n\n # Variable.op raises an AttributeError in Eager mode and is an op in graph\n # mode. Variable.assign(...).op is None in Eager mode and an op in Graph\n # mode or a tf.function. We test this is also true of AutoCastVariabl" }, { "id": 113090, "commit_id": "b99e26833287c901f6d808d07ea6882014b05f58", "repo": "nni", "path": "nni/retiarii/experiment/config/experiment_config.py", "file_name": "experiment_config.py", "fun_name": "_canonicalize", "commit_message": "Migration of NAS tests (#4933)", "code": "def _canonicalize(self, _parents):\n msg = '{} is not supposed to be set in Retiarii experiment by users, your config is {}.'\n if self.search_space != '':\n raise ValueError(msg.format('search_space', self.search_space))\n # TODO: maybe we should also allow users to specify trial_code_directory\n if str(self.trial_code_directory) != '.' and not os.path.isabs(self.trial_code_directory):\n raise ValueError(msg.format('trial_code_directory', self.trial_code_directory))\n\n trial_command_tmpl = '{envs} {python} -m nni.retiarii.trial_entry {execution_engine}'\n if self.trial_command != '_reserved' and '-m nni.retiarii.trial_entry' not in self.trial_command:\n raise ValueError(msg.format('trial_command', self.trial_command))\n\n if isinstance(self.execution_engine, str):\n self.execution_engine = execution_engine_config_factory(self.execution_engine)\n\n _trial_command_params = {\n # Default variables\n 'envs': '',\n # TODO: maybe use sys.executable rendered in trial side (e.g., trial_runner)\n 'python': sys.executable,\n 'execution_engine': self.execution_engine.name,\n\n # This should override the parameters above.\n **(self._trial_command_params or {})\n }\n\n self.trial_command = trial_command_tmpl.format(**_trial_command_params).strip()\n\n super()._canonicalize([self])\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 310, "n_words": 113, "vocab_size": 88, "complexity": 8, "nloc": 19, "token_counts": 174, "n_ast_nodes": 299, "n_identifiers": 23, "random_cut": "def _canonicalize(self, _parents):\n msg = '{} is not supposed to be set in Retiarii experiment by users, your config is {}.'\n if self.search_space != '':\n raise ValueError(msg.format('search_space', self.search_space))\n # TODO: maybe we should also allow users to specify trial_code_directory\n if str(self.trial_code_directory) != '.' 
and not os.path.isabs(self.trial_code_directory):\n raise ValueError(msg.format('trial_code_directory', self.trial_code_directory))\n\n trial_command_tmpl = '{envs} {python} -m nni.retiarii.trial_entry {execution_engine}'\n if self.trial_command != '_reserved' and '-m nni.retiarii.trial_entry' not in self.trial_command:\n raise ValueError(msg.format('tri" }, { "id": 146283, "commit_id": "52a722ffe7d255e12cfab3352d442cbca362af44", "repo": "ray", "path": "dashboard/modules/job/tests/test_http_job_server.py", "file_name": "test_http_job_server.py", "fun_name": "test_submit_job", "commit_message": "[jobs] Make local pip/conda requirements files work with jobs (#22849)", "code": "def test_submit_job(job_sdk_client, runtime_env_option, monkeypatch):\n # This flag allows for local testing of runtime env conda functionality\n # without needing a built Ray wheel. Rather than insert the link to the\n # wheel into the conda spec, it links to the current Python site.\n monkeypatch.setenv(\"RAY_RUNTIME_ENV_LOCAL_DEV_MODE\", \"1\")\n\n client = job_sdk_client\n\n job_id = client.submit_job(\n entrypoint=runtime_env_option[\"entrypoint\"],\n runtime_env=runtime_env_option[\"runtime_env\"],\n )\n\n wait_for_condition(_check_job_succeeded, client=client, job_id=job_id, timeout=120)\n\n logs = client.get_job_logs(job_id)\n assert runtime_env_option[\"expected_logs\"] in logs\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 109, "n_words": 65, "vocab_size": 55, "complexity": 1, "nloc": 10, "token_counts": 72, "n_ast_nodes": 118, "n_identifiers": 15, "random_cut": "def test_submit_job(job_sdk_client, runtime_env_option, monkeypatch):\n # This flag allows for local testing of runtime env conda functionality\n # without needing a built Ray wheel. Rather than insert the link to the\n # wheel into the conda spec, it links to the current Python site.\n monkeypatch.setenv(\"RAY_RUNTIME_ENV_LOCAL_DEV_MODE\", \"1\")\n\n client = job_sdk_client\n\n job_id = client.submit_job(\n entrypoint=runtime_env_option[\"entrypoint\"],\n runtime_env=runtime_env_option[\"runtime_env\"],\n )\n\n wait_for_condition(_check_job_succeeded, client=client, job_id=job_id, timeout=120)\n\n logs = client.get_job_logs(job_id)\n assert runtime_env_option[\"expected_logs\"] in logs\n\n" }, { "id": 252541, "commit_id": "e11626395e42113f43cb6c94feab28c977389cd8", "repo": "mitmproxy", "path": "test/mitmproxy/addons/test_asgiapp.py", "file_name": "test_asgiapp.py", "fun_name": "test_asgi_full", "commit_message": "Fix running hook issue and signal proxyserver updates. 
(#5512)\n\n* add startup hook\r\n\r\n* remove StartupHook\r\n\r\n* add instance state reports\r\n\r\n* make server stop log entries clearer\r\n\r\n* fix default param issue\r\n\r\n* add SyncSignal and AsyncSignal\r\n\r\n* fix signal issues\r\n\r\n* simpler update handling\r\n\r\n* fix coverage\r\n\r\n* minor change to better reflect server update state\r\n\r\n* fix reuse tests on linux/wsl\r\n\r\n* exterminate `mitmproxy.exceptions.TypeError`\r\n\r\nCo-authored-by: Maximilian Hils ", "code": "async def test_asgi_full():\n ps = Proxyserver()\n addons = [\n asgiapp.WSGIApp(tapp, \"testapp\", 80),\n asgiapp.ASGIApp(errapp, \"errapp\", 80),\n asgiapp.ASGIApp(noresponseapp, \"noresponseapp\", 80),\n ]\n with taddons.context(ps, *addons) as tctx:\n tctx.master.addons.add(next_layer.NextLayer())\n tctx.configure(ps, listen_host=\"127.0.0.1\", listen_port=0)\n assert await ps.setup_servers()\n ps.running()\n await tctx.master.await_log(\"HTTP(S) proxy listening\", level=\"info\")\n proxy_addr = (\"127.0.0.1\", ps.listen_addrs()[0][1])\n\n reader, writer = await asyncio.open_connection(*proxy_addr)\n req = f\"GET http://testapp:80/ HTTP/1.1\\r\\n\\r\\n\"\n writer.write(req.encode())\n header = await reader.readuntil(b\"\\r\\n\\r\\n\")\n assert header.startswith(b\"HTTP/1.1 200 OK\")\n body = await reader.readuntil(b\"testapp\")\n assert body == b\"testapp\"\n\n reader, writer = await asyncio.open_connection(*proxy_addr)\n req = f\"GET http://testapp:80/parameters?param1=1¶m2=2 HTTP/1.1\\r\\n\\r\\n\"\n writer.write(req.encode())\n header = await reader.readuntil(b\"\\r\\n\\r\\n\")\n assert header.startswith(b\"HTTP/1.1 200 OK\")\n body = await reader.readuntil(b\"}\")\n assert body == b'{\"param1\": \"1\", \"param2\": \"2\"}'\n\n reader, writer = await asyncio.open_connection(*proxy_addr)\n req = f\"POST http://testapp:80/requestbody HTTP/1.1\\r\\nContent-Length: 6\\r\\n\\r\\nHello!\"\n writer.write(req.encode())\n header = await reader.readuntil(b\"\\r\\n\\r\\n\")\n assert header.startswith(b\"HTTP/1.1 200 OK\")\n body = await reader.readuntil(b\"}\")\n assert body == b'{\"body\": \"Hello!\"}'\n\n reader, writer = await asyncio.open_connection(*proxy_addr)\n req = f\"GET http://errapp:80/?foo=bar HTTP/1.1\\r\\n\\r\\n\"\n writer.write(req.encode())\n header = await reader.readuntil(b\"\\r\\n\\r\\n\")\n assert header.startswith(b\"HTTP/1.1 500\")\n body = await reader.readuntil(b\"ASGI Error\")\n assert body == b\"ASGI Error\"\n\n reader, writer = await asyncio.open_connection(*proxy_addr)\n req = f\"GET http://noresponseapp:80/ HTTP/1.1\\r\\n\\r\\n\"\n writer.write(req.encode())\n header = await reader.readuntil(b\"\\r\\n\\r\\n\")\n assert header.startswith(b\"HTTP/1.1 500\")\n body = await reader.readuntil(b\"ASGI Error\")\n assert body == b\"ASGI Error\"\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 502, "n_words": 183, "vocab_size": 72, "complexity": 1, "nloc": 49, "token_counts": 424, "n_ast_nodes": 750, "n_identifiers": 37, "random_cut": "async def test_asgi_full():\n ps = Proxyserver()\n addons = [\n asgiapp.WSGIApp(tapp, \"testapp\", 80),\n asgiapp.ASGIApp(errapp, \"errapp\", 80),\n asgiapp.ASGIApp(noresponseapp, \"noresponseapp\", 80),\n ]\n with taddons.context(ps, *addons) as tctx:\n tctx.master.addons.add(next_layer.NextLayer())\n tctx.configure(ps, listen_host=\"127.0.0.1\", list" }, { "id": 284637, "commit_id": "707fd8f506d6927f1accc222307d8532e6dd6a6b", "repo": "OpenBBTerminal", 
"path": "openbb_terminal/stocks/dark_pool_shorts/dps_controller.py", "file_name": "dps_controller.py", "fun_name": "print_help", "commit_message": "Terminal supporting controller with multiple languages (#1730)\n\n* initial implementation of terminal supporting multiple languages\r\n\r\n* improve quote command\r\n\r\n* stocks controller with english translation pattern\r\n\r\n* add language to settings\r\n\r\n* update reqs to have python-i18n\r\n\r\n* find languages automatically\r\n\r\n* add to documentation how to support another language\r\n\r\n* markdown\r\n\r\n* fix no member issue\r\n\r\n* fix test\r\n\r\n* refactor code with translations\r\n\r\n* translate for options and options/scr\r\n\r\n* solve translation for all submenus within options controller\r\n\r\n* fix stocks controller tests\r\n\r\n* translate disc and th\r\n\r\n* dps and sia menus\r\n\r\n* scr and ins\r\n\r\n* add gov controller\r\n\r\n* fix tests for new controllers\r\n\r\n* stocks/ba and stocks/ca menus\r\n\r\n* add stocks/fa\r\n\r\n* res controller\r\n\r\n* stocks/dd\r\n\r\n* stocks/bt\r\n\r\n* qa and pred\r\n\r\n* crypto and crypto/disc\r\n\r\n* overview and onchain menus\r\n\r\n* defi\r\n\r\n* nft and tools\r\n\r\n* crpyto/dd\r\n\r\n* stocks and crypto TA\r\n\r\n* finish crypto menu\r\n\r\n* finish ETF controller\r\n\r\n* finish economy menu\r\n\r\n* forex\r\n\r\n* alternative and mutual funds\r\n\r\n* refactor source for translated version\r\n\r\n* econometrics and portfolio and brokers\r\n\r\n* portfolio\r\n\r\n* fix tests\r\n\r\n* finish language implementation\r\n\r\n* refactor code for cmds\r\n\r\n* improve keys\r\n\r\n* refactor name funcs and add docstrings\r\n\r\n* fix minor stuff\r\n\r\n* fix tests\r\n\r\nCo-authored-by: Theodore Aptekarev \r\nCo-authored-by: Chavithra \r\nCo-authored-by: James Maslek ", "code": "def print_help(self):\n \n mt = MenuText(\"stocks/dps/\")\n mt.add_cmd(\"load\")\n mt.add_raw(\"\\n\")\n mt.add_cmd(\"shorted\", \"Yahoo Finance\")\n mt.add_cmd(\"ctb\", \"Interactive Broker\")\n mt.add_cmd(\"hsi\", \"Shortinterest\")\n mt.add_cmd(\"prom\", \"FINRA\")\n mt.add_cmd(\"pos\", \"Stockgrid\")\n mt.add_cmd(\"sidtc\", \"Stockgrid\")\n mt.add_raw(\"\\n\")\n mt.add_param(\"_ticker\", self.ticker or \"\")\n mt.add_raw(\"\\n\")\n mt.add_cmd(\"dpotc\", \"FINRA\", self.ticker)\n mt.add_cmd(\"ftd\", \"SEC\", self.ticker)\n mt.add_cmd(\"spos\", \"Stockgrid\", self.ticker)\n mt.add_cmd(\"psi\", \"Quandl/Stockgrid\", self.ticker)\n console.print(text=mt.menu_text, menu=\"Stocks - Dark Pool and Short data\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 173, "n_words": 47, "vocab_size": 41, "complexity": 2, "nloc": 18, "token_counts": 158, "n_ast_nodes": 301, "n_identifiers": 13, "random_cut": "def print_help(self):\n \n mt = MenuText(\"stocks/dps/\")\n mt.add_cmd(\"load\")\n mt.add_raw(\"\\n\")\n mt.add_cmd(\"shorted\", \"Yahoo Finance\")\n mt.add_cmd(\"ctb\", \"Interactive Broker\")\n mt.add_cmd(\"hsi\", \"Shortinterest\")\n mt.add_cmd(\"prom\", \"FINRA\")\n mt.add_cmd(\"pos\", \"Stockgrid\")\n mt.add_cmd(\"sidtc\", \"Stockgrid\")\n mt.add_raw(\"\\n\")\n mt.add_param(\"_ticker\", self.ticker or \"\")\n mt.add_raw(\"\\n\")\n mt.add_cmd(\"dpotc\", \"FINRA\", self.ticker)\n mt.add_cmd(\"ftd\", \"SEC\", s" }, { "id": 124865, "commit_id": "5a95e11e1e2ed0c2a98a99c549afd67a1107473e", "repo": "ray", "path": "python/ray/data/tests/mock_http_server.py", "file_name": "mock_http_server.py", 
"fun_name": "do_GET", "commit_message": "[Datasets] Improve read_xxx experience of HTTP file (#26454)", "code": "def do_GET(self):\n file_path = self.path.rstrip(\"/\")\n file_data = self.files.get(file_path)\n if file_data is None:\n return self._respond(404)\n if \"Range\" in self.headers:\n ran = self.headers[\"Range\"]\n b, ran = ran.split(\"=\")\n start, end = ran.split(\"-\")\n if start:\n file_data = file_data[int(start) : (int(end) + 1) if end else None]\n else:\n # suffix only\n file_data = file_data[-int(end) :]\n if \"give_length\" in self.headers:\n response_headers = {\"Content-Length\": len(file_data)}\n self._respond(200, response_headers, file_data)\n elif \"give_range\" in self.headers:\n self._respond(\n 200,\n {\"Content-Range\": \"0-%i/%i\" % (len(file_data) - 1, len(file_data))},\n file_data,\n )\n else:\n self._respond(200, data=file_data)\n\n\n@contextlib.contextmanager", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@contextlib.contextmanager", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 338, "n_words": 80, "vocab_size": 58, "complexity": 7, "nloc": 24, "token_counts": 185, "n_ast_nodes": 316, "n_identifiers": 21, "random_cut": "def do_GET(self):\n file_path = self.path.rstrip(\"/\")\n file_data = self.files.get(file_path)\n if file_data is None:\n return self._respond(404)\n if \"Range\" in self.headers:\n ran = self.headers[\"Range\"]\n b, ran = ran.split(\"=\")\n start, end = ran.split(\"-\")\n if start:\n file_data = file_data[int(start) : (int(end) + 1) if end else None]\n else:\n # suffix only\n file_data = file_data[-int(end) :]\n if \"give_length\" in self.headers:\n response_headers = {\"Content-Length\": len(file_data)}\n self._respond(200, response_headers, file_data)\n elif \"give_range\" in self.headers:\n self._respond(\n 200,\n " }, { "id": 252160, "commit_id": "6e66875e73ad1cb3dd2b93b69fd6079ea348da87", "repo": "mitmproxy", "path": "mitmproxy/proxy/layers/quic.py", "file_name": "quic.py", "fun_name": "process_events", "commit_message": "[quic] first connectable version", "code": "def process_events(self) -> layer.CommandGenerator[None]:\n assert self.quic is not None\n assert self.tls is not None\n\n # handle all buffered aioquic connection events\n event = self.quic.next_event()\n while event is not None:\n if isinstance(event, quic_events.ConnectionIdIssued):\n if self.issue_connection_id_callback is not None:\n self.issue_connection_id_callback(event.connection_id)\n\n elif isinstance(event, quic_events.ConnectionIdRetired):\n if self.retire_connection_id_callback is not None:\n self.retire_connection_id_callback(event.connection_id)\n\n elif isinstance(event, quic_events.ConnectionTerminated):\n yield from self.shutdown_connection(\n reason=event.reason_phrase or str(event.error_code),\n level=(\n \"info\" if event.error_code is QuicErrorCode.NO_ERROR else \"warn\"\n ),\n )\n\n elif isinstance(event, quic_events.HandshakeCompleted):\n # concatenate all peer certificates\n all_certs = []\n if self.quic.tls._peer_certificate is not None:\n all_certs.append(self.quic.tls._peer_certificate)\n if self.quic.tls._peer_certificate_chain is not None:\n all_certs.extend(self.quic.tls._peer_certificate_chain)\n\n # set the connection's TLS properties\n self.conn.timestamp_tls_setup = self._loop.time()\n self.conn.certificate_list = [\n certs.Cert.from_pyopenssl(x) for x in all_certs\n ]\n self.conn.alpn = event.alpn_protocol.encode(\"ascii\")\n self.conn.cipher = 
self.quic.tls.key_schedule.cipher_suite.name\n self.conn.tls_version = \"QUIC\"\n\n # report the success to addons\n tls_data = QuicTlsData(\n conn=self.conn, context=self.context, settings=self.tls\n )\n if self.conn is self.context.client:\n yield layers.tls.TlsEstablishedClientHook(tls_data)\n else:\n yield layers.tls.TlsEstablishedServerHook(tls_data)\n\n # perform next layer decisions now\n if isinstance(self.child_layer, layer.NextLayer):\n yield from self.child_layer._ask()\n\n # forward the event as a QuicConnectionEvent to the child layer\n yield from self.event_to_child(\n QuicConnectionEvent(connection=self.conn, event=event)\n )\n\n # handle the next event\n event = self.quic.next_event()\n\n # transmit buffered data and re-arm timer\n yield from self.transmit()\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 928, "n_words": 185, "vocab_size": 112, "complexity": 15, "nloc": 45, "token_counts": 381, "n_ast_nodes": 605, "n_identifiers": 62, "random_cut": "def process_events(self) -> layer.CommandGenerator[None]:\n assert self.quic is not None\n assert self.tls is not None\n\n # handle all buffered aioquic connection events\n event = self.quic.next_event()\n while event is not None:\n if isinstance(event, quic_events.ConnectionIdIssued):\n if self.issue_connection_id_callback is not None:\n self.issue_connection_id_callback(event.connection_id)\n\n elif isinstance(event, quic_events.ConnectionIdRetired):\n if self.retire_connection_id_callback is not None:\n self.retire_connection_id_callback(event.connection_id)\n\n elif isinstance(event, quic_events.ConnectionTerminated):\n yield from self.shutdown_connection(\n reason=event.reason_phrase or str(event.error_code),\n level=(\n \"info\" if event.error_code is QuicErrorCode.NO_ERROR else \"warn\"\n ),\n )\n\n elif isinstance(event, quic_events.HandshakeCompleted):\n # concatenate all peer certificates\n all_certs = []\n if self.quic.tls._peer_certi" }, { "id": 282706, "commit_id": "e077859b95730b7936da051aaf2c24b89d4ef402", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/portfolio/portfolio_controller.py", "file_name": "portfolio_controller.py", "fun_name": "call_po", "commit_message": "Portfolio fix (#1419)\n\n* Fixing Allocation Plotting Issue\r\n\r\n* Linting\r\n\r\n* More Linting", "code": "def call_po(self, _):\n \n if self.portfolio.empty:\n tickers = []\n else:\n tickers = (\n self.portfolio._stock_tickers\n + self.portfolio._etf_tickers\n + self.portfolio._crypto_tickers\n )\n self.queue = self.load_class(\n po_controller.PortfolioOptimization, tickers, self.queue\n )\n\n # BUG: The commands in pa menu throw errors. 
First one says that it's related to\n # string formatting and the second one has something to do with None being used\n # instead of [] in the queue (assumption) what throws errors on the logger.\n # TODO: This submenu is disabled until the bug is fixed.\n # def call_pa(self, _):\n # \n # from gamestonk_terminal.portfolio.portfolio_analysis import pa_controller\n #\n # self.queue = self.queue = self.load_class(\n # pa_controller.PortfolioAnalysis, self.queue\n # )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 295, "n_words": 103, "vocab_size": 70, "complexity": 2, "nloc": 12, "token_counts": 60, "n_ast_nodes": 106, "n_identifiers": 13, "random_cut": "def call_po(self, _):\n \n if self.portfolio.empty:\n tickers = []\n else:\n tickers = (\n self.portfolio._stock_tickers\n + self.portfolio._etf_tickers\n + self.portfolio._crypto_tickers\n )\n self.queue = self.load_class(\n po_controller.PortfolioOptimization, tickers, self.queue\n )\n\n # BUG: The commands in pa menu throw errors. First one says that it's related to\n # string formatting and the second one has something to do with None being used\n # instead of [] in the queue (assumption) what throws errors on the logger.\n # TODO: This submenu is disabled until the bug is fixed.\n # def call_pa(self, _):\n # \n # from gamestonk_terminal.portfolio.portfolio_analysis import pa_controller\n #\n # self.queue " }, { "id": 283643, "commit_id": "b71abcfbf4d7e8ac1855522aff0378e13c8b5362", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/stocks/quantitative_analysis/test_qa_controller.py", "file_name": "test_qa_controller.py", "fun_name": "test_menu_with_queue", "commit_message": "Updating some names (#1575)\n\n* quick econ fix\r\n\r\n* black\r\n\r\n* keys and feature flags\r\n\r\n* terminal name :eyes:\r\n\r\n* some more replacements\r\n\r\n* some more replacements\r\n\r\n* edit pyproject\r\n\r\n* gst -> openbb\r\n\r\n* add example portfolios back to git\r\n\r\n* Update api from gst\r\n\r\n* sorry. 
skipping some tests\r\n\r\n* another round of names\r\n\r\n* another round of test edits\r\n\r\n* Missed some .gst refs and update timezone\r\n\r\n* water mark stuff\r\n\r\n* Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS\r\n\r\n* fix more GST to OpenBB Terminal\r\n\r\n* Logging : merge conflicts with main\r\n\r\n* Revert wrong files\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA ", "code": "def test_menu_with_queue(expected, mocker, queue):\n mocker.patch(\n target=(\n \"openbb_terminal.stocks.quantitative_analysis.qa_controller.\"\n \"QaController.switch\"\n ),\n return_value=[\"quit\"],\n )\n result_menu = qa_controller.QaController(\n ticker=\"TSLA\",\n start=datetime.strptime(\"2021-12-21\", \"%Y-%m-%d\"),\n interval=\"1440min\",\n stock=DF_STOCK.copy(),\n queue=queue,\n ).menu()\n\n assert result_menu == expected\n\n\n@pytest.mark.vcr(record_mode=\"none\")", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr(record_mode=\"none\")", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 117, "n_words": 26, "vocab_size": 25, "complexity": 1, "nloc": 16, "token_counts": 73, "n_ast_nodes": 141, "n_identifiers": 23, "random_cut": "def test_menu_with_queue(expected, mocker, queue):\n mocker.patch(\n target=(\n \"openbb_terminal.stocks.quantitative_analysis.qa_controller.\"\n \"QaController.switch\"\n ),\n return_value=[\"quit\"],\n )\n result_menu = qa_controller.QaController(\n ticker=\"TSLA\",\n start=datetime.strptime(\"2021-12-21\", \"%Y-%m-%d\"),\n " }, { "id": 219701, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "_format_sign", "commit_message": "add python 3.10.4 for windows", "code": "def _format_sign(is_negative, spec):\n \n\n if is_negative:\n return '-'\n elif spec['sign'] in ' +':\n return spec['sign']\n else:\n return ''\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 50, "n_words": 17, "vocab_size": 14, "complexity": 3, "nloc": 7, "token_counts": 30, "n_ast_nodes": 59, "n_identifiers": 3, "random_cut": "def _format_sign(is_negative, spec):\n \n\n if is_negative:\n return '-'\n elif spec['sign'] in ' +':\n return spec['sign']\n else:\n " }, { "id": 27463, "commit_id": "f2ce999fa5865917b8d104d38ef3269eebaf6c06", "repo": "saleor", "path": "saleor/plugins/webhook/tests/subscription_webhooks/test_create_deliveries_for_subscription.py", "file_name": "test_create_deliveries_for_subscription.py", "fun_name": "test_product_variant_created", "commit_message": "Meta fields added to subscription webhooks event types. 
(#9759)\n\n* Meta fields added to subscription webhooks event types.\r\n\r\n* Imports adjustments.\r\n\r\n* Change Event type from Union to Interface.\r\n\r\n* Rebase fixes.\r\n\r\n* Review fixes\r\n\r\n* Handle AnonymousUser as requestor.", "code": "def test_product_variant_created(variant, subscription_product_variant_created_webhook):\n webhooks = [subscription_product_variant_created_webhook]\n event_type = WebhookEventAsyncType.PRODUCT_VARIANT_CREATED\n variant_id = graphene.Node.to_global_id(\"ProductVariant\", variant.id)\n deliveries = create_deliveries_for_subscriptions(event_type, variant, webhooks)\n expected_payload = json.dumps({\"productVariant\": {\"id\": variant_id}})\n\n assert deliveries[0].payload.payload == expected_payload\n assert len(deliveries) == len(webhooks)\n assert deliveries[0].webhook == webhooks[0]\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 58, "n_words": 35, "vocab_size": 26, "complexity": 1, "nloc": 9, "token_counts": 90, "n_ast_nodes": 142, "n_identifiers": 20, "random_cut": "def test_product_variant_created(variant, subscription_product_variant_created_webhook):\n webhooks = [subscription_product_variant_created_webhook]\n event_type = WebhookEventAsyncType.PRODUCT_VARIANT_CREATED\n variant_id = graphene.Node.to_global_id(\"ProductVariant\", variant.id)\n deliveries = create_deliveries_for_subscriptions(event_type, variant, webhooks)\n expected_payload = json.dumps({\"productVariant\": {\"id\": variant_id}})\n\n assert deliveries[0].payload.payload == expected_payload\n assert len(deliveries) == len(webhooks)\n assert deliveries[0" }, { "id": 188826, "commit_id": "843dc628c78e2373fcd33ee6ec0b74042e532f2e", "repo": "calibre", "path": "src/calibre/gui2/preferences/columns.py", "file_name": "columns.py", "fun_name": "setup_row", "commit_message": "Enhancement: allow sorting the columns in the create custom column dialog.", "code": "def setup_row(self, row, key):\n flags = Qt.ItemFlag.ItemIsEnabled | Qt.ItemFlag.ItemIsSelectable\n\n if self.is_custom_key(key):\n cc = self.custcols[key]\n original_key = cc['original_key']\n else:\n cc = self.field_metadata[key]\n original_key = key\n\n item = QTableWidgetItem()\n item.setData(Qt.ItemDataRole.DisplayRole, QVariant(row))\n item.setToolTip(str(row))\n item.setData(Qt.ItemDataRole.UserRole, key)\n item.setFlags(flags)\n self.opt_columns.setItem(row, 0, item)\n\n flags |= Qt.ItemFlag.ItemIsUserCheckable\n if key == 'ondevice':\n item.setFlags(flags & ~Qt.ItemFlag.ItemIsEnabled)\n item.setCheckState(Qt.CheckState.PartiallyChecked)\n else:\n item.setFlags(flags)\n item.setCheckState(Qt.CheckState.Unchecked if key in self.hidden_cols else\n Qt.CheckState.Checked)\n\n item = QTableWidgetItem(cc['name'])\n item.setToolTip(cc['name'])\n item.setFlags(flags)\n if self.is_custom_key(key):\n item.setData(Qt.ItemDataRole.DecorationRole, (QIcon(I('column.png'))))\n self.opt_columns.setItem(row, 1, item)\n\n item = QTableWidgetItem(key)\n item.setToolTip(key)\n item.setFlags(flags)\n self.opt_columns.setItem(row, 2, item)\n\n if key == 'title':\n coltype = _('Text')\n elif key == 'ondevice':\n coltype = _('Yes/No with text')\n else:\n dt = cc['datatype']\n if cc['is_multiple']:\n if key == 'authors' or cc.get('display', {}).get('is_names', False):\n coltype = _('Ampersand separated text, shown in the Tag browser')\n else:\n coltype = self.column_desc['*' + dt]\n else:\n coltype = self.column_desc[dt]\n item = 
QTableWidgetItem(coltype)\n item.setToolTip(coltype)\n item.setFlags(flags)\n self.opt_columns.setItem(row, 3, item)\n\n desc = cc['display'].get('description', \"\")\n item = QTableWidgetItem(desc)\n item.setToolTip(desc)\n item.setFlags(flags)\n self.opt_columns.setItem(row, 4, item)\n\n if '*deleted' in cc:\n col_status = _('Deleted column. Double-click to undelete it')\n elif self.is_new_custom_column(cc):\n col_status = _('New column')\n elif original_key != key:\n col_status = _('Edited. Lookup name was {}').format(original_key)\n elif '*edited' in cc:\n col_status = _('Edited')\n else:\n col_status = ''\n item = QTableWidgetItem(col_status)\n item.setToolTip(col_status)\n item.setFlags(flags)\n self.opt_columns.setItem(row, 5, item)\n", "url": "https://github.com/kovidgoyal/calibre.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 798, "n_words": 194, "vocab_size": 110, "complexity": 14, "nloc": 68, "token_counts": 521, "n_ast_nodes": 871, "n_identifiers": 45, "random_cut": "def setup_row(self, row, key):\n flags = Qt.ItemFlag.ItemIsEnabled | Qt.ItemFlag.ItemIsSelectable\n\n if self.is_custom_key(key):\n cc = self.custcols[key]\n original_key = cc['original_key']\n else:\n cc = self.field_metadata[key]\n original_key = key\n\n item = QTableWidgetItem()\n item.setData(Qt.ItemDataRole.DisplayRole, QVariant(row))\n item.setToolTip(str(row))\n item.setData(Qt.ItemDataRole.UserRole, key)\n item.setFlags(flags)\n self.opt_columns.setItem(row, 0, item)\n\n flags |= Qt.ItemFlag.ItemIsUserCheckable\n if key == 'ondevice':\n item.setFlags(flags & ~Qt.ItemFlag.ItemIsEnabled)\n i" }, { "id": 322222, "commit_id": "a5f8a3ec9d4a5363d4d3644ad06773db76a8a503", "repo": "PaddleNLP", "path": "examples/language_model/data_tools/create_pretraining_data.py", "file_name": "create_pretraining_data.py", "fun_name": "get_args", "commit_message": "[Pre-Training] Add tutorial for clue small 14g dataset (#1555)\n\n* add tutorial for clue small 14g.\r\n\r\n* add pre-train weight to community.\r\n\r\n* fix typos.\r\n\r\n* fix typo.\r\n\r\n* add dataset link.\r\n\r\n* change name to ernie-1.0-cluecorpussmall\r\n\r\nCo-authored-by: Zeyu Chen ", "code": "def get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--model_name', type=str, required=True, help='What model to use.')\n parser.add_argument(\n '--tokenizer_name',\n type=str,\n required=True,\n choices=[\n 'ErnieTokenizer', 'BertTokenizer', 'GPTTokenizer',\n 'GPTChineseTokenizer'\n ],\n help='What type of tokenizer to use.')\n group = parser.add_argument_group(title='data input/output')\n group.add_argument(\n '--input_path',\n type=str,\n required=True,\n help='Path to input JSON files.')\n group.add_argument(\n '--output_prefix',\n type=str,\n required=True,\n help='Output prefix to store output file.')\n group.add_argument(\n '--data_format',\n type=str,\n default='text',\n choices=['JSON'],\n help='Only support json format for now. One document per line.')\n group.add_argument(\n '--json_key',\n type=str,\n default='text',\n help='For JSON format. 
Space separate listed of keys to extract from json'\n )\n group.add_argument(\n '--split_sentences',\n action='store_true',\n help='Split documents into sentences.')\n\n group = parser.add_argument_group(title='chinese words')\n group.add_argument(\n '--chinese',\n action='store_true',\n help=\"Is corpus need words segmentation step for chinese words.\")\n group.add_argument(\n '--cn_whole_word_segment',\n action='store_true',\n help=\"Is corpus need words segmentation step for chinese words WWM.\")\n group.add_argument(\n '--cn_seg_func',\n type=str,\n default='jieba',\n choices=['lac', 'seg', 'jieba'],\n help='Words segment function for chinese words.')\n group.add_argument(\n '--cn_splited',\n action='store_true',\n help=\"Is chinese corpus is splited in to words.\")\n group.add_argument(\n '--cn_split_dimer',\n type=str,\n default=' ',\n help=\"Split dimer between chinese words.\")\n\n group = parser.add_argument_group(title='common config')\n group.add_argument(\n '--append_eos',\n action='store_true',\n help='Append an token to the end of a document.')\n group.add_argument(\n '--log_interval',\n type=int,\n default=100,\n help='Interval between progress updates')\n group.add_argument(\n '--workers',\n type=int,\n default=1,\n help='Number of worker processes to launch')\n\n args = parser.parse_args()\n return args\n\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 673, "n_words": 194, "vocab_size": 126, "complexity": 1, "nloc": 81, "token_counts": 328, "n_ast_nodes": 567, "n_identifiers": 18, "random_cut": "def get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--model_name', type=str, required=True, help='What model to use.')\n parser.add_argument(\n '--tokenizer_name',\n type=str,\n required=True,\n choices=[\n 'ErnieTokenizer', 'BertTokenizer', 'GPTTokenizer',\n 'GPTChineseTokenizer'\n ],\n help='What type of tokenizer to use.')\n group = parser.add_argument_group(title='data input/output')\n group.add_argument(\n '--input_path',\n type=str,\n required=True,\n help='Path to input JSON files.')\n group.add_argument(\n '--output_prefix',\n type=str,\n required=True,\n help='Output prefix to store output file.')\n group.add_argument(\n '--data_format',\n type=str,\n default='text',\n choices=['JSON'],\n help='Only support json format for now. One document per line.')\n group.add_argument(\n '--json_key',\n type=str,\n default='text',\n help='For JSON format. 
Space separate listed of keys to extract from json'\n )\n group.add_argument(\n '--split_sentences',\n action='store_true',\n help='Split documents into sentences.')\n\n group = parser.add_argument_group(title='chinese words')\n group.add_argument(\n '--chinese',\n action='store_true',\n help=\"Is corpus need words segmentation step for chinese words.\")\n group.add_argument(\n '--cn_whole_word_segment',\n action='store_true',\n help=\"Is corpus need words segmentation step for chinese words WWM.\")\n group.add_argument(\n '--cn_seg_func',\n type=str,\n default='jieba',\n choices=['lac', 'seg', 'jieba'],\n help='Words segment function for chinese words.')\n group.add_argument(\n '--cn_splited',\n action='store_true',\n help=\"Is chinese corpus is splited in to words.\")\n group.add_argument(\n '--cn_split_dimer',\n type=str,\n default=' ',\n help=\"Split dimer between " }, { "id": 27451, "commit_id": "f2ce999fa5865917b8d104d38ef3269eebaf6c06", "repo": "saleor", "path": "saleor/plugins/webhook/tests/subscription_webhooks/test_create_deliveries_for_subscription.py", "file_name": "test_create_deliveries_for_subscription.py", "fun_name": "test_checkout_create", "commit_message": "Meta fields added to subscription webhooks event types. (#9759)\n\n* Meta fields added to subscription webhooks event types.\r\n\r\n* Imports adjustments.\r\n\r\n* Change Event type from Union to Interface.\r\n\r\n* Rebase fixes.\r\n\r\n* Review fixes\r\n\r\n* Handle AnonymousUser as requestor.", "code": "def test_checkout_create(checkout, subscription_checkout_created_webhook):\n webhooks = [subscription_checkout_created_webhook]\n event_type = WebhookEventAsyncType.CHECKOUT_CREATED\n checkout_id = graphene.Node.to_global_id(\"Checkout\", checkout.pk)\n deliveries = create_deliveries_for_subscriptions(event_type, checkout, webhooks)\n expected_payload = json.dumps({\"checkout\": {\"id\": checkout_id}})\n assert deliveries[0].payload.payload == expected_payload\n assert len(deliveries) == len(webhooks)\n assert deliveries[0].webhook == webhooks[0]\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 58, "n_words": 35, "vocab_size": 26, "complexity": 1, "nloc": 9, "token_counts": 90, "n_ast_nodes": 142, "n_identifiers": 20, "random_cut": "def test_checkout_create(checkout, subscription_checkout_created_webhook):\n webhooks = [subscription_checkout_created_webhook]\n event_type = WebhookEventAsyncType.CHECKOUT_CREATED\n checkout_id = graphene.Node.to_global_id(\"Checkout\", checkout.pk)\n deliveries = create_deliveries_for_subscriptions(event_type, checkout, webhooks)\n expected_payload = json.dumps({\"checkout\": {\"id\": checkout_id}})\n assert deliveries[0].payload.payload" }, { "id": 181886, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tpot/builtins/zero_count.py", "file_name": "zero_count.py", "fun_name": "transform", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def transform(self, X, y=None):\n \n X = check_array(X)\n n_features = X.shape[1]\n\n X_transformed = np.copy(X)\n\n non_zero_vector = np.count_nonzero(X_transformed, axis=1)\n non_zero = np.reshape(non_zero_vector, (-1, 1))\n zero_col = np.reshape(n_features - non_zero_vector, (-1, 1))\n\n X_transformed = np.hstack((non_zero, X_transformed))\n X_transformed = np.hstack((zero_col, X_transformed))\n\n return X_transformed\n", 
"url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 109, "n_words": 39, "vocab_size": 26, "complexity": 1, "nloc": 10, "token_counts": 104, "n_ast_nodes": 159, "n_identifiers": 17, "random_cut": "def transform(self, X, y=None):\n \n X = check_array(X)\n n_features = X.shape[1]\n\n X_transformed = np.copy(X)\n\n non_zero_vector = np.count_nonzero(X_transformed, axis=1)\n non_zero = np.reshape(non_zero_vector, (-1, 1))\n zero_col = np.reshape(n_features - non_zero_vector, (-1, 1))\n\n X_transformed = np.hstack((non_zero, X_transformed))\n X_transformed = np.hstack((zero_col, X_transfor" }, { "id": 271572, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training.py", "file_name": "training.py", "fun_name": "to_json", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def to_json(self, **kwargs):\n \n model_config = self._updated_config()\n return json.dumps(\n model_config, default=json_utils.get_json_type, **kwargs\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 51, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 32, "n_ast_nodes": 52, "n_identifiers": 10, "random_cut": "def to_json(self, **kwargs):\n \n model_config = self._updated_config()\n return json.dumps(\n model_config, default=json_utils.get_json_type, **kwargs\n )\n" }, { "id": 318580, "commit_id": "fc695896dd8b0169001c438054a79e347053fac6", "repo": "paperless-ngx", "path": "src/documents/tests/test_classifier.py", "file_name": "test_classifier.py", "fun_name": "test_two_tags_predict_singledoc", "commit_message": "Format Python code with black", "code": "def test_two_tags_predict_singledoc(self):\n t1 = Tag.objects.create(name=\"t1\", matching_algorithm=Tag.MATCH_AUTO, pk=12)\n t2 = Tag.objects.create(name=\"t2\", matching_algorithm=Tag.MATCH_AUTO, pk=121)\n\n doc4 = Document.objects.create(\n title=\"doc1\", content=\"this is a document from c4\", checksum=\"D\"\n )\n\n doc4.tags.add(t1)\n doc4.tags.add(t2)\n self.classifier.train()\n self.assertListEqual(self.classifier.predict_tags(doc4.content), [t1.pk, t2.pk])\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 96, "n_words": 30, "vocab_size": 27, "complexity": 1, "nloc": 10, "token_counts": 117, "n_ast_nodes": 187, "n_identifiers": 22, "random_cut": "def test_two_tags_predict_singledoc(self):\n t1 = Tag.objects.create(name=\"t1\", matching_algorithm=Tag.MATCH_AUTO, pk=12)\n t2 = Tag.objects.create(name=\"t2\", matching_algorithm=Tag.MATCH_AUTO, pk=121)\n\n doc4 = Document.objects.create(\n title=\"doc1\", content=\"this is a document fr" }, { "id": 200737, "commit_id": "77f32b35d806fa64818b4370b9dd3298be236178", "repo": "sympy", "path": "sympy/algebras/quaternion.py", "file_name": "quaternion.py", "fun_name": "set_norm", "commit_message": "code cleanup", "code": "def set_norm(self, norm):\n \n norm = sympify(norm)\n\n if norm is not None and norm.is_number:\n if not norm.is_positive:\n raise ValueError(\"Input norm must be positive.\")\n\n numerical = all(elem.is_number is True for elem in self.args)\n if (numerical and norm**2 != self.norm()**2):\n raise ValueError(\"Incompatible value for norm.\")\n\n self._norm = norm\n", "url": 
"https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 136, "n_words": 45, "vocab_size": 33, "complexity": 7, "nloc": 9, "token_counts": 78, "n_ast_nodes": 129, "n_identifiers": 12, "random_cut": "def set_norm(self, norm):\n \n norm = sympify(norm)\n\n if norm is not None and norm.is_number:\n if not norm.is_positive:\n raise ValueError(\"Input norm must be positive.\")\n\n numerical = all(elem.is_number is True for elem in self.args)\n " }, { "id": 149666, "commit_id": "ae01afdd0f3bd9f63b3b39d8bb22ae8a3a38830b", "repo": "freqtrade", "path": "freqtrade/persistence/models.py", "file_name": "models.py", "fun_name": "recalc_trade_from_orders", "commit_message": "Models:Trade: Fix open_rate updates.", "code": "def recalc_trade_from_orders(self):\n filled_orders_count = len(self.select_filled_orders(self.entry_side))\n latest_order_in_trade = self.select_order(self.entry_side, True)\n # No fills - update open_rate in case order was replaced\n if (filled_orders_count == 0 and latest_order_in_trade is not None and\n latest_order_in_trade.price is not None):\n # after ensuring there is a populated order price\n self.open_rate = latest_order_in_trade.price\n # We need at least 2 entry orders for averaging amounts and rates.\n # TODO: this condition could probably be removed\n if filled_orders_count < 2:\n self.stake_amount = self.amount * self.open_rate / self.leverage\n\n # Just in case, still recalc open trade value\n self.recalc_open_trade_value()\n return\n\n total_amount = 0.0\n total_stake = 0.0\n for o in self.orders:\n if (o.ft_is_open or\n (o.ft_order_side != self.entry_side) or\n (o.status not in NON_OPEN_EXCHANGE_STATES)):\n continue\n\n tmp_amount = o.safe_amount_after_fee\n tmp_price = o.average or o.price\n if o.filled is not None:\n tmp_amount = o.filled\n if tmp_amount > 0.0 and tmp_price is not None:\n total_amount += tmp_amount\n total_stake += tmp_price * tmp_amount\n\n if total_amount > 0:\n # Leverage not updated, as we don't allow changing leverage through DCA at the moment.\n self.open_rate = total_stake / total_amount\n self.stake_amount = total_stake / (self.leverage or 1.0)\n self.amount = total_amount\n self.fee_open_cost = self.fee_open * self.stake_amount\n self.recalc_open_trade_value()\n if self.stop_loss_pct is not None and self.open_rate is not None:\n self.adjust_stop_loss(self.open_rate, self.stop_loss_pct)\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 598, "n_words": 196, "vocab_size": 112, "complexity": 17, "nloc": 32, "token_counts": 248, "n_ast_nodes": 379, "n_identifiers": 31, "random_cut": "def recalc_trade_from_orders(self):\n filled_orders_count = len(self.select_filled_orders(self.entry_side))\n latest_order_in_trade = self.select_order(self.entry_side, True)\n # No fills - update open_rate in case order was replaced\n if (filled_orders_count == 0 and latest_order_in_trade is not None and\n latest_order_in_trade.price is not None):\n # after ensuring there is a populated order price\n self.open_rate = latest_order_in_trade.price\n # We need at least 2 entry orders for averaging amounts and rates.\n # TODO: this condition could probably be removed\n if filled_or" }, { "id": 291963, "commit_id": "8f4ec89be6c2505d8a59eee44de335abe308ac9f", "repo": "core", "path": "tests/components/hue/test_light_v2.py", "file_name": "test_light_v2.py", "fun_name": "test_grouped_lights", 
"commit_message": "Bump aiohue to version 4.1.2 (#66609)", "code": "async def test_grouped_lights(hass, mock_bridge_v2, v2_resources_test_data):\n \n await mock_bridge_v2.api.load_test_data(v2_resources_test_data)\n\n await setup_platform(hass, mock_bridge_v2, \"light\")\n\n # test if entities for hue groups are created and disabled by default\n for entity_id in (\"light.test_zone\", \"light.test_room\"):\n ent_reg = er.async_get(hass)\n entity_entry = ent_reg.async_get(entity_id)\n\n assert entity_entry\n assert entity_entry.disabled\n assert entity_entry.disabled_by is er.RegistryEntryDisabler.INTEGRATION\n # entity should not have a device assigned\n assert entity_entry.device_id is None\n\n # enable the entity\n updated_entry = ent_reg.async_update_entity(\n entity_entry.entity_id, **{\"disabled_by\": None}\n )\n assert updated_entry != entity_entry\n assert updated_entry.disabled is False\n\n # reload platform and check if entities are correctly there\n await hass.config_entries.async_forward_entry_unload(\n mock_bridge_v2.config_entry, \"light\"\n )\n await hass.config_entries.async_forward_entry_setup(\n mock_bridge_v2.config_entry, \"light\"\n )\n await hass.async_block_till_done()\n\n # test light created for hue zone\n test_entity = hass.states.get(\"light.test_zone\")\n assert test_entity is not None\n assert test_entity.attributes[\"friendly_name\"] == \"Test Zone\"\n assert test_entity.state == \"on\"\n assert test_entity.attributes[\"brightness\"] == 119\n assert test_entity.attributes[\"color_mode\"] == COLOR_MODE_XY\n assert set(test_entity.attributes[\"supported_color_modes\"]) == {\n COLOR_MODE_COLOR_TEMP,\n COLOR_MODE_XY,\n }\n assert test_entity.attributes[\"min_mireds\"] == 153\n assert test_entity.attributes[\"max_mireds\"] == 500\n assert test_entity.attributes[\"is_hue_group\"] is True\n assert test_entity.attributes[\"hue_scenes\"] == {\"Dynamic Test Scene\"}\n assert test_entity.attributes[\"hue_type\"] == \"zone\"\n assert test_entity.attributes[\"lights\"] == {\n \"Hue light with color and color temperature 1\",\n \"Hue light with color and color temperature gradient\",\n \"Hue light with color and color temperature 2\",\n }\n\n # test light created for hue room\n test_entity = hass.states.get(\"light.test_room\")\n assert test_entity is not None\n assert test_entity.attributes[\"friendly_name\"] == \"Test Room\"\n assert test_entity.state == \"off\"\n assert test_entity.attributes[\"supported_color_modes\"] == [COLOR_MODE_COLOR_TEMP]\n assert test_entity.attributes[\"min_mireds\"] == 153\n assert test_entity.attributes[\"max_mireds\"] == 454\n assert test_entity.attributes[\"is_hue_group\"] is True\n assert test_entity.attributes[\"hue_scenes\"] == {\"Regular Test Scene\"}\n assert test_entity.attributes[\"hue_type\"] == \"room\"\n assert test_entity.attributes[\"lights\"] == {\n \"Hue on/off light\",\n \"Hue light with color temperature only\",\n }\n\n # Test calling the turn on service on a grouped light\n test_light_id = \"light.test_zone\"\n await hass.services.async_call(\n \"light\",\n \"turn_on\",\n {\n \"entity_id\": test_light_id,\n \"brightness_pct\": 100,\n \"xy_color\": (0.123, 0.123),\n \"transition\": 0.25,\n },\n blocking=True,\n )\n\n # PUT request should have been sent to ALL group lights with correct params\n assert len(mock_bridge_v2.mock_requests) == 3\n for index in range(0, 3):\n assert mock_bridge_v2.mock_requests[index][\"json\"][\"on\"][\"on\"] is True\n assert (\n 
mock_bridge_v2.mock_requests[index][\"json\"][\"dimming\"][\"brightness\"] == 100\n )\n assert mock_bridge_v2.mock_requests[index][\"json\"][\"color\"][\"xy\"][\"x\"] == 0.123\n assert mock_bridge_v2.mock_requests[index][\"json\"][\"color\"][\"xy\"][\"y\"] == 0.123\n assert (\n mock_bridge_v2.mock_requests[index][\"json\"][\"dynamics\"][\"duration\"] == 200\n )\n\n # Now generate update events by emitting the json we've sent as incoming events\n for index, light_id in enumerate(\n [\n \"02cba059-9c2c-4d45-97e4-4f79b1bfbaa1\",\n \"b3fe71ef-d0ef-48de-9355-d9e604377df0\",\n \"8015b17f-8336-415b-966a-b364bd082397\",\n ]\n ):\n event = {\n \"id\": light_id,\n \"type\": \"light\",\n **mock_bridge_v2.mock_requests[index][\"json\"],\n }\n mock_bridge_v2.api.emit_event(\"update\", event)\n await hass.async_block_till_done()\n await hass.async_block_till_done()\n\n # the light should now be on and have the properties we've set\n test_light = hass.states.get(test_light_id)\n assert test_light is not None\n assert test_light.state == \"on\"\n assert test_light.attributes[\"color_mode\"] == COLOR_MODE_XY\n assert test_light.attributes[\"brightness\"] == 255\n assert test_light.attributes[\"xy_color\"] == (0.123, 0.123)\n\n # Test calling the turn off service on a grouped light.\n mock_bridge_v2.mock_requests.clear()\n await hass.services.async_call(\n \"light\",\n \"turn_off\",\n {\"entity_id\": test_light_id},\n blocking=True,\n )\n\n # PUT request should have been sent to ONLY the grouped_light resource with correct params\n assert len(mock_bridge_v2.mock_requests) == 1\n assert mock_bridge_v2.mock_requests[0][\"method\"] == \"put\"\n assert mock_bridge_v2.mock_requests[0][\"json\"][\"on\"][\"on\"] is False\n\n # Now generate update event by emitting the json we've sent as incoming event\n event = {\n \"id\": \"f2416154-9607-43ab-a684-4453108a200e\",\n \"type\": \"grouped_light\",\n **mock_bridge_v2.mock_requests[0][\"json\"],\n }\n mock_bridge_v2.api.emit_event(\"update\", event)\n mock_bridge_v2.api.emit_event(\"update\", mock_bridge_v2.mock_requests[0][\"json\"])\n await hass.async_block_till_done()\n\n # the light should now be off\n test_light = hass.states.get(test_light_id)\n assert test_light is not None\n assert test_light.state == \"off\"\n\n # Test calling the turn off service on a grouped light with transition\n mock_bridge_v2.mock_requests.clear()\n test_light_id = \"light.test_zone\"\n await hass.services.async_call(\n \"light\",\n \"turn_off\",\n {\n \"entity_id\": test_light_id,\n \"transition\": 0.25,\n },\n blocking=True,\n )\n\n # PUT request should have been sent to ALL group lights with correct params\n assert len(mock_bridge_v2.mock_requests) == 3\n for index in range(0, 3):\n assert mock_bridge_v2.mock_requests[index][\"json\"][\"on\"][\"on\"] is False\n assert (\n mock_bridge_v2.mock_requests[index][\"json\"][\"dynamics\"][\"duration\"] == 200\n )\n\n # Test sending short flash effect to a grouped light\n mock_bridge_v2.mock_requests.clear()\n test_light_id = \"light.test_zone\"\n await hass.services.async_call(\n \"light\",\n \"turn_on\",\n {\n \"entity_id\": test_light_id,\n \"flash\": \"short\",\n },\n blocking=True,\n )\n\n # PUT request should have been sent to ALL group lights with correct params\n assert len(mock_bridge_v2.mock_requests) == 3\n for index in range(0, 3):\n assert (\n mock_bridge_v2.mock_requests[index][\"json\"][\"identify\"][\"action\"]\n == \"identify\"\n )\n\n # Test sending long flash effect to a grouped light\n mock_bridge_v2.mock_requests.clear()\n 
test_light_id = \"light.test_zone\"\n await hass.services.async_call(\n \"light\",\n \"turn_on\",\n {\n \"entity_id\": test_light_id,\n \"flash\": \"long\",\n },\n blocking=True,\n )\n\n # PUT request should have been sent to ALL group lights with correct params\n assert len(mock_bridge_v2.mock_requests) == 3\n for index in range(0, 3):\n assert (\n mock_bridge_v2.mock_requests[index][\"json\"][\"alert\"][\"action\"] == \"breathe\"\n )\n\n # Test sending flash effect in turn_off call\n mock_bridge_v2.mock_requests.clear()\n test_light_id = \"light.test_zone\"\n await hass.services.async_call(\n \"light\",\n \"turn_off\",\n {\n \"entity_id\": test_light_id,\n \"flash\": \"short\",\n },\n blocking=True,\n )\n\n # PUT request should have been sent to ALL group lights with correct params\n assert len(mock_bridge_v2.mock_requests) == 3\n for index in range(0, 3):\n assert (\n mock_bridge_v2.mock_requests[index][\"json\"][\"identify\"][\"action\"]\n == \"identify\"\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 1868, "n_words": 726, "vocab_size": 231, "complexity": 8, "nloc": 188, "token_counts": 1123, "n_ast_nodes": 1901, "n_identifiers": 46, "random_cut": "async def test_grouped_lights(hass, mock_bridge_v2, v2_resources_test_data):\n \n await mock_bridge_v2.api.load_test_data(v2_resources_test_data)\n\n await setup_platform(hass, mock_bridge_v2, \"light\")\n\n # test if entities for hue groups are created and disabled by default\n for entity_id in (\"light.test_zone\", \"light.test_room\"):\n ent_reg = er.async_get(hass)\n entity_entry = ent_reg.async_get(entity_id)\n\n assert entity_entry\n assert entity_entry.disabled\n assert entity_entry.disabled_by is er.RegistryEntryDisabler.INTEGRATION\n # entity should not have a device assigned\n assert entity_entry.device_id is None\n\n # enable the entity\n updated_entry = ent_reg.async_update_entity(\n entity_entry.entity_id, **{\"disabled_by\": None}\n )\n assert updated_entry != entity_entry\n assert updated_entry.disabled is False\n\n # reload platform and check if entities are correctly there\n await hass.config_entries.async_forward_entry_unload(\n mock_bridge_v2.config_entry, \"light\"\n )\n await hass.config_entries.async_forward_entry_setup(\n mock_bridge_v2.config_entry, \"light\"\n )\n await hass.async_block_till_done()\n\n # test light created for hue zone\n test_entity = hass.states.get(\"light.test_zone\")\n assert test_entity is not None\n assert test_entity.attributes[\"friendly_name\"] == \"Test Zone\"\n assert test_entity.state == \"on\"\n assert test_entity.attributes[\"brightness\"] == 119\n assert test_entity.attributes[\"color_mode\"] == COLOR_MODE_XY\n assert set(test_entity.attributes[\"supported_color_modes\"]) == {\n COLOR_MODE_COLOR_TEMP,\n COLOR_MODE_XY,\n }\n assert test_entity.attributes[\"min_mireds\"] == 153\n assert test_entity.attributes[\"max_mireds\"] == 500\n assert test_entity.attributes[\"is_hue_group\"] is True\n assert test_entity.attributes[\"hue_scenes\"] == {\"Dynamic Test Scene\"}\n assert test_entity.attributes[\"hue_type\"] == \"zone\"\n assert test_entity.attributes[\"lights\"] == {\n \"Hue light with color and color temperature 1\",\n \"Hue light with color and color temperature gradient\",\n \"Hue light with color and color temperature 2\",\n }\n\n # test light created for hue room\n test_entity = hass.states.get(\"light.test_room\")\n assert test_entity is not None\n assert 
test_entity.attributes[\"friendly_name\"] == \"Test Room\"\n assert test_entity.state == \"off\"\n assert test_entity.attributes[\"supported_color_modes\"] == [COLOR_MODE_COLOR_TEMP]\n assert test_entity.attributes[\"min_mireds\"] == 153\n assert test_entity.attributes[\"max_mireds\"] == 454\n assert test_entity.attributes[\"is_hue_group\"] is True\n assert test_entity.attributes[\"hue_scenes\"] == {\"Regular Test Scene\"}\n assert test_entity.attributes[\"hue_type\"] == \"room\"\n assert test_entity.attributes[\"lights\"] == {\n \"Hue on/off light\",\n \"Hue light with color temperature only\",\n }\n\n # Test calling the turn on service on a grouped light\n test_light_id = \"light.test_zone\"\n await hass.services.async_call(\n \"light\",\n \"turn_on\",\n {\n \"entity_id\": test_light_id,\n \"brightness_pct\": 100,\n \"xy_color\": (0.123, 0.123),\n \"transition\": 0.25,\n },\n blocking=True,\n )\n\n # PUT request should have been sent to ALL group lights with correct params\n assert len(mock_bridge_v2.mock_requests) == 3\n for index in range(0, 3):\n assert mock_bridge_v2.mock_requests[index][\"json\"][\"on\"][\"on\"] is True\n assert (\n mock_bridge_v2.mock_requests[index][\"json\"][\"dimming\"][\"brightness\"] == 100\n )\n assert mock_bridge_v2.mock_requests[index][\"json\"][\"color\"][\"xy\"][\"x\"] == 0.123\n assert mock_bridge_v2.mock_requests[index][\"json\"][\"color\"][\"xy\"][\"y\"] == 0.123\n assert (\n mock_bridge_v2.mock_requests[index][\"json\"][\"dynamics\"][\"duration\"] == 200\n )\n\n # Now generate update events by emitting the json we've sent as incoming events\n for index, light_id in enumerate(\n [\n \"02cba059-9c2c-4d45-97e4-4f79b1bfbaa1\",\n \"b3fe71ef-d0ef-48de-9355-d9e604377df0\",\n \"8015b17f-8336-415b-966a-b364bd082397\",\n ]\n ):\n event = {\n \"id\": light_id,\n \"type\": \"light\",\n **mock_bridge_v2.mock_requests[index][\"json\"],\n }\n mock_bridge_v2.api.emit_event(\"update\", event)\n await hass.async_block_till_done()\n await hass.async_block_till_done()\n\n # the light should now be on and have the properties we've set\n test_light = hass.states.get(test_light_id)\n assert test_light is not None\n assert test_light.state == \"on\"\n assert test_light.attributes[\"color_mode\"] == COLOR" }, { "id": 252887, "commit_id": "c69239bb90c55993326c324908ac78cc2a174e44", "repo": "mitmproxy", "path": "test/mitmproxy/addons/test_termlog.py", "file_name": "test_termlog.py", "fun_name": "test_output", "commit_message": "switch to stdlib logging\n\nmitmproxy previously used a homegrown logging mechanism based around\n`mitmproxy.ctx.log` and the `add_log` hook. This worked well for everything\nwe control, but does not work outside the mitmproxy universe.\nFor now we have simply ignored logging in e.g. tornado or h2, but with the\nupcoming introduction of mitmproxy_wireguard we now have a dependency\non some Rust/PyO3 code for which we definitely want logs, but which also\ncannot easily be changed to use our homegrown logging (PyO3 does the heavy\nlifting to add interoperability with stdlib logging). Long story short,\nwe want to introduce a log handler for stdlib logging.\n\nNow there are two ways how such a handler could operate:\n\n 1. We could build a handler that forwards all stdlib log events\n into our homegrown mechanism.\n 2. 
We embrace stdlib's logging as the correct way to do things,\n and get rid of our homegrown stuff.\n\nThis PR follows the second approach by removing the `add_log` hook and\nrewriting the `TermLog` and `EventStore` addons to listen for stdlib log records.\nThis means that all `mitmproxy.ctx.log.info` events are now simply `logging.info` etc.\n\nOne upside of this approach is that many parts of the codebase now don't depend\non the existence of `mitmproxy.ctx` and we can use off-the-shelf things like pytest's\n`caplog`. We can also now better colorize log output and/or add timestamps.", "code": "def test_output(capsys):\n logging.getLogger().setLevel(logging.DEBUG)\n t = termlog.TermLog()\n with taddons.context(t) as tctx:\n tctx.options.termlog_verbosity = \"info\"\n tctx.configure(t)\n logging.info(\"one\")\n logging.debug(\"two\")\n logging.warning(\"three\")\n logging.error(\"four\")\n out, err = capsys.readouterr()\n assert \"one\" in out\n assert \"two\" not in out\n assert \"three\" in out\n assert \"four\" in out\n t.done()\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 108, "n_words": 40, "vocab_size": 29, "complexity": 1, "nloc": 16, "token_counts": 102, "n_ast_nodes": 188, "n_identifiers": 23, "random_cut": "def test_output(capsys):\n logging.getLogger().setLevel(logging.DEBUG)\n t = termlog.TermLog()\n with taddons.context(t) as tctx:\n tctx.options.termlog_verbosity = \"info\"\n tctx.configure(t)\n logging.info(\"one\")\n logging.debug(\"two\")\n logging.warning(\"three\")\n logging.error(\"four\")\n out, err = capsys.readouterr()\n assert \"one\" in out\n assert \"two\" not in out\n assert \"three\" in out\n asse" }, { "id": 38973, "commit_id": "e46d808a1b6cb7e04cb2806e38547b1e3e50c25a", "repo": "DeepSpeed", "path": "deepspeed/runtime/engine.py", "file_name": "engine.py", "fun_name": "_save_moe_checkpoint", "commit_message": "MoE inference + PR-MoE model support (#1705)\n\nCo-authored-by: Reza Yazdani \r\nCo-authored-by: Zhewei Yao \r\nCo-authored-by: Ammar Ahmad Awan \r\nCo-authored-by: Jeff Rasley \r\nCo-authored-by: Samyam Rajbhandari ", "code": "def _save_moe_checkpoint(self, save_dir, tag, client_state={}):\n save_path = self._get_ckpt_name(save_dir, tag)\n # A hack to save the checkpointing directory. Pipeline parallelism overrides\n # module_state_dict() and uses this path to save the model. module_state_dict()\n # then instead just returns None.\n\n # Using layer_#_export_# to save the model's expert state_dict\n moe_layer_id = 0\n for n_module, module in self.module.named_modules():\n if isinstance(module, MoE): # and torch.distributed.get_rank() == 0:\n group_name = module.expert_group_name\n num_local_experts = module.num_local_experts\n expp_rank = groups.get_expert_parallel_rank(group_name)\n exp_dp_rank = groups.get_expert_data_parallel_rank(group_name)\n # print(expp_rank, exp_dp_rank)\n if exp_dp_rank != 0:\n moe_layer_id += 1\n continue\n\n # get all moe parameters\n moe_state_dict = {}\n for n, p in module.state_dict().items():\n if 'expert' in n and 'moe.gate.wg.weight' not in n:\n moe_state_dict[n_module + '.' + n] = p\n moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.'\n # print(moe_state_dict.keys()) # until now, everything is fine. 
So the bug happens at next few lines\n # Reorder the moe name rank, so that each checkpoint only has one expert\n experts_state_dict = defaultdict(dict)\n for key in list(moe_state_dict.keys()):\n m = re.match(f\".*{moe_str_prefix}([0-9]+).*\", key)\n\n local_expert_id = None\n if not m:\n logger.warn(f'No expert found in key {key}.')\n else:\n local_expert_id = m.group(1)\n\n global_expert_id = expp_rank * \\\n num_local_experts + int(local_expert_id)\n expert_key = key.replace(f'{moe_str_prefix}{local_expert_id}',\n f'{moe_str_prefix}{global_expert_id}')\n experts_state_dict[str(\n global_expert_id)][expert_key] = moe_state_dict.pop(key)\n\n # let save the moe parameters\n for global_expert_id, expert_state_dict in experts_state_dict.items():\n # save the moe parameters\n moe_save_path = self._get_expert_ckpt_name(\n save_dir,\n moe_layer_id,\n global_expert_id,\n tag)\n torch.save(expert_state_dict, moe_save_path)\n moe_layer_id += 1\n\n self._curr_ckpt_path = os.path.join(save_dir, tag)\n\n largest_group_name = groups.get_max_expert_size_name()\n expp_rank = groups.get_expert_parallel_rank(largest_group_name)\n exp_dp_rank = groups.get_expert_data_parallel_rank(largest_group_name)\n\n # In the case of E + D parallelism, only the\n # first expert parallel group should save the expert weights\n # since each expert parallel group is a copy of the model's experts\n if exp_dp_rank != 0:\n return\n\n # Save optimizer states. They are different across each exp parallel rank.\n optimizer_state = {\n 'optimizer':\n self.optimizer.state_dict()\n if self.optimizer and not self.zero_optimization() else None\n }\n torch.save(optimizer_state,\n self._get_optimizer_ckpt_name(save_dir,\n tag,\n expp_rank))\n\n # get non-moe parameters\n model_state_dict = self._get_non_moe_state_dict(self.module_state_dict())\n\n if expp_rank == 0:\n # TODO: update num experts info,.. in checkpoint\n state = {\n 'module':\n model_state_dict,\n 'lr_scheduler':\n self.lr_scheduler.state_dict()\n if self.lr_scheduler is not None else None,\n 'sparse_tensor_module_names':\n self.sparse_tensor_module_names,\n 'skipped_steps':\n self.skipped_steps,\n 'global_steps':\n self.global_steps,\n 'global_samples':\n self.global_samples,\n 'dp_world_size':\n self.dp_world_size,\n 'mp_world_size':\n self.mp_world_size,\n 'num_experts':\n self.num_experts\n }\n state.update(client_state)\n logger.info(f'Saving model checkpoint: {save_path}')\n torch.save(state, save_path)\n self._curr_save_path = None\n", "url": "https://github.com/microsoft/DeepSpeed.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1791, "n_words": 357, "vocab_size": 218, "complexity": 15, "nloc": 81, "token_counts": 451, "n_ast_nodes": 767, "n_identifiers": 76, "random_cut": "def _save_moe_checkpoint(self, save_dir, tag, client_state={}):\n save_path = self._get_ckpt_name(save_dir, tag)\n # A hack to save the checkpointing directory. Pipeline parallelism overrides\n # module_state_dict() and uses this path to save the model. 
module_state_dict()\n # then instead just returns None.\n\n # Using layer_#_export_# to save the model's expert state_dict\n moe_layer_id = 0\n for n_module, module in self.module.named_modules():\n if isinstance(module, MoE): # and torch.distributed.get_rank() == 0:\n group_name = module.expert_group_name\n num_local_experts = module.num_local_experts\n expp_rank = groups.get_expert_parallel_rank(group_name)\n exp_dp_rank = groups.get_expert_data_parallel_rank(group_name)\n # print(expp_rank, exp_dp_rank)\n if exp_dp_rank != 0:\n moe_layer_id += 1\n continue\n\n # get all moe parameters\n moe_state_dict = {}\n for n, p in module.state_dict().items():\n if 'expert' in n and 'moe.gate.wg.weight' not in n:\n moe_state_dict[n_module + '.' + n] = p\n moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.'\n # print(moe_state_dict.keys()) # until now, everything is fine. So the bug happens at next few lines\n # Reorder the moe name rank, so that each checkpoint only has one expert\n experts_state_dict = defaultdict(dict)\n for key in list(moe_state_dict.keys()):\n m = re.match(f\".*{moe_str_prefix}([0-9]+).*\", key)\n\n local_expert_id = None\n if not m:\n logger.warn(f'No expert found in key {key}.')\n else:\n local_expert_id = m.group(1)\n\n global_expert_id = expp_rank * \\\n num_local_experts + int(local_expert_id)\n expert_key = key.replace(f'{moe_str_prefix}{local_expert_id}',\n f'{moe_str_prefix}{global_expert_id}')\n experts_state_dict[str(\n global_expert_id)][expert_key] = moe_state_dict.pop(key)\n\n # let save the moe parameters\n for global_expert_id, expert_state_dict in experts_state_dict.items():\n # save the moe parameters\n moe_save_path = self._get_expert_ckpt_name(\n save_dir,\n moe_layer_id,\n global_expert_id,\n tag)\n torch.save(expert_state_dict, moe_save_path)\n moe_layer_id += 1\n\n self._curr_ckpt_path = os.path.join(save_dir, tag)\n\n largest_group_name = groups.get_max_expert_size_name()\n expp_rank = groups.get_expert_parallel_rank(largest_group_name)\n exp_dp_rank = groups.get_expert_data_parallel_rank(largest_group_name)\n\n # In the case of E + D parallelism, only the\n # first expert parallel group should save the expert weights\n # since each expert parallel group is a copy of the model's experts\n if exp_dp_rank != 0:\n return\n\n # Save optimizer states. 
They are different across each exp parallel rank.\n optimizer_state = {\n 'optimizer':\n self.optimizer.state_dict()\n if self.optimizer and not self.zero_optimization() else None\n }\n torch.save(optimizer_state,\n self._get_optimizer_ckpt_name(save_dir,\n tag,\n expp_rank))\n\n # get non-moe parameters\n model_state_dict = self._get_non_moe_state_dict(self.module_state_dict())\n\n if ex" }, { "id": 161019, "commit_id": "b617a87ee40ab384767a27335313c2c65ee094ec", "repo": "MockingBird", "path": "ppg2mel/rnn_decoder_mol.py", "file_name": "rnn_decoder_mol.py", "fun_name": "attend", "commit_message": "Init ppg extractor and ppg2mel (#375)\n\n* Init ppg extractor and ppg2mel\r\n\r\n* add preprocess and training\r\n\r\n* FIx known issues\r\n\r\n* Update __init__.py\r\n\r\nAllow to gen audio\r\n\r\n* Fix length issue\r\n\r\n* Fix bug of preparing fid\r\n\r\n* Fix sample issues\r\n\r\n* Add UI usage of PPG-vc", "code": "def attend(self, decoder_input):\n cell_input = torch.cat((decoder_input, self.attention_context), -1)\n self.attention_hidden, self.attention_cell = self.attention_rnn(\n cell_input, (self.attention_hidden, self.attention_cell))\n self.attention_context, attention_weights = self.attention_layer(\n self.attention_hidden, self.memory, None, self.mask)\n \n decoder_rnn_input = torch.cat(\n (self.attention_hidden, self.attention_context), -1)\n\n return decoder_rnn_input, self.attention_context, attention_weights\n", "url": "https://github.com/babysor/MockingBird.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 108, "n_words": 33, "vocab_size": 24, "complexity": 1, "nloc": 9, "token_counts": 99, "n_ast_nodes": 144, "n_identifiers": 15, "random_cut": "def attend(self, decoder_input):\n cell_input = torch.cat((decoder_input, self.atte" }, { "id": 161806, "commit_id": "878632c354f10fc5b20ec5ffb4d7e070367de719", "repo": "rich", "path": "tests/test_inspect.py", "file_name": "test_inspect.py", "fun_name": "test_inspect_text", "commit_message": "Fix test_inspect_text to support Python 3.11", "code": "def test_inspect_text():\n num_attributes = 34 if sys.version_info >= (3, 11) else 33\n expected = (\n \"╭──────────────── ─────────────────╮\\n\"\n \"│ str(object='') -> str │\\n\"\n \"│ str(bytes_or_buffer[, encoding[, errors]]) -> │\\n\"\n \"│ str │\\n\"\n \"│ │\\n\"\n f\"│ {num_attributes} attribute(s) not shown. Run │\\n\"\n \"│ inspect(inspect) for options. 
│\\n\"\n \"╰────────────────────────────────────────────────╯\\n\"\n )\n print(repr(expected))\n assert expected == render(\"Hello\")\n\n\n@skip_py36\n@skip_py37\n@skip_pypy3", "url": "https://github.com/Textualize/rich.git", "language": "Python", "ast_errors": "@skip_py36\n@skip_py37\n@skip_pypy3", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 273, "n_words": 57, "vocab_size": 44, "complexity": 2, "nloc": 14, "token_counts": 46, "n_ast_nodes": 108, "n_identifiers": 11, "random_cut": "def test_inspect_text():\n num_attributes = 34 if sys.version_info >= (3, 11) else 33\n expected = (\n \"╭──────────────── ─────────────────╮\\n\"\n \"│ str(object='') -> str │\\n\"\n \"│ str(bytes_or_buffer[, encoding[, errors]]) -> │\\n\"\n " }, { "id": 177976, "commit_id": "bb6c48b5a4203c97db1747419b25bd39c996b958", "repo": "label-studio", "path": "label_studio/io_storages/localfiles/models.py", "file_name": "models.py", "fun_name": "get_data", "commit_message": "fix: Quote relative path (#2633)", "code": "def get_data(self, key):\n path = Path(key)\n if self.use_blob_urls:\n # include self-hosted links pointed to local resources via\n # {settings.HOSTNAME}/data/local-files?d=\n document_root = Path(settings.LOCAL_FILES_DOCUMENT_ROOT)\n relative_path = str(path.relative_to(document_root))\n return {settings.DATA_UNDEFINED_NAME: f'{settings.HOSTNAME}/data/local-files/?d={quote(str(relative_path))}'}\n\n try:\n with open(path, encoding='utf8') as f:\n value = json.load(f)\n except (UnicodeDecodeError, json.decoder.JSONDecodeError):\n raise ValueError(\n f\"Can\\'t import JSON-formatted tasks from {key}. If you're trying to import binary objects, \"\n f\"perhaps you've forgot to enable \\\"Treat every bucket object as a source file\\\" option?\")\n\n if not isinstance(value, dict):\n raise ValueError(f\"Error on key {key}: For {self.__class__.__name__} your JSON file must be a dictionary with one task.\") # noqa\n return value\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 270, "n_words": 95, "vocab_size": 80, "complexity": 4, "nloc": 16, "token_counts": 104, "n_ast_nodes": 216, "n_identifiers": 29, "random_cut": "def get_data(self, key):\n path = Path(key)\n if self.use_blob_urls:\n # include self-hosted links pointed to local resources via\n # {settings.HOSTNAME}/data/local-files?d=\n document_root = Path(settings.LOCAL_FILES_DOCUMENT_ROOT)\n relative_path = str(path.relative_to(document_root))\n return {settings.DATA_UNDEFINED_NAME: f'{settings.HOSTNAME}/data/local-files/?d={quote(str(relative_path))}'}\n\n try:\n wi" }, { "id": 148608, "commit_id": "0a52d79208fa7514749c78a5cda8289173f7243f", "repo": "freqtrade", "path": "tests/rpc/test_rpc_telegram.py", "file_name": "test_rpc_telegram.py", "fun_name": "test_forcesell_handle_invalid", "commit_message": "Update forcesell to work as forceexit", "code": "def test_forcesell_handle_invalid(default_conf, update, mocker) -> None:\n mocker.patch('freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price',\n return_value=15000.0)\n\n telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf)\n patch_get_signal(freqtradebot)\n\n # Trader is not running\n freqtradebot.state = State.STOPPED\n # /forcesell 1\n context = MagicMock()\n context.args = [\"1\"]\n telegram._forceexit(update=update, context=context)\n assert msg_mock.call_count == 1\n assert 'not running' in msg_mock.call_args_list[0][0][0]\n\n # No argument\n 
msg_mock.reset_mock()\n freqtradebot.state = State.RUNNING\n context = MagicMock()\n context.args = []\n telegram._forceexit(update=update, context=context)\n assert msg_mock.call_count == 1\n assert \"You must specify a trade-id or 'all'.\" in msg_mock.call_args_list[0][0][0]\n\n # Invalid argument\n msg_mock.reset_mock()\n freqtradebot.state = State.RUNNING\n # /forcesell 123456\n context = MagicMock()\n context.args = [\"123456\"]\n telegram._forceexit(update=update, context=context)\n assert msg_mock.call_count == 1\n assert 'invalid argument' in msg_mock.call_args_list[0][0][0]\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 198, "n_words": 99, "vocab_size": 54, "complexity": 1, "nloc": 25, "token_counts": 204, "n_ast_nodes": 326, "n_identifiers": 22, "random_cut": "def test_forcesell_handle_invalid(default_conf, update, mocker) -> None:\n mocker.patch('freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price',\n return_value=15000.0)\n\n telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf)\n patch_get_signal(freqtradebot)\n\n # Trader is not running\n freqtradebot.state = State.STOPPED\n # /forcesell 1\n context = MagicMock()\n context.args = [\"1\"]\n telegram._forceexit(update=update, context=context)\n assert msg_mock.call_count == 1\n assert 'not running' in msg_mock.call_args_list[0][0][0]\n\n # No argument\n msg_mock.reset_mock()\n freqtradebot.state = State.RUNNING\n context = MagicMock()\n context.args = []\n telegram._forceexit(update=update, context=context)\n assert msg_mock.call_count == 1\n assert \"You must specify a trade-id or 'all'.\" in msg_mock.call_args_list[0][0][0]\n\n # Invalid argument\n msg_mock.reset" }, { "id": 143650, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/examples/env/two_step_game.py", "file_name": "two_step_game.py", "fun_name": "_obs", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _obs(self):\n if self.with_state:\n return {\n self.agent_1: {\"obs\": self.agent_1_obs(), ENV_STATE: self.state},\n self.agent_2: {\"obs\": self.agent_2_obs(), ENV_STATE: self.state},\n }\n else:\n return {self.agent_1: self.agent_1_obs(), self.agent_2: self.agent_2_obs()}\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 99, "n_words": 23, "vocab_size": 17, "complexity": 2, "nloc": 8, "token_counts": 77, "n_ast_nodes": 122, "n_identifiers": 9, "random_cut": "def _obs(self):\n if self.with_state:\n return {\n self.agent_1: {\"obs\": self.agent_1_obs(), ENV_STATE: self.state},\n " }, { "id": 188854, "commit_id": "2e56a2de31e44251e4ddf10943ea1a018ab13254", "repo": "calibre", "path": "src/calibre/ebooks/oeb/polish/container.py", "file_name": "container.py", "fun_name": "has_name_and_is_not_empty", "commit_message": "Fix #1956779 [Cant read epub file written using ebooklib](https://bugs.launchpad.net/calibre/+bug/1956779)", "code": "def has_name_and_is_not_empty(self, name):\n if not self.has_name(name):\n return False\n try:\n return os.path.getsize(self.name_path_map[name]) > 0\n except OSError:\n return False\n", "url": "https://github.com/kovidgoyal/calibre.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 70, "n_words": 17, "vocab_size": 14, 
"complexity": 3, "nloc": 7, "token_counts": 41, "n_ast_nodes": 65, "n_identifiers": 9, "random_cut": "def has_name_and_is_not_empty(self, name):\n if not self.has_name(name):\n return False\n try:\n return os.path.getsize(self.name_path_map[name]) > 0\n except OSError:\n" }, { "id": 285052, "commit_id": "b6f52429d1206ec67302530c8e788bc417a921b3", "repo": "OpenBBTerminal", "path": "openbb_terminal/helper_funcs.py", "file_name": "helper_funcs.py", "fun_name": "get_flair", "commit_message": "allow user to set flag (#2074)", "code": "def get_flair() -> str:\n \n flairs = {\n \":openbb\": \"(🦋)\",\n \":rocket\": \"(🚀)\",\n \":diamond\": \"(💎)\",\n \":stars\": \"(✨)\",\n \":baseball\": \"(⚾)\",\n \":boat\": \"(⛵)\",\n \":phone\": \"(☎)\",\n \":mercury\": \"(☿)\",\n \":hidden\": \"\",\n \":sun\": \"(☼)\",\n \":moon\": \"(☾)\",\n \":nuke\": \"(☢)\",\n \":hazard\": \"(☣)\",\n \":tunder\": \"(☈)\",\n \":king\": \"(♔)\",\n \":queen\": \"(♕)\",\n \":knight\": \"(♘)\",\n \":recycle\": \"(♻)\",\n \":scales\": \"(⚖)\",\n \":ball\": \"(⚽)\",\n \":golf\": \"(⛳)\",\n \":piece\": \"(☮)\",\n \":yy\": \"(☯)\",\n }\n\n flair = (\n flairs[str(obbff.USE_FLAIR)]\n if str(obbff.USE_FLAIR) in flairs\n else str(obbff.USE_FLAIR)\n )\n if obbff.USE_DATETIME and get_user_timezone_or_invalid() != \"INVALID\":\n dtime = datetime.now(pytz.timezone(get_user_timezone())).strftime(\n \"%Y %b %d, %H:%M\"\n )\n\n # if there is no flair, don't add an extra space after the time\n if flair == \"\":\n return f\"{dtime}\"\n\n return f\"{dtime} {flair}\"\n\n return flair\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 364, "n_words": 104, "vocab_size": 92, "complexity": 5, "nloc": 40, "token_counts": 176, "n_ast_nodes": 364, "n_identifiers": 15, "random_cut": "def get_flair() -> str:\n \n flairs = {\n \":openbb\": \"(🦋)\",\n \":rocket\": \"(🚀)\",\n \":diamond\": \"(💎)\",\n \":stars\": \"(✨)\",\n \":baseball\": \"(⚾)\",\n \":boat\": \"(⛵)\",\n \":phone\": \"(☎)\",\n \":mercury\": \"(☿)\",\n \":hidden\": \"\",\n \":sun\": \"(☼)\",\n \":moon\": \"(☾)\",\n \":nuke\": \"(☢)\",\n \":hazard\": \"(☣)\",\n \":tunder\": \"(☈)\",\n \":king\": \"(♔)\",\n \":queen\": \"(♕)\",\n \":knight\": \"(♘)\",\n \":recycle\": \"(♻)\",\n \":scales\": \"(⚖)\",\n \":ball\": \"(⚽)\",\n \":golf\": \"(⛳)\",\n \":piece\": \"(☮)\",\n \":yy\": \"(☯)\",\n }\n\n flair = (\n flairs[str(obbff.USE_FLAIR)]\n if str(obbff.USE_FLAIR) in flairs\n else str(obbff.USE_FLAIR)\n )\n if obbff.USE_DATETIME and get_user_timezone_or_invalid() != \"INVALID\":\n dtime = datetime.now(pytz.timezone(get_user_timezone())).strftime(\n \"%Y %b %d, %H:%M\"\n )\n\n # if there is no" }, { "id": 87820, "commit_id": "4890581d67a73e29dc9e944246c4e468226ba1dd", "repo": "sentry", "path": "src/sentry/templatetags/sentry_avatars.py", "file_name": "sentry_avatars.py", "fun_name": "profile_photo_url", "commit_message": "fix(issues): Make system URL prefix handling more consistent (#40968)\n\nfixing up PR [here](https://github.com/getsentry/sentry/pull/39597) so\r\nthat tests pass\r\nthis makes system url prefix handling more consistent by using the sentry.http.utils.absolute_uri function\r\n\r\nCo-authored-by: Ville Laitila ", "code": "def profile_photo_url(context, user_id, size=None):\n try:\n avatar = UserAvatar.objects.get_from_cache(user=user_id)\n except UserAvatar.DoesNotExist:\n return\n url = reverse(\"sentry-user-avatar-url\", args=[avatar.ident])\n if size:\n url += \"?\" + 
urlencode({\"s\": size})\n return absolute_uri(url)\n\n\n# Don't use this in any situations where you're rendering more\n# than 1-2 avatars. It will make a request for every user!\n@register.simple_tag(takes_context=True)", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@register.simple_tag(takes_context=True)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 81, "n_words": 49, "vocab_size": 45, "complexity": 3, "nloc": 9, "token_counts": 65, "n_ast_nodes": 125, "n_identifiers": 19, "random_cut": "def profile_photo_url(context, user_id, size=None):\n try:\n avatar = UserAvatar.objects.get_from_cache(user=user_id)\n except UserAvatar.DoesNotExist:\n return\n url = reverse(\"sentry-user-avatar-url\", args=[avatar.ident])\n if size:\n url += \"?\" + urlencode({\"s\": size})\n return absolute_uri(url)\n\n\n# Don't use this in any situations where you're renderi" }, { "id": 9512, "commit_id": "7375ee364e0df2a417f92593e09557f1b2a3575a", "repo": "insightface", "path": "reconstruction/ostec/external/stylegan2/training/loss.py", "file_name": "loss.py", "fun_name": "G_logistic", "commit_message": "initialize ostec", "code": "def G_logistic(G, D, opt, training_set, minibatch_size):\n _ = opt\n latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])\n labels = training_set.get_random_labels_tf(minibatch_size)\n fake_images_out = G.get_output_for(latents, labels, is_training=True)\n fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)\n loss = -tf.nn.softplus(fake_scores_out) # log(1-sigmoid(fake_scores_out)) # pylint: disable=invalid-unary-operand-type\n return loss, None\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 58, "n_words": 38, "vocab_size": 30, "complexity": 1, "nloc": 8, "token_counts": 88, "n_ast_nodes": 132, "n_identifiers": 20, "random_cut": "def G_logistic(G, D, opt, training_set, minibatch_size):\n _ = opt\n latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])\n labels = training_set.get_random_labels_tf(minibatch_size)\n fake_images_out = G.get_output_for(latents, labels, is_training=True)\n fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)\n loss = -tf.nn.softplus(fake_scores_out) # log(1-sigmoid(fake_scores_out)) # pylint: disable=invalid-unary-operand-type\n return loss, No" }, { "id": 114644, "commit_id": "ce99adc96da2e6f98f722f9e3733af00204b26f3", "repo": "mindsdb", "path": "tests/integration_tests/flows/test_http.py", "file_name": "test_http.py", "fun_name": "test_06_sql_create_database", "commit_message": "http test", "code": "def test_06_sql_create_database(self):\n \n print(f'\\nExecuting {inspect.stack()[0].function}')\n created_db_names = []\n for db_type, db_creds in self.sql_db_creds.items():\n queries = [\n {\n 'create': 'CREATE DATASOURCE',\n 'drop': 'DROP DATASOURCE'\n }, {\n 'create': 'CREATE DATABASE',\n 'drop': 'DROP DATABASE'\n }, {\n 'create': 'CREATE DATABASE',\n 'drop': None\n }\n ]\n for query in queries:\n create_query = query['create']\n drop_query = query['drop']\n db_name = db_type.upper()\n created_db_names.append(db_name)\n with self.subTest(msg=f'{db_type}'):\n print(f\"\\nExecuting {self._testMethodName} ({__name__}.{self.__class__.__name__}) [{db_name}]\")\n query = f\n self.sql_via_http(query, RESPONSE_TYPE.OK)\n self.assertTrue(db_name in self.show_databases())\n if drop_query is not None:\n 
self.sql_via_http(f'{drop_query} {db_name}', RESPONSE_TYPE.OK)\n self.assertTrue(db_name.upper() not in self.show_databases())\n\n for query in ['show databases', 'show datasources']:\n resp = self.sql_via_http(query, RESPONSE_TYPE.TABLE)\n db_names = [x[0] for x in resp['data']]\n for name in created_db_names:\n self.assertTrue(name in db_names)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 610, "n_words": 104, "vocab_size": 68, "complexity": 7, "nloc": 38, "token_counts": 216, "n_ast_nodes": 450, "n_identifiers": 35, "random_cut": "def test_06_sql_create_database(self):\n \n print(f'\\nExecuting {inspect.stack()[0].function}')\n created_db_names = []\n for db_type, db_creds in self.sql_db_creds.items():\n queries = [\n {\n 'create': 'CREATE DATASOURCE',\n 'drop': 'DROP DATASOURCE'\n }, {\n 'create': 'CREATE DATABASE',\n 'drop': 'DROP" }, { "id": 94707, "commit_id": "cdca9910c03df835f35dd02a09120fdbd96df22b", "repo": "sentry", "path": "src/sentry/utils/performance_issues/performance_detection.py", "file_name": "performance_detection.py", "fun_name": "get_default_detection_settings", "commit_message": "feat(perf): Add detection for render-blocking asset performance issues (#37826)\n\n* feat(perf): Add detection for render-blocking asset performance issues\r\n\r\nTag transactions that have slow asset load spans before a slow FCP as having\r\nrender-blocking assets. The thresholds are configurable, but currently we're\r\nlooking for transactions with an FCP between 2s and 10s, where an asset load\r\ntakes up at least 25% of that time.\r\n\r\nThe thresholds will be tuned before we start generating\r\nactual Performance Issues from this data - tagging the transactions will let us\r\nsee what we're detecting it and validate/tune it before it becomes visible to\r\nusers.\r\n\r\nThis detector's use of event properties is a little awkward given the current\r\n`PerformanceDetector` interface, but I thought it would be better to get the\r\nrest of our planned detectors in before we refactor too much.\r\n\r\nFixes PERF-1677", "code": "def get_default_detection_settings():\n return {\n DetectorType.DUPLICATE_SPANS: [\n {\n \"count\": 5,\n \"cumulative_duration\": 500.0, # ms\n \"allowed_span_ops\": [\"db\", \"http\"],\n }\n ],\n DetectorType.DUPLICATE_SPANS_HASH: [\n {\n \"count\": 5,\n \"cumulative_duration\": 500.0, # ms\n \"allowed_span_ops\": [\"http\"],\n },\n ],\n DetectorType.SEQUENTIAL_SLOW_SPANS: [\n {\n \"count\": 3,\n \"cumulative_duration\": 1200.0, # ms\n \"allowed_span_ops\": [\"db\", \"http\", \"ui\"],\n }\n ],\n DetectorType.SLOW_SPAN: [\n {\n \"duration_threshold\": 1000.0, # ms\n \"allowed_span_ops\": [\"db\"],\n },\n {\n \"duration_threshold\": 2000.0, # ms\n \"allowed_span_ops\": [\"http\"],\n },\n ],\n DetectorType.LONG_TASK_SPANS: [\n {\n \"cumulative_duration\": 500.0, # ms\n \"allowed_span_ops\": [\"ui.long-task\", \"ui.sentry.long-task\"],\n }\n ],\n DetectorType.RENDER_BLOCKING_ASSET_SPAN: {\n \"fcp_minimum_threshold\": 2000.0, # ms\n \"fcp_maximum_threshold\": 10000.0, # ms\n \"fcp_ratio_threshold\": 0.25,\n \"allowed_span_ops\": [\"resource.link\", \"resource.script\"],\n },\n }\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 592, "n_words": 94, "vocab_size": 41, "complexity": 1, "nloc": 46, "token_counts": 182, "n_ast_nodes": 290, "n_identifiers": 8, "random_cut": "def 
get_default_detection_settings():\n re" }, { "id": 34040, "commit_id": "28e091430eea9e0d40839e56fd0d57aec262f5f9", "repo": "transformers", "path": "tests/test_modeling_nystromformer.py", "file_name": "test_modeling_nystromformer.py", "fun_name": "test_model_from_pretrained", "commit_message": "Add Nystromformer (#14659)\n\n* Initial commit\r\n\r\n* Config and modelling changes\r\n\r\nAdded Nystromformer-specific attributes to config and removed all decoder functionality from modelling.\r\n\r\n* Modelling and test changes\r\n\r\nAdded Nystrom approximation and removed decoder tests.\r\n\r\n* Code quality fixes\r\n\r\n* Modeling changes and conversion script\r\n\r\nInitial commits to conversion script, modeling changes.\r\n\r\n* Minor modeling changes and conversion script\r\n\r\n* Modeling changes\r\n\r\n* Correct modeling, add tests and documentation\r\n\r\n* Code refactor\r\n\r\n* Remove tokenizers\r\n\r\n* Code refactor\r\n\r\n* Update __init__.py\r\n\r\n* Fix bugs\r\n\r\n* Update src/transformers/__init__.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update src/transformers/__init__.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/nystromformer/__init__.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update docs/source/model_doc/nystromformer.mdx\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/nystromformer/configuration_nystromformer.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/nystromformer/configuration_nystromformer.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/nystromformer/configuration_nystromformer.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/nystromformer/configuration_nystromformer.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/nystromformer/convert_nystromformer_original_pytorch_checkpoint_to_pytorch.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/nystromformer/configuration_nystromformer.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update modeling and test_modeling\r\n\r\n* Code refactor\r\n\r\n* .rst to .mdx\r\n\r\n* doc changes\r\n\r\n* Doc changes\r\n\r\n* Update modeling_nystromformer.py\r\n\r\n* Doc changes\r\n\r\n* Fix copies\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update configuration_nystromformer.py\r\n\r\n* Fix copies\r\n\r\n* Update tests/test_modeling_nystromformer.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update test_modeling_nystromformer.py\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Lysandre Debut \r\n\r\n* Fix code style\r\n\r\n* Update modeling_nystromformer.py\r\n\r\n* Update modeling_nystromformer.py\r\n\r\n* Fix code style\r\n\r\n* Reformat modeling file\r\n\r\n* Update 
modeling_nystromformer.py\r\n\r\n* Modify NystromformerForMultipleChoice\r\n\r\n* Fix code quality\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Code style changes and torch.no_grad()\r\n\r\n* make style\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: Lysandre Debut \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def test_model_from_pretrained(self):\n for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = NystromformerModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n\n@require_torch", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@require_torch", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 38, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 4, "token_counts": 28, "n_ast_nodes": 49, "n_identifiers": 9, "random_cut": "def test_model_from_pretrained(self):\n for model_name in NYSTROMFORMER_PR" }, { "id": 41944, "commit_id": "c2270e79aea173f7b4023b5078921d140855217c", "repo": "seaborn", "path": "tests/_core/test_scales.py", "file_name": "test_scales.py", "fun_name": "test_fill_nunique_warning", "commit_message": "Add Scale.label interface for formatting ticks (#2877)\n\n* Step one of bikeshedding scale names\r\n\r\n* Remove concept of separate Scale / ScaleSpec\r\n\r\n* Begin transitioning Scale tick/label parameterization to declarations\r\n\r\n* Add initial draft of Continuous.label API\r\n\r\n* Note to self\r\n\r\n* Add label tests\r\n\r\n* Add docstring for Continuous.label\r\n\r\n* Fix tests\r\n\r\n* Partialy update Nominal\r\n\r\n* Hide _priority from dataclass signature\r\n\r\n* A little more test coverage and documentation\r\n\r\n* MPL<3.3 compat in temporal tests\r\n\r\n* Use (sym)log locator / formatter for symlog scales by default\r\n\r\n* Remove code duplication\r\n\r\n* Rename transform parameter to trans\r\n\r\n* Pass through residual TODO comments\r\n\r\n* Lint", "code": "def test_fill_nunique_warning(self):\n\n x = pd.Series([\"a\", \"b\", \"c\", \"a\", \"b\"], name=\"x\")\n with pytest.warns(UserWarning, match=\"The variable assigned to fill\"):\n s = Nominal()._setup(x, Fill())\n assert_array_equal(s(x), [True, False, True, True, False])\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 58, "n_words": 27, "vocab_size": 25, "complexity": 1, "nloc": 5, "token_counts": 72, "n_ast_nodes": 120, "n_identifiers": 15, "random_cut": "def test_fill_nunique_warning(self):\n\n x = pd.Series([\"a\", \"b\", \"c\", \"a\", \"b\"], name=\"x\")\n with pytest.warns(UserWarning, match=\"The variable assigned to fill\"):\n s = Nominal()._setup(x, Fill())\n assert_array_equal(s(x), [True, False, True, True, False])\n" }, { "id": 18403, "commit_id": "da2770a5ec3fa6b18e789f202cdebdab71925f64", "repo": "ccxt", "path": "python/ccxt/async_support/independentreserve.py", "file_name": "independentreserve.py", "fun_name": "describe", "commit_message": "1.72.96\n\n[ci skip]", "code": "def describe(self):\n return self.deep_extend(super(independentreserve, self).describe(), {\n 'id': 'independentreserve',\n 'name': 'Independent Reserve',\n 'countries': ['AU', 'NZ'], # Australia, New Zealand\n 'rateLimit': 1000,\n 'has': {\n 'CORS': None,\n 'spot': True,\n 'margin': False,\n 'swap': 
False,\n 'future': False,\n 'option': False,\n 'addMargin': False,\n 'cancelOrder': True,\n 'createOrder': True,\n 'createReduceOnlyOrder': False,\n 'fetchBalance': True,\n 'fetchBorrowRate': False,\n 'fetchBorrowRateHistories': False,\n 'fetchBorrowRateHistory': False,\n 'fetchBorrowRates': False,\n 'fetchBorrowRatesPerSymbol': False,\n 'fetchClosedOrders': True,\n 'fetchFundingHistory': False,\n 'fetchFundingRate': False,\n 'fetchFundingRateHistory': False,\n 'fetchFundingRates': False,\n 'fetchIndexOHLCV': False,\n 'fetchIsolatedPositions': False,\n 'fetchLeverage': False,\n 'fetchLeverageTiers': False,\n 'fetchMarkets': True,\n 'fetchMarkOHLCV': False,\n 'fetchMyTrades': True,\n 'fetchOpenOrders': True,\n 'fetchOrder': True,\n 'fetchOrderBook': True,\n 'fetchPosition': False,\n 'fetchPositions': False,\n 'fetchPositionsRisk': False,\n 'fetchPremiumIndexOHLCV': False,\n 'fetchTicker': True,\n 'fetchTrades': True,\n 'reduceMargin': False,\n 'setLeverage': False,\n 'setMarginMode': False,\n 'setPositionMode': False,\n },\n 'urls': {\n 'logo': 'https://user-images.githubusercontent.com/51840849/87182090-1e9e9080-c2ec-11ea-8e49-563db9a38f37.jpg',\n 'api': {\n 'public': 'https://api.independentreserve.com/Public',\n 'private': 'https://api.independentreserve.com/Private',\n },\n 'www': 'https://www.independentreserve.com',\n 'doc': 'https://www.independentreserve.com/API',\n },\n 'api': {\n 'public': {\n 'get': [\n 'GetValidPrimaryCurrencyCodes',\n 'GetValidSecondaryCurrencyCodes',\n 'GetValidLimitOrderTypes',\n 'GetValidMarketOrderTypes',\n 'GetValidOrderTypes',\n 'GetValidTransactionTypes',\n 'GetMarketSummary',\n 'GetOrderBook',\n 'GetAllOrders',\n 'GetTradeHistorySummary',\n 'GetRecentTrades',\n 'GetFxRates',\n 'GetOrderMinimumVolumes',\n 'GetCryptoWithdrawalFees',\n ],\n },\n 'private': {\n 'post': [\n 'GetOpenOrders',\n 'GetClosedOrders',\n 'GetClosedFilledOrders',\n 'GetOrderDetails',\n 'GetAccounts',\n 'GetTransactions',\n 'GetFiatBankAccounts',\n 'GetDigitalCurrencyDepositAddress',\n 'GetDigitalCurrencyDepositAddresses',\n 'GetTrades',\n 'GetBrokerageFees',\n 'GetDigitalCurrencyWithdrawal',\n 'PlaceLimitOrder',\n 'PlaceMarketOrder',\n 'CancelOrder',\n 'SynchDigitalCurrencyDepositAddressWithBlockchain',\n 'RequestFiatWithdrawal',\n 'WithdrawFiatCurrency',\n 'WithdrawDigitalCurrency',\n ],\n },\n },\n 'fees': {\n 'trading': {\n 'taker': 0.5 / 100,\n 'maker': 0.5 / 100,\n 'percentage': True,\n 'tierBased': False,\n },\n },\n 'commonCurrencies': {\n 'PLA': 'PlayChip',\n },\n })\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 2105, "n_words": 193, "vocab_size": 128, "complexity": 1, "nloc": 113, "token_counts": 377, "n_ast_nodes": 686, "n_identifiers": 5, "random_cut": "def describe(self):\n return self.deep_extend(super(independentreserve, self).describe(), {\n 'id': 'independentreserve',\n 'name': 'Independent Reserve',\n 'countries': ['AU', 'NZ'], # Australia, New Zealand\n 'rateLimit': 1000,\n 'has': {\n 'CORS': None,\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n " }, { "id": 134249, "commit_id": "45420f56688d4017cb5e63b229d822bbe722f1a5", "repo": "ray", "path": "rllib/models/specs/tests/test_check_specs.py", "file_name": "test_check_specs.py", "fun_name": "test_filter", "commit_message": "[RLlib] created check_specs decorator, RLModule PR 1/N (#29599)\n\n* 1. created check_specs decorator 2. updated unittests 3. 
refactored the names a little bit for generality\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi ", "code": "def test_filter(self):\n # create an arbitrary large input dict and test the behavior with and without a\n # filter\n input_dict = NestedDict({\"input\": 2})\n for i in range(100):\n inds = (str(i),) + tuple(str(j) for j in range(i + 1, i + 11))\n input_dict[inds] = i\n\n correct_module = CorrectImplementation()\n\n # should run without errors\n correct_module.check_input_and_output(input_dict)\n\n # should raise an error (read the implementation of\n # check_input_and_output_wo_filter)\n self.assertRaises(\n ValueError,\n lambda: correct_module.check_input_and_output_wo_filter(input_dict),\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 189, "n_words": 69, "vocab_size": 51, "complexity": 3, "nloc": 11, "token_counts": 86, "n_ast_nodes": 141, "n_identifiers": 16, "random_cut": "def test_filter(self):\n # create an arbitrary large input dict and test the behavior with and without a\n # filter\n input_dict = NestedDict({\"input\": 2})\n for i in range(100):\n inds = (str(i),) + tuple(str(j) for j in range(i + 1, i + 11))\n input_dict[inds] = i\n\n correct_module = CorrectImplementation()\n\n # should run without errors\n correct_module.check_input_and_output(input_dict)\n\n # should raise an error (read the implementation of\n # check_input_a" }, { "id": 309294, "commit_id": "f4aa18de31985cc6a93f991e9fc87b5d937a2352", "repo": "core", "path": "homeassistant/components/evohome/__init__.py", "file_name": "__init__.py", "fun_name": "call_client_api", "commit_message": "Replace hass.helpers: async_call_later() (#63929)", "code": "async def call_client_api(self, api_function, update_state=True) -> Any:\n \n try:\n result = await api_function\n except (aiohttp.ClientError, evohomeasync2.AuthenticationError) as err:\n _handle_exception(err)\n return\n\n if update_state: # wait a moment for system to quiesce before updating state\n async_call_later(self.hass, 1, self._update_v2_api_state)\n\n return result\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 117, "n_words": 37, "vocab_size": 35, "complexity": 3, "nloc": 10, "token_counts": 55, "n_ast_nodes": 90, "n_identifiers": 15, "random_cut": "async def call_client_api(self, api_function, update_state=True) -> Any:\n \n try:\n result = await api_function\n except (aiohttp.ClientError, evohomeasync2.AuthenticationError) as err:\n _handle_exception(err)\n return\n\n if update_state: # wait a moment f" }, { "id": 283552, "commit_id": "b71abcfbf4d7e8ac1855522aff0378e13c8b5362", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/stocks/fundamental_analysis/test_av_view.py", "file_name": "test_av_view.py", "fun_name": "test_check_output", "commit_message": "Updating some names (#1575)\n\n* quick econ fix\r\n\r\n* black\r\n\r\n* keys and feature flags\r\n\r\n* terminal name :eyes:\r\n\r\n* some more replacements\r\n\r\n* some more replacements\r\n\r\n* edit pyproject\r\n\r\n* gst -> openbb\r\n\r\n* add example portfolios back to git\r\n\r\n* Update api from gst\r\n\r\n* sorry. 
skipping some tests\r\n\r\n* another round of names\r\n\r\n* another round of test edits\r\n\r\n* Missed some .gst refs and update timezone\r\n\r\n* water mark stuff\r\n\r\n* Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS\r\n\r\n* fix more GST to OpenBB Terminal\r\n\r\n* Logging : merge conflicts with main\r\n\r\n* Revert wrong files\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA ", "code": "def test_check_output(func, kwargs_dict, monkeypatch, use_tab):\n monkeypatch.setattr(helper_funcs.obbff, \"USE_TABULATE_DF\", use_tab)\n getattr(av_view, func)(**kwargs_dict)\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.record_stdout\n@pytest.mark.parametrize(\n \"func, mocked_func, kwargs_dict\",\n [\n (\n \"display_overview\",\n \"get_overview\",\n {\"ticker\": \"TSLA\"},\n ),\n (\n \"display_key\",\n \"get_key_metrics\",\n {\"ticker\": \"TSLA\"},\n ),\n (\n \"display_earnings\",\n \"get_earnings\",\n {\"ticker\": \"TSLA\", \"limit\": 5, \"quarterly\": False},\n ),\n ],\n)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.record_stdout\n@pytest.mark.parametrize(\n \"func, mocked_func, kwargs_dict\",\n [\n (\n \"display_overview\",\n \"get_overview\",\n {\"ticker\": \"TSLA\"},\n ),\n (\n \"display_key\",\n \"get_key_metrics\",\n {\"ticker\": \"TSLA\"},\n ),\n (\n \"display_earnings\",\n \"get_earnings\",\n {\"ticker\": \"TSLA\", \"limit\": 5, \"quarterly\": False},\n ),\n ],\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 192, "n_words": 41, "vocab_size": 34, "complexity": 1, "nloc": 3, "token_counts": 33, "n_ast_nodes": 186, "n_identifiers": 16, "random_cut": "def test_check_output(func, kwargs_dict, monkeypatch, use_tab):\n monkeypatch.setattr(helper_funcs.obbff, \"USE_TABULATE_DF\", use_tab)\n getattr(av_view, func)(**kwargs_dict)\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.record_stdout\n@pytest.mark.parametrize(\n \"func, mocked_func, kwargs_dict\",\n [\n (\n \"display_overview\",\n \"get_overview\",\n {\"ticker\": \"TSLA\"},\n ),\n (\n \"displa" }, { "id": 121261, "commit_id": "ea1593a9b21d645a3d0ea82be84fb4614d74f52c", "repo": "jax", "path": "jax/experimental/pjit.py", "file_name": "pjit.py", "fun_name": "_pjit_jaxpr", "commit_message": "Make the `_check_shapes_against_resources` check general for all `XLACompatibleSharding`s by looking at the opsharding proto of the shardings.\n\nPiperOrigin-RevId: 463161459", "code": "def _pjit_jaxpr(fun, out_shardings_thunk, global_in_avals, out_tree):\n prev_positional_val = maps._positional_semantics.val\n try:\n maps._positional_semantics.val = maps._PositionalSemantics.GLOBAL\n with dispatch.log_elapsed_time(f\"Finished tracing + transforming {fun.__name__} \"\n \"for pjit in {elapsed_time} sec\"):\n jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_dynamic(fun, global_in_avals)\n finally:\n maps._positional_semantics.val = prev_positional_val\n jaxpr = core.ClosedJaxpr(jaxpr, consts)\n\n out_shardings_flat = flatten_axis_resources(\n \"pjit out_axis_resources\", out_tree(), out_shardings_thunk(), tupled_args=False)\n\n pjit_check_aval_sharding(out_shardings_flat, global_out_avals, \"pjit outputs\",\n allow_uneven_sharding=False)\n\n normalized_out_shardings_flat = tuple(\n o if _is_unspecified(o) or _is_auto(o) else o.normalize()\n for o in out_shardings_flat\n )\n\n # lu.cache needs to be able to create weakrefs to outputs, so we 
can't return a plain tuple\n return _ListWithW([jaxpr, normalized_out_shardings_flat])\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 185, "n_words": 87, "vocab_size": 70, "complexity": 5, "nloc": 19, "token_counts": 139, "n_ast_nodes": 226, "n_identifiers": 33, "random_cut": "def _pjit_jaxpr(fun, out_shardings_thunk, global_in_avals, out_tree):\n prev_positional_val = maps._positional" }, { "id": 100001, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_users.py", "file_name": "test_organization_users.py", "fun_name": "test_simple", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_simple(self):\n projects_ids = [self.project_1.id, self.project_2.id]\n response = self.get_success_response(self.org.slug, project=projects_ids)\n expected = serialize(\n list(\n self.org.member_set.filter(user__in=[self.owner_user, self.user_2]).order_by(\n \"user__email\"\n )\n ),\n self.user_2,\n OrganizationMemberWithProjectsSerializer(project_ids=projects_ids),\n )\n assert response.data == expected\n\n projects_ids = [self.project_2.id]\n response = self.get_success_response(self.org.slug, project=projects_ids)\n expected = serialize(\n list(self.org.member_set.filter(user__in=[self.user_2]).order_by(\"user__email\")),\n self.user_2,\n OrganizationMemberWithProjectsSerializer(project_ids=projects_ids),\n )\n assert response.data == expected\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 239, "n_words": 44, "vocab_size": 24, "complexity": 1, "nloc": 21, "token_counts": 159, "n_ast_nodes": 244, "n_identifiers": 23, "random_cut": "def test_simple(self):\n projects_ids = [self.project_1.id, self.project_2.id]\n response = self.get_success_response(self.org.slug, project=projects_ids)\n expected = serialize(\n list(\n self.org.member_set.filter(user__in=[self.owner_user, self.user_2]).order_by(\n \"user__email\"\n )\n ),\n self.use" }, { "id": 274881, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/mixed_precision/autocast_variable.py", "file_name": "autocast_variable.py", "fun_name": "numpy_text", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def numpy_text(tensor, is_repr=False):\n \n if tensor.dtype.is_numpy_compatible:\n # pylint: disable=protected-access\n text = repr(tensor._numpy()) if is_repr else str(tensor._numpy())\n # pylint: enable=protected-access\n else:\n text = \"\"\n if \"\\n\" in text:\n text = \"\\n\" + text\n return text\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 83, "n_words": 33, "vocab_size": 22, "complexity": 4, "nloc": 8, "token_counts": 55, "n_ast_nodes": 101, "n_identifiers": 9, "random_cut": "def numpy_text(tensor, is_repr=False):\n \n if tensor.dtype.is_numpy_compatible:\n # pylint: disable=protected-access\n text = repr(tensor._numpy()) if is_repr else str(tensor._numpy())\n # pylint: enable=protected-access\n else:\n text = \"\"\n if \"\\n\" in text:\n text = \"\\n\" + text\n return text\n\n" }, { "id": 62094, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/metadata.py", 
"file_name": "metadata.py", "fun_name": "__delitem__", "commit_message": "upd; format", "code": "def __delitem__(self, name):\n field_name = self._convert_name(name)\n try:\n del self._fields[field_name]\n except KeyError:\n raise KeyError(name)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 55, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 6, "token_counts": 32, "n_ast_nodes": 51, "n_identifiers": 7, "random_cut": "def __delitem__(self, name):\n field_name = self._convert_name(name)\n try:\n " }, { "id": 247704, "commit_id": "1da0f79d5455b594f2aa989106a672786f5b990f", "repo": "synapse", "path": "tests/rest/client/test_relations.py", "file_name": "test_relations.py", "fun_name": "test_redact_relation_annotation", "commit_message": "Refactor relations tests (#12232)\n\n* Moves the relation pagination tests to a separate class.\r\n* Move the assertion of the response code into the `_send_relation` helper.\r\n* Moves some helpers into the base-class.", "code": "def test_redact_relation_annotation(self) -> None:\n \n channel = self._send_relation(RelationTypes.ANNOTATION, \"m.reaction\", \"a\")\n to_redact_event_id = channel.json_body[\"event_id\"]\n\n channel = self._send_relation(\n RelationTypes.ANNOTATION, \"m.reaction\", \"a\", access_token=self.user2_token\n )\n unredacted_event_id = channel.json_body[\"event_id\"]\n\n # Both relations should exist.\n event_ids = self._get_related_events()\n relations = self._get_bundled_aggregations()\n self.assertCountEqual(event_ids, [to_redact_event_id, unredacted_event_id])\n self.assertEquals(\n relations[\"m.annotation\"],\n {\"chunk\": [{\"type\": \"m.reaction\", \"key\": \"a\", \"count\": 2}]},\n )\n\n # Both relations appear in the aggregation.\n chunk = self._get_aggregations()\n self.assertEqual(chunk, [{\"type\": \"m.reaction\", \"key\": \"a\", \"count\": 2}])\n\n # Redact one of the reactions.\n self._redact(to_redact_event_id)\n\n # The unredacted relation should still exist.\n event_ids = self._get_related_events()\n relations = self._get_bundled_aggregations()\n self.assertEquals(event_ids, [unredacted_event_id])\n self.assertEquals(\n relations[\"m.annotation\"],\n {\"chunk\": [{\"type\": \"m.reaction\", \"key\": \"a\", \"count\": 1}]},\n )\n\n # The unredacted aggregation should still exist.\n chunk = self._get_aggregations()\n self.assertEqual(chunk, [{\"type\": \"m.reaction\", \"key\": \"a\", \"count\": 1}])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 350, "n_words": 113, "vocab_size": 57, "complexity": 1, "nloc": 33, "token_counts": 232, "n_ast_nodes": 413, "n_identifiers": 21, "random_cut": "def test_redact_relation_annotation(self) -> None:\n \n channel = self._send_relation(RelationTypes.ANNOTATION, \"m.reaction\", \"a\")\n to_redact_event_id = channel.json_body[\"event_id\"]\n\n channel = self._send_relation(\n RelationTypes.ANNOTATION, \"m.reaction\", \"a\", access_token=self.user2_token\n )\n unredacted_event_id = channel.json_body[\"event_id\"]\n\n # Both relations should exist.\n event_ids = self._get_related_events()\n relations = self._get_bundled_aggregations()\n self.assertCountEqual(event_ids, [to_redact_event_id, unredacted_event_id])\n self.assertEquals(\n relations[\"m.annotation\"],\n {\"chunk\": [{\"type\": \"m.reaction\", \"key\": \"a\", \"count\": 2}]},\n )\n\n # Both relations appear 
in the aggregation.\n chunk = self._get_aggregations()\n self.assertEqual(chunk, [{\"type\": \"m.reaction\", \"key\": \"a\", \"count\": 2}])\n\n # Redact one of the reactions.\n self._redact(to_redact_event_id)\n\n # The unredacted relation should still exist.\n event_ids = self._get_related_events()\n relations = self._get_bundled_aggregations()\n self.assertEquals(event_ids, [unredacted_event_id])\n self.assertEquals(\n relations[\"m.annotation\"],\n {\"chunk\": [{\"type\": \"m.reaction\", \"key\": \"a\", \"count\": 1}]},\n )\n\n # The unredacted aggregation should still exist.\n chunk = self._get_aggregations()\n self.assertEqual(chunk, [{\"type\": \"m.reaction\", \"key\": \"" }, { "id": 283411, "commit_id": "b71abcfbf4d7e8ac1855522aff0378e13c8b5362", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/economy/test_economy_controller.py", "file_name": "test_economy_controller.py", "fun_name": "test_menu_without_queue_sys_exit", "commit_message": "Updating some names (#1575)\n\n* quick econ fix\r\n\r\n* black\r\n\r\n* keys and feature flags\r\n\r\n* terminal name :eyes:\r\n\r\n* some more replacements\r\n\r\n* some more replacements\r\n\r\n* edit pyproject\r\n\r\n* gst -> openbb\r\n\r\n* add example portfolios back to git\r\n\r\n* Update api from gst\r\n\r\n* sorry. skipping some tests\r\n\r\n* another round of names\r\n\r\n* another round of test edits\r\n\r\n* Missed some .gst refs and update timezone\r\n\r\n* water mark stuff\r\n\r\n* Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS\r\n\r\n* fix more GST to OpenBB Terminal\r\n\r\n* Logging : merge conflicts with main\r\n\r\n* Revert wrong files\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA ", "code": "def test_menu_without_queue_sys_exit(mock_input, mocker):\n path_controller = \"openbb_terminal.economy.economy_controller\"\n\n # DISABLE AUTO-COMPLETION\n mocker.patch.object(\n target=economy_controller.obbff,\n attribute=\"USE_PROMPT_TOOLKIT\",\n new=False,\n )\n mocker.patch(\n target=f\"{path_controller}.session\",\n return_value=None,\n )\n\n # MOCK USER INPUT\n mocker.patch(\"builtins.input\", return_value=mock_input)\n\n # MOCK SWITCH", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 88, "n_words": 27, "vocab_size": 23, "complexity": 1, "nloc": 22, "token_counts": 107, "n_ast_nodes": 95, "n_identifiers": 12, "random_cut": "def test_menu_without_queue_sys_exit(mock_input, mocker):\n path" }, { "id": 163712, "commit_id": "6cf8880c16a17000f4d7c57fce942919cfdf2346", "repo": "pandas", "path": "pandas/tests/arrays/floating/test_astype.py", "file_name": "test_astype.py", "fun_name": "test_astype", "commit_message": "REF: share BooleanArray.astype+NumericArray.astype (#45420)", "code": "def test_astype():\n # with missing values\n arr = pd.array([0.1, 0.2, None], dtype=\"Float64\")\n\n with pytest.raises(ValueError, match=\"cannot convert NA to integer\"):\n arr.astype(\"int64\")\n\n with pytest.raises(ValueError, match=\"cannot convert float NaN to bool\"):\n arr.astype(\"bool\")\n\n result = arr.astype(\"float64\")\n expected = np.array([0.1, 0.2, np.nan], dtype=\"float64\")\n tm.assert_numpy_array_equal(result, expected)\n\n # no missing values\n arr = pd.array([0.0, 1.0, 0.5], dtype=\"Float64\")\n result = arr.astype(\"int64\")\n expected = np.array([0, 1, 0], dtype=\"int64\")\n tm.assert_numpy_array_equal(result, expected)\n\n result = 
arr.astype(\"bool\")\n expected = np.array([False, True, True], dtype=\"bool\")\n tm.assert_numpy_array_equal(result, expected)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 130, "n_words": 72, "vocab_size": 43, "complexity": 1, "nloc": 16, "token_counts": 194, "n_ast_nodes": 297, "n_identifiers": 16, "random_cut": "def test_astype():\n # with missing values\n arr = pd.array([0.1, 0.2, None], dtype=\"Float64\")\n\n with pytest.raises(ValueError, match=\"cannot convert NA to integer\"):\n arr.astype(\"int64\")\n\n with pytest.raises(ValueError, match=\"cannot convert float NaN to bool\"):\n arr.astype(\"bool\")\n\n result = arr.astype(\"float64\")\n expected = np.array([0.1, 0.2, np.nan], dtype=\"float64\")\n tm.assert_numpy_array_equal(result, expected)\n\n # no missing values\n arr = pd.array([0.0, 1.0, 0.5], dtype=\"Float64\")\n result = arr.astype(\"int64\")\n expected = np.array([0, 1, 0], dtype=\"int64\")\n tm.a" }, { "id": 15717, "commit_id": "504698b7cc87c9f6a3549cd17a376b549aebcc8a", "repo": "ccxt", "path": "python/ccxt/async_support/independentreserve.py", "file_name": "independentreserve.py", "fun_name": "fetch_ticker", "commit_message": "1.67.86\n\n[ci skip]", "code": "async def fetch_ticker(self, symbol, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'primaryCurrencyCode': market['baseId'],\n 'secondaryCurrencyCode': market['quoteId'],\n }\n response = await self.publicGetGetMarketSummary(self.extend(request, params))\n # {\n # \"DayHighestPrice\":43489.49,\n # \"DayLowestPrice\":41998.32,\n # \"DayAvgPrice\":42743.9,\n # \"DayVolumeXbt\":44.54515625000,\n # \"DayVolumeXbtInSecondaryCurrrency\":0.12209818,\n # \"CurrentLowestOfferPrice\":43619.64,\n # \"CurrentHighestBidPrice\":43153.58,\n # \"LastPrice\":43378.43,\n # \"PrimaryCurrencyCode\":\"Xbt\",\n # \"SecondaryCurrencyCode\":\"Usd\",\n # \"CreatedTimestampUtc\":\"2022-01-14T22:52:29.5029223Z\"\n # }\n return self.parse_ticker(response, market)\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 250, "n_words": 52, "vocab_size": 35, "complexity": 1, "nloc": 9, "token_counts": 69, "n_ast_nodes": 127, "n_identifiers": 11, "random_cut": "async def fetch_ticker(self, symbol, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'primaryCurrencyCode': market['baseId'],\n 'secondaryCurrencyCode': market['quoteId'],\n }\n response = await self.publicGetGetMarketSummary(self.extend(request, params))\n # {\n # \"DayHighestPrice\":43489.49,\n # \"DayLowestPrice\":41998.32,\n # \"DayAvgPrice\":42743.9,\n # \"DayVolumeXbt\":44.54515625000,\n # \"DayVolumeXbtInSecondaryCurrrency\":0.12209818,\n # \"CurrentLowestOfferPrice\":43619.64,\n # \"CurrentHighestBidPrice\":43153.58,\n # \"LastPrice\":43378.43,\n # \"PrimaryCurrencyCode\":\"Xbt\",\n # \"SecondaryCurrencyCode\":\"Usd\",\n # \"CreatedTimestampUtc\":\"2022-01-14T22:52:29.5029223Z\"\n # }\n return self.parse_ticker(response, market)\n" }, { "id": 184698, "commit_id": "f794c85b2fcfeb16c6451fa8fa0df5e9375632d6", "repo": "textual", "path": "src/textual/css/stylesheet.py", "file_name": "stylesheet.py", "fun_name": "rules_map", "commit_message": "precalculate rules_map", "code": "def rules_map(self) -> dict[str, list[RuleSet]]:\n \n if self._rules_map is None:\n rules_map: dict[str, list[RuleSet]] = defaultdict(list)\n for 
rule in self.rules:\n for name in rule.selector_names:\n rules_map[name].append(rule)\n self._rules_map = dict(rules_map)\n return self._rules_map\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 116, "n_words": 28, "vocab_size": 22, "complexity": 4, "nloc": 13, "token_counts": 74, "n_ast_nodes": 113, "n_identifiers": 13, "random_cut": "def rules_map(self) -> dict[str, list[RuleSet]]:\n \n if self._rules_map is None:\n rules_map: dict[str, list[RuleSet]] = defaultdict(list)\n for rule in self.rules:\n for name in rule.selector_names:\n rules_map[name].append(rule)\n self._rules_map = dict(rules_map)\n return self._rules_map\n" }, { "id": 93783, "commit_id": "2fbf550ec05c8501cbc9eca62e73526e717dcbdf", "repo": "sentry", "path": "tests/sentry/integrations/jira_server/test_integration.py", "file_name": "test_integration.py", "fun_name": "test_sync_assignee_outbound_case_insensitive", "commit_message": "ref(Jira): Split Jira Cloud and Jira Server (#37034)\n\n* Split Jira Cloud and Jira Server", "code": "def test_sync_assignee_outbound_case_insensitive(self):\n self.user = self.create_user(email=\"bob@example.com\")\n issue_id = \"APP-123\"\n assign_issue_url = \"https://jira.example.org/rest/api/2/issue/%s/assignee\" % issue_id\n external_issue = ExternalIssue.objects.create(\n organization_id=self.organization.id,\n integration_id=self.installation.model.id,\n key=issue_id,\n )\n responses.add(\n responses.GET,\n \"https://jira.example.org/rest/api/2/user/assignable/search\",\n json=[{\"name\": \"Alive Tofu\", \"emailAddress\": \"Bob@example.com\"}],\n )\n responses.add(responses.PUT, assign_issue_url, json={})\n self.installation.sync_assignee_outbound(external_issue, self.user)\n\n assert len(responses.calls) == 2\n\n # assert user above was successfully assigned\n assign_issue_response = responses.calls[1][1]\n assert assign_issue_url in assign_issue_response.url\n assert assign_issue_response.status_code == 200\n assert assign_issue_response.request.body == b'{\"name\": \"Alive Tofu\"}'\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 232, "n_words": 62, "vocab_size": 48, "complexity": 1, "nloc": 21, "token_counts": 149, "n_ast_nodes": 240, "n_identifiers": 31, "random_cut": "def test_sync_assignee_outbound_case_insensitive(self):\n self.user = self.create_user(email=\"bob@example.com\")\n issue_id = \"APP-123\"\n assign_issue_url = \"https://jira.example.org/rest/api/2/issue/%s/assignee\" % issue_id\n external_issue = Exte" }, { "id": 91667, "commit_id": "7f60db924ea37f34e0cfe6856777239e2a2ffe13", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_data.py", "file_name": "test_organization_metric_data.py", "fun_name": "test_orderby_percentile_with_many_fields_multiple_entities", "commit_message": "feat(metrics): make indexer more configurable (#35604)\n\nThis makes the sentry_metrics indexer more configurable in the following ways, to enable indexing on the ingest-performance-metrics topic:\r\n\r\n- configurable input Kafka topic\r\n- configurable output Kafka topic\r\n- configurable model from which to pull index results\r\n- tags for internal metrics to distinguish between the two modes operationally", "code": "def test_orderby_percentile_with_many_fields_multiple_entities(self):\n \n org_id = self.organization.id\n transaction_id = _indexer_record(org_id, 
\"transaction\")\n transaction_1 = _indexer_record(org_id, \"/foo/\")\n transaction_2 = _indexer_record(org_id, \"/bar/\")\n\n self._send_buckets(\n [\n {\n \"org_id\": org_id,\n \"project_id\": self.project.id,\n \"metric_id\": self.transaction_lcp_metric,\n \"timestamp\": int(time.time()),\n \"type\": \"d\",\n \"value\": numbers,\n \"tags\": {tag: value},\n \"retention_days\": 90,\n }\n for tag, value, numbers in (\n (transaction_id, transaction_1, [10, 11, 12]),\n (transaction_id, transaction_2, [4, 5, 6]),\n )\n ],\n entity=\"metrics_distributions\",\n )\n self._send_buckets(\n [\n {\n \"org_id\": org_id,\n \"project_id\": self.project.id,\n \"metric_id\": _indexer_record(org_id, TransactionMRI.USER.value),\n \"timestamp\": int(time.time()),\n \"tags\": {tag: value},\n \"type\": \"s\",\n \"value\": numbers,\n \"retention_days\": 90,\n }\n for tag, value, numbers in (\n (transaction_id, transaction_1, list(range(1))),\n (transaction_id, transaction_2, list(range(5))),\n )\n ],\n entity=\"metrics_sets\",\n )\n\n response = self.get_success_response(\n self.organization.slug,\n field=[\n f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\",\n f\"count_unique({TransactionMetricKey.USER.value})\",\n ],\n statsPeriod=\"1h\",\n interval=\"1h\",\n groupBy=[\"project_id\", \"transaction\"],\n orderBy=f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\",\n )\n groups = response.data[\"groups\"]\n assert len(groups) == 2\n\n expected = [\n (\"/bar/\", 5.0, 5),\n (\"/foo/\", 11.0, 1),\n ]\n for (expected_tag_value, expected_lcp_count, users), group in zip(expected, groups):\n # With orderBy, you only get totals:\n assert group[\"by\"] == {\"transaction\": expected_tag_value, \"project_id\": self.project.id}\n assert group[\"totals\"] == {\n f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\": expected_lcp_count,\n f\"count_unique({TransactionMetricKey.USER.value})\": users,\n }\n assert group[\"series\"] == {\n f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\": [expected_lcp_count],\n f\"count_unique({TransactionMetricKey.USER.value})\": [users],\n }\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 1100, "n_words": 167, "vocab_size": 105, "complexity": 4, "nloc": 70, "token_counts": 405, "n_ast_nodes": 700, "n_identifiers": 41, "random_cut": "def test_orderby_percentile_with_many_fields_multiple_entities(self):\n \n org_id = self.organization.id\n transaction_id = _indexer_record(org_id, \"transaction\")\n tran" }, { "id": 110317, "commit_id": "383de519505964ed879c40b23ef36e90c17ebe0d", "repo": "matplotlib", "path": "lib/matplotlib/axes/_axes.py", "file_name": "_axes.py", "fun_name": "secondary_yaxis", "commit_message": "[Doc] fix more spelling and grammar", "code": "def secondary_yaxis(self, location, *, functions=None, **kwargs):\n \n if location in ['left', 'right'] or isinstance(location, Number):\n secondary_ax = SecondaryAxis(self, 'y', location,\n functions, **kwargs)\n self.add_child_axes(secondary_ax)\n return secondary_ax\n else:\n raise ValueError('secondary_yaxis location must be either '\n 'a float or \"left\"/\"right\"')\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 169, "n_words": 36, "vocab_size": 32, "complexity": 3, "nloc": 9, "token_counts": 64, "n_ast_nodes": 107, "n_identifiers": 11, "random_cut": "def secondary_yaxis(self, location, *, functions=None, 
**kwargs):\n \n if location in ['left', 'right'] or isinstance(location, Number):\n secondary_ax = SecondaryAxis(self, 'y', location,\n functions, **kwargs)\n " }, { "id": 251575, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "mitmproxy/tools/console/statusbar.py", "file_name": "statusbar.py", "fun_name": "sig_message", "commit_message": "make it black!", "code": "def sig_message(self, sender, message, expire=1):\n if self.prompting:\n return\n cols, _ = self.master.ui.get_cols_rows()\n w = urwid.Text(self.shorten_message(message, cols))\n self._w = w\n if expire:\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 66, "n_words": 21, "vocab_size": 17, "complexity": 3, "nloc": 9, "token_counts": 71, "n_ast_nodes": 85, "n_identifiers": 16, "random_cut": "def sig_message(self, sender, message, expire=1):\n if self.prompting:\n return\n cols, _ = self.master.ui.get_cols_rows()\n w = urwid.Text(self.shorten_message(message, cols))\n " }, { "id": 12404, "commit_id": "049666d92f4c467d6430aa14d858156c0b0cab56", "repo": "jina", "path": "tests/unit/orchestrate/flow/flow-orchestrate/test_flow_change_gateway.py", "file_name": "test_flow_change_gateway.py", "fun_name": "test_get_set_client_gateway_in_flow", "commit_message": "fix: code review (#4821)\n\n\r\n\r\nCo-authored-by: Jina Dev Bot ", "code": "def test_get_set_client_gateway_in_flow(protocol):\n f = Flow(protocol=protocol, port=12345)\n assert f.client_args.protocol == GatewayProtocolType.from_string(protocol)\n assert f.gateway_args.protocol == GatewayProtocolType.from_string(protocol)\n assert f.client_args.port == 12345\n assert f.gateway_args.port == 12345\n f._update_network_interface(port=54321)\n assert f.client_args.port == 54321\n assert f.gateway_args.port == 54321\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 54, "n_words": 31, "vocab_size": 16, "complexity": 1, "nloc": 9, "token_counts": 83, "n_ast_nodes": 128, "n_identifiers": 10, "random_cut": "def test_get_set_client_gateway_in_flow(protocol):\n f = Flow(protocol=protocol, port=12345)\n assert f.client_args.protocol == G" }, { "id": 338614, "commit_id": "611e21cb137e08b0b9a35c76676eb46de9d30627", "repo": "text-generation-inference", "path": "server/tests/test_utils.py", "file_name": "test_utils.py", "fun_name": "test_stopping_criteria_eos", "commit_message": "fix(server): Fix stop sequences (#11)", "code": "def test_stopping_criteria_eos():\n criteria = StoppingCriteria(0, [StopSequenceCriteria(\"/test;\")], max_new_tokens=5)\n assert criteria(1, \"\") == (False, None)\n assert criteria(0, \"\") == (True, \"eos_token\")\n\n", "url": "https://github.com/huggingface/text-generation-inference.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 27, "n_words": 19, "vocab_size": 16, "complexity": 1, "nloc": 4, "token_counts": 47, "n_ast_nodes": 77, "n_identifiers": 5, "random_cut": "def test_stopping_criteria_eos():\n criteria = StoppingCriteria(0, [StopSequenceCriteria(\"/test;\")], max_new_tokens=5)\n assert criteria(1," }, { "id": 122440, "commit_id": "532cd7ed74ea2a282b0c626b410308bdd626cfe3", "repo": "jax", "path": "benchmarks/api_benchmark.py", "file_name": "api_benchmark.py", "fun_name": "pjit_simple_benchmark", "commit_message": "Skip the benchmarks properly via state.skip_with_error when enough 
devices are not present.\n\nPiperOrigin-RevId: 485931295", "code": "def pjit_simple_benchmark(state, num_devices, num_args, cpp_jit, use_aot=False):\n spec = pjit_lib.PartitionSpec('x')\n mesh = create_mesh((num_devices,), ('x',), state)\n if mesh is None:\n return\n s = sharding.MeshPspecSharding(mesh, spec)\n inp_data = np.arange(num_devices).astype(np.float32)\n x = array.make_array_from_callback(inp_data.shape, s, lambda idx: inp_data[idx])\n\n x = [x for _ in range(num_args)]\n\n prev_state = jax_config.FLAGS.experimental_cpp_pjit\n jax_config.FLAGS.experimental_cpp_pjit = cpp_jit\n\n in_axis_resources = sharding.MeshPspecSharding(mesh, spec)\n out_axis_resources = sharding.MeshPspecSharding(mesh, spec)\n\n f = pjit_lib.pjit(\n lambda x: jax.tree_map(lambda x: x + 1, x),\n in_axis_resources=in_axis_resources,\n out_axis_resources=out_axis_resources)\n\n if use_aot:\n f = f.lower(x).compile()\n\n x = f(x)\n\n while state:\n x = f(x)\n\n jax_config.FLAGS.experimental_cpp_pjit = prev_state\n\n\n@google_benchmark.register\n@google_benchmark.option.arg_names(['num_args', 'cpp_pjit'])\n@google_benchmark.option.args([1, False])\n@google_benchmark.option.args([1, True])\n@google_benchmark.option.args([10, False])\n@google_benchmark.option.args([10, True])\n@google_benchmark.option.args([100, False])\n@google_benchmark.option.args([100, True])\n@jax_config.jax_array(True)", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@google_benchmark.register\n@google_benchmark.option.arg_names(['num_args', 'cpp_pjit'])\n@google_benchmark.option.args([1, False])\n@google_benchmark.option.args([1, True])\n@google_benchmark.option.args([10, False])\n@google_benchmark.option.args([10, True])\n@google_benchmark.option.args([100, False])\n@google_benchmark.option.args([100, True])\n@jax_config.jax_array(True)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 129, "n_words": 99, "vocab_size": 61, "complexity": 5, "nloc": 23, "token_counts": 204, "n_ast_nodes": 469, "n_identifiers": 44, "random_cut": "def pjit_simple_benchmark(state, num_devices, num_args, cpp_jit, use_aot=False):\n spec = pjit_lib.PartitionSpec('x')\n mesh = create_mesh((num_devices,), ('x',), state)\n if mesh is None:\n return\n s = sharding.MeshPspecSharding(mesh, spec)\n inp_data = np.arange(num_devices).astype(np.float32)\n x = array.make_array_from_callback(inp_data.shape, s, lambda idx: inp_data[idx])\n\n x = [x for _ in range(num_args)]\n\n prev_state = jax_config.FLAGS.experimental_cpp_pjit\n jax_config.FLAGS.experimental_cpp_pjit = cpp_jit\n\n in_axis_resources = sharding.MeshPspecSharding(mesh, spec)\n out_axis_resources = sharding.MeshPspecSharding(mesh, spec)\n\n f = pjit_lib.pjit(\n lambda x: jax.tree_map(lambda x: x + 1, x),\n in_axis_resources=in_axis_resources,\n out_axis_resources=out_axis_resources)\n\n if use_aot:\n f = f.lower(x).compile()\n\n x = f(x)\n\n while state:\n x = f(x)\n\n jax_config.FLAGS.experimental_cpp_pjit = prev_state\n\n\n@google_benchmark.register\n@google_benchmark.option.arg_names(['num_args', 'cpp_pjit'])\n@google_benchmark.option.args([1, False])\n@google_benchmark.option.args([1, True])\n@google_benchmark.option.args([10, False])\n@google_benchmark.option.args([10, True])\n@google_benchmark.option.args([100, False])\n@google_benchmark.option.args(" }, { "id": 294541, "commit_id": "cc75cebfc5b3e9316cdbaf82c5c72437521f819b", "repo": "core", "path": "homeassistant/components/samsungtv/bridge.py", "file_name": "bridge.py", "fun_name": "async_device_info", 
"commit_message": "Add support for setting up encrypted samsung tvs from config flow (#68717)\n\nCo-authored-by: epenet ", "code": "async def async_device_info(self) -> dict[str, Any] | None:\n \n if self._rest_api is None:\n assert self.port\n rest_api = SamsungTVAsyncRest(\n host=self.host,\n session=async_get_clientsession(self.hass),\n port=self.port,\n timeout=TIMEOUT_WEBSOCKET,\n )\n\n with contextlib.suppress(*REST_EXCEPTIONS):\n device_info: dict[str, Any] = await rest_api.rest_device_info()\n LOGGER.debug(\"Device info on %s is: %s\", self.host, device_info)\n self._device_info = device_info\n return device_info\n\n return None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 210, "n_words": 45, "vocab_size": 38, "complexity": 2, "nloc": 16, "token_counts": 101, "n_ast_nodes": 159, "n_identifiers": 23, "random_cut": "async def async_device_info(self) -> dict[str, Any] | None:\n \n if self._rest_api is None:\n assert self.port\n rest_api = SamsungTVAsyncRest(\n host=self.host,\n session=async_get_clientsession(self.hass),\n port=self.port,\n timeout=TIMEOUT_WEBSOCKET,\n )\n\n with contextlib.suppress(*REST_EXCEPTIONS):\n device_info: dict[str, Any] = await rest_api.rest_device_info()\n LOGGER.debug(\"Device info on %s is: %s\", self.host, device_info" }, { "id": 180673, "commit_id": "a424832ec119c490d5d1d2d7d635b4a7232dc77e", "repo": "gradio", "path": "gradio/examples.py", "file_name": "examples.py", "fun_name": "cache_interface_examples", "commit_message": "Fix async tests (#2000)\n\n* fix examples test\r\n\r\n* formatting\r\n\r\n* async examples\r\n\r\n* working on mix\r\n\r\n* comment out failing test\r\n\r\n* fixed interface problem\r\n\r\n* fixes", "code": "async def cache_interface_examples(self) -> None:\n \n if os.path.exists(self.cached_file):\n print(\n f\"Using cache from '{os.path.abspath(self.cached_folder)}' directory. If method or examples have changed since last caching, delete this folder to clear cache.\"\n )\n else:\n print(f\"Caching examples at: '{os.path.abspath(self.cached_file)}'\")\n cache_logger = CSVLogger()\n cache_logger.setup(self.outputs, self.cached_folder)\n for example_id, _ in enumerate(self.examples):\n try:\n prediction = await self.process_example(example_id)\n cache_logger.flag(prediction)\n except Exception as e:\n shutil.rmtree(self.cached_folder)\n raise e\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 265, "n_words": 57, "vocab_size": 55, "complexity": 4, "nloc": 17, "token_counts": 93, "n_ast_nodes": 193, "n_identifiers": 24, "random_cut": "async def cache_interface_examples(self) -> None:\n \n if os.path.exists(self.cached_file):\n print(\n f\"Using cache from '{os.path.abspath(self.cached_folder)}' directory. 
If method or examples have changed since last caching, delete this folder to clear cache.\"\n )\n else:\n print(f\"Caching examples at: '{os.path" }, { "id": 207764, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_save_as_new_with_validation_errors", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_save_as_new_with_validation_errors(self):\n \n response = self.client.post(\n reverse(\"admin:admin_views_person_change\", args=(self.per1.pk,)),\n {\n \"_saveasnew\": \"\",\n \"gender\": \"invalid\",\n \"_addanother\": \"fail\",\n },\n )\n self.assertContains(response, \"Please correct the errors below.\")\n self.assertFalse(response.context[\"show_save_and_add_another\"])\n self.assertFalse(response.context[\"show_save_and_continue\"])\n self.assertTrue(response.context[\"show_save_as_new\"])\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 152, "n_words": 25, "vocab_size": 25, "complexity": 1, "nloc": 13, "token_counts": 87, "n_ast_nodes": 154, "n_identifiers": 13, "random_cut": "def test_save_as_new_with_validation_errors(self):\n \n response = self.client.post(\n reverse(\"admin:admin_views_person_change\", args=(self.per1.pk,)),\n {\n \"_saveasnew\": \"\",\n \"gender\"" }, { "id": 80353, "commit_id": "a4a3ba65d736045733cb49430d7076b73aec23bb", "repo": "awx", "path": "awx/main/tasks/system.py", "file_name": "system.py", "fun_name": "handle_removed_image", "commit_message": "Refactored tasks.py to a package\n--- Added 3 new sub-package : awx.main.tasks.system , awx.main.tasks.jobs , awx.main.tasks.receptor\n--- Modified the functional tests and unit tests accordingly", "code": "def handle_removed_image(remove_images=None):\n \n _cleanup_images_and_files(remove_images=remove_images, file_pattern='')\n\n\n@task(queue=get_local_queuename)", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "@task(queue=get_local_queuename)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 10, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 45, "n_identifiers": 7, "random_cut": "def handle_removed_image(remove_images=None):\n \n _cleanup_images_and_files(remove_images=remove_images, file_pattern='')\n\n\n@task(queue=get_loca" }, { "id": 279715, "commit_id": "062073cfc4a5fe4c24ed3e326c673951c040982f", "repo": "keras", "path": "keras/integration_test/parameter_server_training_metric_test.py", "file_name": "parameter_server_training_metric_test.py", "fun_name": "setUpClass", "commit_message": "Use Model metrics as logs in `fit` and `evaluate` instead of last worker train or test step result\n\nCurrently the model evaluate returns the last scheduled worker metrics. This is troublesome when using distributed workers as the last one could fail. in Parameter Server Strategy, the last worker may finish sooner than earlier scheduled worker resulting in incorrect metrics being returned. 
So always rely on current model metrics.\n\nPiperOrigin-RevId: 471137058", "code": "def setUpClass(cls):\n super().setUpClass()\n cls.cluster = multi_worker_test_base.create_multi_process_cluster(\n num_workers=2, num_ps=3, rpc_layer=\"grpc\"\n )\n cls.cluster_resolver = cls.cluster.cluster_resolver\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 51, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 41, "n_ast_nodes": 67, "n_identifiers": 10, "random_cut": "def setUpClass(cls):\n super().setUpClass()\n cls.cluster = multi_worker_test" }, { "id": 72037, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_page_chooser.py", "file_name": "test_page_chooser.py", "fun_name": "setUp", "commit_message": "Reformat with black", "code": "def setUp(self):\n self.tree_root = Page.objects.get(id=1)\n self.home_page = Page.objects.get(id=2)\n\n self.about_page = self.home_page.add_child(\n instance=SimplePage(title=\"About\", content=\"About Foo\")\n )\n self.contact_page = self.about_page.add_child(\n instance=SimplePage(title=\"Contact\", content=\"Content Foo\")\n )\n self.people_page = self.about_page.add_child(\n instance=SimplePage(title=\"People\", content=\"The people of Foo\")\n )\n\n self.event_index = self.make_event_section(\"Events\")\n\n self.login()\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 137, "n_words": 35, "vocab_size": 25, "complexity": 1, "nloc": 14, "token_counts": 117, "n_ast_nodes": 197, "n_identifiers": 19, "random_cut": "def setUp(self):\n self.tree_root = Page.objects.get(id=1)\n self.home_page = Page.objects.get(id=2)\n\n self.about_page = self.home_page.add_child(\n instance=SimplePage(title=\"About\", content=\"About Foo\")\n )\n self.contact_page = self.about_page.add_child(\n instance=SimplePage(title=\"Contact\"," }, { "id": 182281, "commit_id": "42b1f7ef4a744fc36f68fac367ea0736eb9a7870", "repo": "textual", "path": "tests/renderables/test_opacity.py", "file_name": "test_opacity.py", "fun_name": "test_value_zero_sets_foreground_color_to_background_color", "commit_message": "Rename module, use positional args for Segment, rename value -> opacity in Opacity", "code": "def test_value_zero_sets_foreground_color_to_background_color(text):\n foreground = background = \"0;255;0\"\n assert render(Opacity(text, opacity=0)) == (\n f\"\\x1b[38;2;{foreground};48;2;{background}mHello, world!{STOP}\"\n )\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 30, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 27, "n_ast_nodes": 58, "n_identifiers": 8, "random_cut": "def test_value_zero_sets_foreground_color_to_background_color(text):\n foreground = background = \"0;255;0\"\n assert render(Opacity(text, opacity=0)) == (\n f\"\\x1b[38;2;{foreground};48;2;{background}mHello, world!{STOP}\"\n )\n\n" }, { "id": 184737, "commit_id": "009c556ca9e62f87bae3be12784cd446fad55697", "repo": "textual", "path": "examples/calculator.py", "file_name": "calculator.py", "fun_name": "compute_show_ac", "commit_message": "calculator example", "code": "def compute_show_ac(self) -> bool:\n \n return self.value in (\"\", \"0\") and self.numbers == \"0\"\n", "url": 
"https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 27, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 3, "token_counts": 24, "n_ast_nodes": 45, "n_identifiers": 5, "random_cut": "def compute_show_ac(self) -> bool:\n \n return self.value in (\"\", \"0\") and self.numbers == " }, { "id": 250167, "commit_id": "3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b", "repo": "synapse", "path": "tests/storage/test_monthly_active_users.py", "file_name": "test_monthly_active_users.py", "fun_name": "test_user_last_seen_monthly_active", "commit_message": "Require types in tests.storage. (#14646)\n\nAdds missing type hints to `tests.storage` package\r\nand does not allow untyped definitions.", "code": "def test_user_last_seen_monthly_active(self) -> None:\n user_id1 = \"@user1:server\"\n user_id2 = \"@user2:server\"\n user_id3 = \"@user3:server\"\n\n result = self.get_success(self.store.user_last_seen_monthly_active(user_id1))\n self.assertNotEqual(result, 0)\n\n self.get_success(self.store.upsert_monthly_active_user(user_id1))\n self.get_success(self.store.upsert_monthly_active_user(user_id2))\n\n result = self.get_success(self.store.user_last_seen_monthly_active(user_id1))\n assert result is not None\n self.assertGreater(result, 0)\n\n result = self.get_success(self.store.user_last_seen_monthly_active(user_id3))\n self.assertIsNone(result)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 117, "n_words": 34, "vocab_size": 24, "complexity": 1, "nloc": 13, "token_counts": 114, "n_ast_nodes": 188, "n_identifiers": 13, "random_cut": "def test_user_last_seen_monthly_active(self) -> None:\n user_id1 = \"@user1:server\"\n user_id2 = \"@user2:server\"\n user_id3 = \"@user3:server\"\n\n result = self.get_success(self.store.user_last_seen_monthly_active(user_id1))\n self.assertNotEqual(result, 0)\n\n self.get_succes" }, { "id": 335971, "commit_id": "f448360bd0dfe5e28ee65ab2130532db91d5eafe", "repo": "diffusers", "path": "tests/test_scheduler.py", "file_name": "test_scheduler.py", "fun_name": "test_from_pretrained_save_pretrained", "commit_message": "Finish scheduler API (#91)\n\n* finish\r\n\r\n* up", "code": "def test_from_pretrained_save_pretrained(self):\n kwargs = dict(self.forward_default_kwargs)\n\n num_inference_steps = kwargs.pop(\"num_inference_steps\", None)\n\n for scheduler_class in self.scheduler_classes:\n sample = self.dummy_sample\n residual = 0.1 * sample\n\n scheduler_config = self.get_scheduler_config()\n scheduler = scheduler_class(**scheduler_config)\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n scheduler.save_config(tmpdirname)\n new_scheduler = scheduler_class.from_config(tmpdirname)\n\n if num_inference_steps is not None and hasattr(scheduler, \"set_timesteps\"):\n scheduler.set_timesteps(num_inference_steps)\n new_scheduler.set_timesteps(num_inference_steps)\n elif num_inference_steps is not None and not hasattr(scheduler, \"set_timesteps\"):\n kwargs[\"num_inference_steps\"] = num_inference_steps\n\n output = scheduler.step(residual, 1, sample, **kwargs)[\"prev_sample\"]\n new_output = new_scheduler.step(residual, 1, sample, **kwargs)[\"prev_sample\"]\n\n assert np.sum(np.abs(output - new_output)) < 1e-5, \"Scheduler outputs are not identical\"\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, 
"n_whitespaces": 285, "n_words": 80, "vocab_size": 56, "complexity": 6, "nloc": 19, "token_counts": 180, "n_ast_nodes": 288, "n_identifiers": 29, "random_cut": "def test_from_pretrained_save_pretrained(self):\n kwargs = dict(self.forward_default_kwargs)\n\n num_inference_steps = kwargs.pop(\"num_inference_steps\", None)\n\n for scheduler_class in self.scheduler_classes:\n sample = self.dummy_sample\n residual = 0.1 * sample\n\n scheduler_config = self.get_scheduler_config()\n scheduler = scheduler_class(**scheduler_config)\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n scheduler.save_config(tmpdirname)\n new_scheduler = scheduler_class.from_config(tmpdirname)\n\n if num_inference_steps is not None and hasattr(scheduler, \"set_timesteps\"):\n scheduler.set_timesteps(num_inference_steps)\n new_scheduler.set_timesteps(num_inference_steps)\n elif num_inference_steps is not None and not hasattr(scheduler, \"set_timesteps\"):\n kwargs[\"num_inference_steps\"] = num_inference_steps\n\n output = scheduler.step(residual, 1, sample, **kwargs)[\"prev_sample\"]\n new_output = new_scheduler.step(residual, 1, sample, **kwargs" }, { "id": 104475, "commit_id": "0e6ab17c4a49413007869441a044b6ca6de019cf", "repo": "datasets", "path": "metrics/mae/mae.py", "file_name": "mae.py", "fun_name": "_get_feature_types", "commit_message": "add MSE and MAE metrics - V2 (#3874)\n\n* * add RMSE and MAE metrics\r\n* add required kwargs for missing params.\r\n* add support for multi-dimensional lists and update example.\r\n\r\n* Fix style and normalize whitespace in example\r\n\r\nCo-authored-by: mariosasko ", "code": "def _get_feature_types(self):\n if self.config_name == \"multilist\":\n return {\n \"predictions\": datasets.Sequence(datasets.Value(\"float\")),\n \"references\": datasets.Sequence(datasets.Value(\"float\")),\n }\n else:\n return {\n \"predictions\": datasets.Value(\"float\"),\n \"references\": datasets.Value(\"float\"),\n }\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 138, "n_words": 21, "vocab_size": 14, "complexity": 2, "nloc": 11, "token_counts": 66, "n_ast_nodes": 120, "n_identifiers": 6, "random_cut": "def _get_feature_types(self):\n if self.config_name == \"multilist\":\n return {\n \"predictions\": datasets.Sequence(datasets.Value(\"float\")),\n \"references\": datasets.Sequence(datasets.Value(\"float\")),\n }\n else:\n return {\n \"predictions\": datasets.Value(\"float\"),\n \"references\": datasets.Value(\"float\")," }, { "id": 19281, "commit_id": "af2061a1e784a6779bdc08976a43157ddae17884", "repo": "PythonRobotics", "path": "PathPlanning/HybridAStar/car.py", "file_name": "car.py", "fun_name": "check_car_collision", "commit_message": "fix doc artifact link in CI (#660)", "code": "def check_car_collision(x_list, y_list, yaw_list, ox, oy, kd_tree):\n for i_x, i_y, i_yaw in zip(x_list, y_list, yaw_list):\n cx = i_x + BUBBLE_DIST * cos(i_yaw)\n cy = i_y + BUBBLE_DIST * sin(i_yaw)\n\n ids = kd_tree.query_ball_point([cx, cy], BUBBLE_R)\n\n if not ids:\n continue\n\n if not rectangle_check(i_x, i_y, i_yaw,\n [ox[i] for i in ids], [oy[i] for i in ids]):\n return False # collision\n\n return True # no collision\n\n", "url": "https://github.com/AtsushiSakai/PythonRobotics.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 156, "n_words": 62, "vocab_size": 45, "complexity": 6, "nloc": 11, "token_counts": 107, "n_ast_nodes": 153, "n_identifiers": 
21, "random_cut": "def check_car_collision(x_list, y_list, yaw_list, ox, oy, kd_tree):\n for i_x, i_y, i_yaw in zip(x_list, y_list, yaw_list):\n cx = i_x + BUBBLE_DIST * cos(i_yaw)\n cy = i_y + BUBBLE_DIST * sin(i_yaw)\n\n ids = kd_tree.query_ball_point([cx, cy], BUBBLE_R)\n\n if not ids:\n continue\n\n if not rectangle_check(i_x, i_y, i_yaw,\n " }, { "id": 147481, "commit_id": "1465eaa30634c189fe3ebc9db8609f47d19a78cc", "repo": "ray", "path": "python/ray/tune/tests/test_api.py", "file_name": "test_api.py", "fun_name": "_testDurableTrainable", "commit_message": "[tune] Use new Checkpoint interface internally (#22801)\n\nFollow up from #22741, also use the new checkpoint interface internally. This PR is low friction and just replaces some internal bookkeeping methods.\r\n\r\nWith the new Checkpoint interface, there is no need to revamp the save/restore APIs completely. Instead, we will focus on the bookkeeping part, which takes place in the Ray Tune's and Ray Train's checkpoint managers. These will be consolidated in a future PR.", "code": "def _testDurableTrainable(self, trainable, function=False, cleanup=True):\n tempdir = tempfile.mkdtemp()\n self.addCleanup(shutil.rmtree, tempdir)\n mocked_subprocess = mock_s3_sync(tempdir)\n remote_checkpoint_dir = \"s3://unit-test/bucket\"\n\n with patch(\"subprocess.check_call\", mocked_subprocess):\n log_creator = partial(\n noop_logger_creator, logdir=\"~/tmp/ray_results/exp/trial\"\n )\n test_trainable = trainable(\n logger_creator=log_creator, remote_checkpoint_dir=remote_checkpoint_dir\n )\n result = test_trainable.train()\n self.assertEqual(result[\"metric\"], 1)\n checkpoint_path = test_trainable.save()\n result = test_trainable.train()\n self.assertEqual(result[\"metric\"], 2)\n result = test_trainable.train()\n self.assertEqual(result[\"metric\"], 3)\n result = test_trainable.train()\n self.assertEqual(result[\"metric\"], 4)\n\n shutil.rmtree(\"~/tmp/ray_results/exp/\")\n if not function:\n test_trainable.state[\"hi\"] = 2\n test_trainable.restore(checkpoint_path)\n self.assertEqual(test_trainable.state[\"hi\"], 1)\n else:\n # Cannot re-use function trainable, create new\n tune.session.shutdown()\n test_trainable = trainable(\n logger_creator=log_creator,\n remote_checkpoint_dir=remote_checkpoint_dir,\n )\n test_trainable.restore(checkpoint_path)\n\n result = test_trainable.train()\n self.assertEqual(result[\"metric\"], 2)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 505, "n_words": 85, "vocab_size": 52, "complexity": 2, "nloc": 35, "token_counts": 233, "n_ast_nodes": 388, "n_identifiers": 31, "random_cut": "def _testDurableTrainable(self, trainable, function=False, cleanup=True):\n tempdir = tempfile.mkdtemp()\n self.addCleanup(shutil.rmtree, tempdir)\n mocked_subprocess = mock_s3_sync(tempdir)\n remote_checkpoint_dir = \"s3://unit-test/bucket\"\n\n with patch(\"subprocess.check_call\", mocked_subprocess):\n log_creator = partial(\n noop_logger_creator, logdir=\"~/tmp/ray_results/exp/trial\"\n )\n test_trainable = trainable(\n logger_creator=log_creator, remote_checkpoint_dir=remote_checkpoint_dir\n )\n result = test_trainable.train()\n self.assertEqual(result[\"metric\"], 1)\n checkpoint_path = test_trainable.save()\n result = test_trainable.train()\n self.assertEqual(result[\"metric\"], 2)\n result = test_trainable.train()\n self.assertEqual(result[\"metric\"], 3)\n result = test_trainable.train()\n self.assertEqual(result[\"metric\"], 
4)\n\n shut" }, { "id": 89731, "commit_id": "2b9d93c52d7a67a19f2d329d5ec491341c2d2bca", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_monitor_details.py", "file_name": "test_monitor_details.py", "fun_name": "test_cronjob_nonstandard", "commit_message": "ref(hybrid-cloud): use organization_slug in MonitorStats (#42384)", "code": "def test_cronjob_nonstandard(self):\n with self.feature(\"organizations:monitors\"):\n for path_func in self._get_path_functions():\n monitor = self._create_monitor()\n path = path_func(monitor)\n\n resp = self.client.put(path, data={\"config\": {\"schedule\": \"@monthly\"}})\n\n assert resp.status_code == 200, resp.content\n assert resp.data[\"id\"] == str(monitor.guid)\n\n monitor = Monitor.objects.get(id=monitor.id)\n assert monitor.config[\"schedule_type\"] == ScheduleType.CRONTAB\n assert monitor.config[\"schedule\"] == \"0 0 1 * *\"\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 181, "n_words": 44, "vocab_size": 34, "complexity": 2, "nloc": 11, "token_counts": 115, "n_ast_nodes": 195, "n_identifiers": 23, "random_cut": "def test_cronjob_nonstandard(self):\n with self.feature(\"organizations:monitors\"):\n for path_func in self._get_path_functions():\n monitor = self._create_monitor()\n path = path_func(monitor)\n\n resp = self.client.put(path, data={\"config\": {\"schedule\": \"@monthly\"}})\n\n asser" }, { "id": 29017, "commit_id": "df06a6c572967384f6aa250947aa693e024b35a3", "repo": "saleor", "path": "saleor/graphql/core/mutations.py", "file_name": "mutations.py", "fun_name": "perform_mutation", "commit_message": "Allow to pass metadata directly in create and update mutations for product app models (#10689)\n\n* Allow to pass metadata directly in create and update mutations for product app models\r\n\r\n* Move metadata methods to BaseMutation class\r\n\r\n* Fix mypy check for validate_metadata_keys\r\n\r\n* CHANGELOG.md update", "code": "def perform_mutation(cls, _root, info, **data):\n \n instance = cls.get_instance(info, **data)\n data = data.get(\"input\")\n cleaned_input = cls.clean_input(info, instance, data)\n metadata_list = cleaned_input.pop(\"metadata\", None)\n private_metadata_list = cleaned_input.pop(\"private_metadata\", None)\n instance = cls.construct_instance(instance, cleaned_input)\n\n cls.validate_and_update_metadata(instance, metadata_list, private_metadata_list)\n cls.clean_instance(info, instance)\n cls.save(info, instance, cleaned_input)\n cls._save_m2m(info, instance, cleaned_input)\n cls.post_save_action(info, instance, cleaned_input)\n return cls.success_response(instance)\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 136, "n_words": 45, "vocab_size": 32, "complexity": 1, "nloc": 13, "token_counts": 129, "n_ast_nodes": 198, "n_identifiers": 20, "random_cut": "def perform_mutation(cls, _root, info, **data):\n \n instance = cls.get_instance(info, **data)\n data = data.get(\"input\")\n cleaned_input = cls.clean_input(info, instance, data)\n metadata_list = cleaned_input.pop(\"metadata\", None)\n private_metadata_list = " }, { "id": 169054, "commit_id": "54347fe684e0f7844bf407b1fb958a5269646825", "repo": "pandas", "path": "pandas/plotting/_matplotlib/boxplot.py", "file_name": "boxplot.py", "fun_name": "orientation", "commit_message": "TYP: Autotyping (#48191)\n\n* annotate-magics\r\n\r\n* annotate-imprecise-magics\r\n\r\n* 
none-return\r\n\r\n* scalar-return\r\n\r\n* pyi files\r\n\r\n* ignore vendored file\r\n\r\n* manual changes\r\n\r\n* ignore pyright in pickle_compat (these errors would be legit if the current __new__ methods were called but I think these pickle tests call older __new__ methods which allowed providing multiple positional arguments)\r\n\r\n* run autotyping in pre-commit\r\n\r\n* remove final and expand safe (and add annotate-imprecise-magics)", "code": "def orientation(self) -> Literal[\"horizontal\", \"vertical\"]:\n if self.kwds.get(\"vert\", True):\n return \"vertical\"\n else:\n return \"horizontal\"\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 48, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 30, "n_ast_nodes": 56, "n_identifiers": 5, "random_cut": "def orientation(self) -> Literal[\"horizontal\", \"vertical\"]:\n if self.kwds.get(\"vert\", True):\n return \"vertical\"\n else:\n return \"horizontal\"\n" }, { "id": 38905, "commit_id": "31ee80d55673f32c0f5d50936f371e661b74b21a", "repo": "transformers", "path": "tests/models/layoutlmv3/test_tokenization_layoutlmv3.py", "file_name": "test_tokenization_layoutlmv3.py", "fun_name": "test_call", "commit_message": "Add LayoutLMv3 (#17060)\n\n* Make forward pass work\r\n\r\n* More improvements\r\n\r\n* Remove unused imports\r\n\r\n* Remove timm dependency\r\n\r\n* Improve loss calculation of token classifier\r\n\r\n* Fix most tests\r\n\r\n* Add docs\r\n\r\n* Add model integration test\r\n\r\n* Make all tests pass\r\n\r\n* Add LayoutLMv3FeatureExtractor\r\n\r\n* Improve integration test + make fixup\r\n\r\n* Add example script\r\n\r\n* Fix style\r\n\r\n* Add LayoutLMv3Processor\r\n\r\n* Fix style\r\n\r\n* Add option to add visual labels\r\n\r\n* Make more tokenizer tests pass\r\n\r\n* Fix more tests\r\n\r\n* Make more tests pass\r\n\r\n* Fix bug and improve docs\r\n\r\n* Fix import of processors\r\n\r\n* Improve docstrings\r\n\r\n* Fix toctree and improve docs\r\n\r\n* Fix auto tokenizer\r\n\r\n* Move tests to model folder\r\n\r\n* Move tests to model folder\r\n\r\n* change default behavior add_prefix_space\r\n\r\n* add prefix space for fast\r\n\r\n* add_prefix_spcae set to True for Fast\r\n\r\n* no space before `unique_no_split` token\r\n\r\n* add test to hightligh special treatment of added tokens\r\n\r\n* fix `test_batch_encode_dynamic_overflowing` by building a long enough example\r\n\r\n* fix `test_full_tokenizer` with add_prefix_token\r\n\r\n* Fix tokenizer integration test\r\n\r\n* Make the code more readable\r\n\r\n* Add tests for LayoutLMv3Processor\r\n\r\n* Fix style\r\n\r\n* Add model to README and update init\r\n\r\n* Apply suggestions from code review\r\n\r\n* Replace asserts by value errors\r\n\r\n* Add suggestion by @ducviet00\r\n\r\n* Add model to doc tests\r\n\r\n* Simplify script\r\n\r\n* Improve README\r\n\r\n* a step ahead to fix\r\n\r\n* Update pair_input_test\r\n\r\n* Make all tokenizer tests pass - phew\r\n\r\n* Make style\r\n\r\n* Add LayoutLMv3 to CI job\r\n\r\n* Fix auto mapping\r\n\r\n* Fix CI job name\r\n\r\n* Make all processor tests pass\r\n\r\n* Make tests of LayoutLMv2 and LayoutXLM consistent\r\n\r\n* Add copied from statements to fast tokenizer\r\n\r\n* Add copied from statements to slow tokenizer\r\n\r\n* Remove add_visual_labels attribute\r\n\r\n* Fix tests\r\n\r\n* Add link to notebooks\r\n\r\n* Improve docs of LayoutLMv3Processor\r\n\r\n* Fix reference to 
section\r\n\r\nCo-authored-by: SaulLu \r\nCo-authored-by: Niels Rogge ", "code": "def test_call(self):\n # Tests that all call wrap to encode_plus and batch_encode_plus\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n # Test not batched\n words, boxes = self.get_words_and_boxes()\n encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes)\n encoded_sequences_2 = tokenizer(words, boxes=boxes)\n self.assertEqual(encoded_sequences_1, encoded_sequences_2)\n\n # Test not batched pairs\n question, words, boxes = self.get_question_words_and_boxes()\n encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes)\n encoded_sequences_2 = tokenizer(words, boxes=boxes)\n self.assertEqual(encoded_sequences_1, encoded_sequences_2)\n\n # Test batched\n words, boxes = self.get_words_and_boxes_batch()\n encoded_sequences_1 = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes)\n encoded_sequences_2 = tokenizer(words, boxes=boxes)\n self.assertEqual(encoded_sequences_1, encoded_sequences_2)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 333, "n_words": 77, "vocab_size": 40, "complexity": 2, "nloc": 16, "token_counts": 152, "n_ast_nodes": 250, "n_identifiers": 21, "random_cut": "def test_call(self):\n # Tests that all call wrap to encode_plus and batch_encode_plus\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n # Test not batch" }, { "id": 193220, "commit_id": "4cb83c2f285101f83b5143663e0d90305e9d7200", "repo": "vision", "path": "test/test_prototype_transforms.py", "file_name": "test_prototype_transforms.py", "fun_name": "test_trivial_aug", "commit_message": "[proto] Fixed RandAug and all AA consistency tests (#6519)\n\n* [proto] Fixed RandAug implementation\r\n\r\n* Fixed randomness in tests for trivial aug\r\n\r\n* Fixed all AA tests", "code": "def test_trivial_aug(self, inpt, interpolation, mocker):\n from torchvision.transforms import autoaugment as ref_transforms\n\n t_ref = ref_transforms.TrivialAugmentWide(interpolation=interpolation)\n t = transforms.TrivialAugmentWide(interpolation=interpolation)\n\n le = len(t._AUGMENTATION_SPACE)\n keys = list(t._AUGMENTATION_SPACE.keys())\n randint_values = []\n for i in range(le):\n # Stable API, op_index random call\n randint_values.append(i)\n key = keys[i]\n # Stable API, random magnitude\n aug_op = t._AUGMENTATION_SPACE[key]\n magnitudes = aug_op[0](2, 0, 0)\n if magnitudes is not None:\n randint_values.append(5)\n # Stable API, if signed there is another random call\n if aug_op[1]:\n randint_values.append(0)\n # New API, _get_random_item\n randint_values.append(i)\n # New API, random magnitude\n if magnitudes is not None:\n randint_values.append(5)\n\n randint_values = iter(randint_values)\n\n mocker.patch(\"torch.randint\", side_effect=lambda *arg, **kwargs: torch.tensor(next(randint_values)))\n mocker.patch(\"torch.rand\", return_value=1.0)\n\n for _ in range(le):\n expected_output = t_ref(inpt)\n output = t(inpt)\n\n if isinstance(inpt, PIL.Image.Image):\n expected_output = pil_to_tensor(expected_output)\n output = pil_to_tensor(output)\n\n torch.testing.assert_close(expected_output, output)\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 
457, "n_words": 119, "vocab_size": 73, "complexity": 7, "nloc": 29, "token_counts": 243, "n_ast_nodes": 385, "n_identifiers": 42, "random_cut": "def test_trivial_aug(self, inpt, interpolation, mocker):\n from torchvision.transforms import autoaugment as ref_transforms\n\n t_ref = ref_transforms.TrivialAugmentWide(interpolation=interpolation)\n t = tr" }, { "id": 86948, "commit_id": "7e56c00f4a62e36aeb7015fa01493aca053c9dea", "repo": "sentry", "path": "tests/sentry/replays/test_organization_replay_index.py", "file_name": "test_organization_replay_index.py", "fun_name": "test_get_replays_no_multi_project_select", "commit_message": "fix(replays): enforce global views feature flag (#40063)\n\n- since we're not using `get_snuba_params`, we need to check this\r\nfeature flag in our logic in order to enforce it.\r\n\r\ncloses https://github.com/getsentry/replay-backend/issues/161", "code": "def test_get_replays_no_multi_project_select(self):\n self.create_project(teams=[self.team])\n self.create_project(teams=[self.team])\n\n user = self.create_user(is_superuser=False)\n self.create_member(\n user=user, organization=self.organization, role=\"member\", teams=[self.team]\n )\n self.login_as(user)\n\n with self.feature(REPLAYS_FEATURES), self.feature({\"organizations:global-views\": False}):\n response = self.client.get(self.url)\n assert response.status_code == 400\n assert response.data[\"detail\"] == \"You cannot view events from multiple projects.\"\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 127, "n_words": 35, "vocab_size": 31, "complexity": 1, "nloc": 12, "token_counts": 117, "n_ast_nodes": 191, "n_identifiers": 20, "random_cut": "def test_get_replays_no_multi_project_select(self):\n self.create_project(teams=[self.team])\n self.create_project(teams=[self.team])\n\n user = self.create_user(is_" }, { "id": 176803, "commit_id": "ddb1cb663d1c0be293aaa4d4284eab641255014f", "repo": "networkx", "path": "networkx/algorithms/centrality/tests/test_eigenvector_centrality.py", "file_name": "test_eigenvector_centrality.py", "fun_name": "test_empty", "commit_message": "MAINT: Cleanup centrality module, remove unused variables (#5308)\n\n* MAINT: Cleanup centrality module, remove unused variables\r\n\r\n* make isort linter happy\r\n\r\n* MAINT: make the loop more readable\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* make black happy\r\n\r\n* Use a different name for internal function variable\r\n\r\nCo-authored-by: Jarrod Millman \r\n\r\n* rename closeness_cen to closeness_dict\r\n\r\n* minor cleanup in current_flow_betweenness and group\r\n\r\nCo-authored-by: Dan Schult \r\nCo-authored-by: Jarrod Millman ", "code": "def test_empty(self):\n with pytest.raises(nx.NetworkXException):\n nx.eigenvector_centrality(nx.Graph())\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 22, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 3, "token_counts": 25, "n_ast_nodes": 44, "n_identifiers": 8, "random_cut": "def test_empty(self):\n with pytest.raises(nx." 
}, { "id": 67235, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/report/hsn_wise_summary_of_outward_supplies/hsn_wise_summary_of_outward_supplies.py", "file_name": "hsn_wise_summary_of_outward_supplies.py", "fun_name": "get_columns", "commit_message": "style: format code with black", "code": "def get_columns():\n\tcolumns = [\n\t\t{\n\t\t\t\"fieldname\": \"gst_hsn_code\",\n\t\t\t\"label\": _(\"HSN/SAC\"),\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"options\": \"GST HSN Code\",\n\t\t\t\"width\": 100,\n\t\t},\n\t\t{\"fieldname\": \"description\", \"label\": _(\"Description\"), \"fieldtype\": \"Data\", \"width\": 300},\n\t\t{\"fieldname\": \"stock_uom\", \"label\": _(\"Stock UOM\"), \"fieldtype\": \"Data\", \"width\": 100},\n\t\t{\"fieldname\": \"stock_qty\", \"label\": _(\"Stock Qty\"), \"fieldtype\": \"Float\", \"width\": 90},\n\t\t{\"fieldname\": \"total_amount\", \"label\": _(\"Total Amount\"), \"fieldtype\": \"Currency\", \"width\": 120},\n\t\t{\n\t\t\t\"fieldname\": \"taxable_amount\",\n\t\t\t\"label\": _(\"Total Taxable Amount\"),\n\t\t\t\"fieldtype\": \"Currency\",\n\t\t\t\"width\": 170,\n\t\t},\n\t]\n\n\treturn columns\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 48, "n_words": 69, "vocab_size": 42, "complexity": 1, "nloc": 21, "token_counts": 142, "n_ast_nodes": 281, "n_identifiers": 3, "random_cut": "def get_columns():\n\tcolumns = [\n\t\t{\n\t\t\t\"fieldname\": \"gst_hsn_code\",\n\t\t\t\"label\": _(\"HSN/SAC\"),\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"op" }, { "id": 205695, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/lookups.py", "file_name": "lookups.py", "fun_name": "get_bound_params", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_bound_params(self, start, finish):\n raise NotImplementedError(\n \"subclasses of YearLookup must provide a get_bound_params() method\"\n )\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 39, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 14, "n_ast_nodes": 23, "n_identifiers": 5, "random_cut": "def get_bound_params(self, start, finish):\n raise NotImplementedError(\n " }, { "id": 2612, "commit_id": "5be58f99dab36f962684360914679ba91ede242a", "repo": "PySyft", "path": "packages/hagrid/hagrid/deps.py", "file_name": "deps.py", "fun_name": "check", "commit_message": "Update deps.py", "code": "def check(self) -> None:\n binary_info = BinaryInfo(\n binary=\"docker\", version_cmd=\"docker compose version\"\n ).get_binary_info()\n\n if (\n binary_info.path\n and binary_info.version\n and binary_info.version > version.parse(MINIMUM_DOCKER_COMPOSE_VERSION):\n ):\n self.display = \"✅ Docker Compose \" + str(binary_info.version)\n else:\n self.issues.append(docker_compose_install())\n self.display = \"❌ Docker Compose v2 not installed\"\n\n\n@dataclass", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": ":@dataclass", "n_ast_errors": 2, "ast_levels": 12, "n_whitespaces": 151, "n_words": 41, "vocab_size": 34, "complexity": 4, "nloc": 13, "token_counts": 75, "n_ast_nodes": 133, "n_identifiers": 17, "random_cut": "def check(self) -> None:\n binary_info = BinaryInfo(\n binary=\"docker\", version_cmd=\"docker compose version\"\n ).get_binary_info()\n\n if (\n 
binary_info.path\n and binary_info.version\n " }, { "id": 21244, "commit_id": "37b1fb422b958d0ea0edf42302e597c0c508e979", "repo": "pipenv", "path": "tests/integration/test_requirements.py", "file_name": "test_requirements.py", "fun_name": "test_requirements_markers_get_included", "commit_message": "Include markers issue #5092 (#5114)\n\n* Include markers\r\n\r\n* Change to marker flag and include test\r\n\r\n* Flip markers flag to exclude by default\r\n\r\n* Docs and newsfile\r\n\r\n* Let default dependencies update dev dependencies", "code": "def test_requirements_markers_get_included(PipenvInstance):\n package, version, markers = \"werkzeug\", \"==2.1.2\", \"python_version >= '3.7'\"\n lockfile = {\n \"_meta\": {\"sources\": []},\n \"default\": {\n package: {\n \"hashes\": [\n \"sha256:1ce08e8093ed67d638d63879fd1ba3735817f7a80de3674d293f5984f25fb6e6\",\n \"sha256:72a4b735692dd3135217911cbeaa1be5fa3f62bffb8745c5215420a03dc55255\"\n ],\n \"markers\": markers,\n \"version\": version\n }\n },\n \"develop\": {}\n }\n\n with PipenvInstance(chdir=True) as p:\n with open(p.lockfile_path, 'w') as f:\n json.dump(lockfile, f)\n\n c = p.pipenv('requirements')\n assert c.returncode == 0\n assert f'{package}{version}; {markers}' in c.stdout\n\n\n@pytest.mark.requirements", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "@pytest.mark.requirements", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 256, "n_words": 59, "vocab_size": 51, "complexity": 1, "nloc": 22, "token_counts": 108, "n_ast_nodes": 217, "n_identifiers": 20, "random_cut": "def test_requirements_markers_get_included(PipenvInstance):\n package, version, markers = \"werkzeug\", \"==2.1.2\", \"python_version >= '3.7'\"\n lockfile = {\n \"_meta\": {\"sources\": []},\n \"default\": {\n package: {\n \"hashes\": [\n \"sha256:1ce08e8093ed67d638d63879fd1ba3735817f7a80de3674d293f5984f25fb6e6\",\n \"sha256:72a4b735692dd3135217911cbeaa1be5fa3f62bffb8745c5215420a03dc55255\"\n ],\n \"markers\": markers,\n \"version\": version\n }\n },\n \"develop\": {}\n }\n\n with PipenvInstance(chdir=True) as p:\n with open(p.lockfile_path, 'w') as f:\n json.dump(lockfile, f)\n\n c = p.pipenv('requirements')\n assert c.returncode == 0\n assert f" }, { "id": 200593, "commit_id": "0a180f07cdd5e38fbe7c1866b4b2595759005aed", "repo": "sympy", "path": "sympy/tensor/tests/test_tensor.py", "file_name": "test_tensor.py", "fun_name": "test_contract_metric4", "commit_message": "Add test_contract_metric4: check if contract_metric can handle an expression being canonicalized to be zero.\n\nhttps://github.com/sympy/sympy/issues/24354", "code": "def test_contract_metric4():\n R3 = TensorIndexType('R3', dim=3)\n p, q, r = tensor_indices(\"p q r\", R3)\n delta = R3.delta\n eps = R3.epsilon\n K = TensorHead(\"K\", [R3])\n\n #Check whether contract_metric chokes on an expandable expression which becomes zero on canonicalization.\n expr = eps(p,q,r)*( K(-p)*K(-q) + delta(-p,-q) )\n assert expr.contract_metric(delta) == 0\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 71, "n_words": 48, "vocab_size": 42, "complexity": 1, "nloc": 8, "token_counts": 88, "n_ast_nodes": 143, "n_identifiers": 15, "random_cut": "def test_contract_metric4():\n R3 = TensorIndexType('R3', dim=3)\n p, q, r = tensor_indices(\"p q r\", R3)\n delta = R3.delta\n eps = R3.epsilon\n K = TensorHead(\"K\", [R3])\n\n #Check " }, { "id": 246995, "commit_id": 
"1901cb1d4a8b7d9af64493fbd336e9aa2561c20c", "repo": "synapse", "path": "tests/rest/client/test_profile.py", "file_name": "test_profile.py", "fun_name": "test_get_avatar_url", "commit_message": "Add type hints to `tests/rest/client` (#12084)", "code": "def test_get_avatar_url(self) -> None:\n res = self._get_avatar_url()\n self.assertIsNone(res)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 5, "random_cut": "def test_get_avatar_url(self) -> None:\n res = self._get_avatar_url()\n self.assertIsNone(res)\n" }, { "id": 251856, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/proxy/layers/http/test_http.py", "file_name": "test_http.py", "fun_name": "test_memory_usage_completed_flows", "commit_message": "make it black!", "code": "def test_memory_usage_completed_flows(tctx):\n \n gc.collect()\n flow_count = flows_tracked()\n\n server = Placeholder(Server)\n assert (\n Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)\n >> DataReceived(\n tctx.client,\n b\"GET http://example.com/ HTTP/1.1\\r\\nHost: example.com\\r\\n\\r\\n\",\n )\n << OpenConnection(server)\n >> reply(None)\n << SendData(server, b\"GET / HTTP/1.1\\r\\nHost: example.com\\r\\n\\r\\n\")\n >> DataReceived(server, b\"HTTP/1.1 204 No Content\\r\\n\\r\\n\")\n << SendData(tctx.client, b\"HTTP/1.1 204 No Content\\r\\n\\r\\n\")\n )\n\n gc.collect()\n assert flows_tracked() == flow_count\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 152, "n_words": 50, "vocab_size": 34, "complexity": 1, "nloc": 18, "token_counts": 106, "n_ast_nodes": 180, "n_identifiers": 20, "random_cut": "def test_memory_usage_completed_flows(tctx):\n \n gc.collect()\n flow_count = flows_tracked()\n\n server = Placeholder(Server)\n assert (\n Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)\n >> DataReceived(\n tctx.client,\n b\"GET http://" }, { "id": 107981, "commit_id": "2566eaa43278d99d1d90e0cb192c6e4d21a13487", "repo": "matplotlib", "path": "lib/matplotlib/widgets.py", "file_name": "widgets.py", "fun_name": "_update_val_from_pos", "commit_message": "RangeSlider handle set_val bugfix", "code": "def _update_val_from_pos(self, pos):\n \n idx = np.argmin(np.abs(self.val - pos))\n if idx == 0:\n val = self._min_in_bounds(pos)\n self.set_min(val)\n else:\n val = self._max_in_bounds(pos)\n self.set_max(val)\n if self._active_handle:\n if self.orientation == \"vertical\":\n self._active_handle.set_ydata([val])\n else:\n self._active_handle.set_xdata([val])\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 161, "n_words": 30, "vocab_size": 22, "complexity": 4, "nloc": 13, "token_counts": 94, "n_ast_nodes": 157, "n_identifiers": 16, "random_cut": "def _update_val_from_pos(self, pos):\n \n idx = np.argmin(np.abs(self.val - pos))\n if idx == 0:\n " }, { "id": 89340, "commit_id": "6fc6106b6a57149a5bae3c0f4677349cfbae1155", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_dynamic_sampling_sdk_versions.py", "file_name": "test_organization_dynamic_sampling_sdk_versions.py", "fun_name": "test_query_start_and_query_end_are_atmost_one_day_apart", "commit_message": 
"fix(dyn-sampling): Backend code clean up (#42001)\n\nWe are consolidating server-side-sampling and dynamic-sampling flags\r\ninto only dynamic-sampling. The flag is being controlled by plan", "code": "def test_query_start_and_query_end_are_atmost_one_day_apart(self):\n self.login_as(self.user)\n with Feature({\"organizations:dynamic-sampling\": True}):\n response = self.client.get(\n f\"{self.endpoint}?project=\"\n f\"{self.project.id}&start=2022-08-05T00:02:00+00:00&end=2022-08-07T00:00:02+00:00\"\n )\n assert response.status_code == 400\n assert (\n response.json()[\"detail\"] == \"'start' and 'end' have to be a maximum of 1 day apart\"\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 146, "n_words": 33, "vocab_size": 30, "complexity": 1, "nloc": 11, "token_counts": 55, "n_ast_nodes": 114, "n_identifiers": 13, "random_cut": "def test_query_start_and_query_end_are_atmost_one_day_apart(self):\n self.login_as(self.user)\n with Feature({\"organ" }, { "id": 63170, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py", "file_name": "__init__.py", "fun_name": "_register", "commit_message": "upd; format", "code": "def _register(cls):\n loader_names = 'SourceFileLoader', 'SourcelessFileLoader',\n for name in loader_names:\n loader_cls = getattr(importlib_machinery, name, type(None))\n register_loader_type(loader_cls, cls)\n\n\nDefaultProvider._register()\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 52, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 5, "token_counts": 35, "n_ast_nodes": 66, "n_identifiers": 10, "random_cut": "def _register(cls):\n loader_names = 'SourceFileLoader', 'SourcelessFileLoader',\n for name in loader_names:\n loader_cls = getattr(importlib_m" }, { "id": 9556, "commit_id": "7375ee364e0df2a417f92593e09557f1b2a3575a", "repo": "insightface", "path": "reconstruction/ostec/utils/ganfit_camera.py", "file_name": "ganfit_camera.py", "fun_name": "get_pose", "commit_message": "initialize ostec", "code": "def get_pose(camera_params):\n s, R, t = P2sRt(get_camera_matrices(camera_params)[0])\n return matrix2angle(R) # pitch:%.2f,\\n yaw:%.2f \\n , roll:%.2f \\n\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 22, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 42, "n_identifiers": 8, "random_cut": "def get_pose(camera_params):\n" }, { "id": 65359, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/tax_detail/tax_detail.py", "file_name": "tax_detail.py", "fun_name": "filter_match", "commit_message": "style: format code with black", "code": "def filter_match(value, string):\n\t\"Approximation to datatable filters\"\n\timport datetime\n\n\tif string == \"\":\n\t\treturn True\n\tif value is None:\n\t\tvalue = -999999999999999\n\telif isinstance(value, datetime.date):\n\t\treturn True\n\n\tif isinstance(value, str):\n\t\tvalue = value.lower()\n\t\tstring = string.lower()\n\t\tif string[0] == \"<\":\n\t\t\treturn True if string[1:].strip() else False\n\t\telif string[0] == \">\":\n\t\t\treturn False if string[1:].strip() else True\n\t\telif 
string[0] == \"=\":\n\t\t\treturn string[1:] in value if string[1:] else False\n\t\telif string[0:2] == \"!=\":\n\t\t\treturn string[2:] not in value\n\t\telif len(string.split(\":\")) == 2:\n\t\t\tpre, post = string.split(\":\")\n\t\t\treturn True if not pre.strip() and post.strip() in value else False\n\t\telse:\n\t\t\treturn string in value\n\telse:\n\t\tif string[0] in [\"<\", \">\", \"=\"]:\n\t\t\toperator = string[0]\n\t\t\tif operator == \"=\":\n\t\t\t\toperator = \"==\"\n\t\t\tstring = string[1:].strip()\n\t\telif string[0:2] == \"!=\":\n\t\t\toperator = \"!=\"\n\t\t\tstring = string[2:].strip()\n\t\telif len(string.split(\":\")) == 2:\n\t\t\tpre, post = string.split(\":\")\n\t\t\ttry:\n\t\t\t\treturn True if float(pre) <= value and float(post) >= value else False\n\t\t\texcept ValueError:\n\t\t\t\treturn False if pre.strip() else True\n\t\telse:\n\t\t\treturn string in str(value)\n\n\ttry:\n\t\tnum = float(string) if string.strip() else 0\n\t\treturn frappe.safe_eval(f\"{value} {operator} {num}\")\n\texcept ValueError:\n\t\tif operator == \"<\":\n\t\t\treturn True\n\t\treturn False\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 138, "n_words": 187, "vocab_size": 70, "complexity": 26, "nloc": 49, "token_counts": 361, "n_ast_nodes": 613, "n_identifiers": 19, "random_cut": "def filter_match(value, string):\n\t\"Approximation to datatable filters\"\n\timport datetime\n\n\tif string == \"\":\n\t\treturn True\n\tif value is None:\n\t\tvalue = -999999999999999\n\telif isinstance(value, datetime.date):\n\t\treturn True\n\n\tif isinstance(value, str):\n\t\tvalue = value.lower()\n\t\tstring = string.lower()\n\t\tif string[0] == \"<\":\n\t\t\treturn True if string[1:].strip() else False\n\t\telif string[0] == \">\":\n\t\t\treturn False if string[1:].strip() else True\n\t\telif string[0] == \"=\":\n\t\t\treturn string[1:] in value if string[1:] else False\n\t\telif string[0:2] == \"!=\":\n\t\t\treturn string[2:] not in value\n\t\telif len(string.split(\":\")) == 2:\n\t\t\tpre, post = string.split(\":\")\n\t\t\treturn True if not pre.strip() and post.strip() in value else False\n\t\telse:\n\t\t\treturn string in value\n\telse:\n\t\tif string[0] in [\"<\", \">\", \"=\"]:\n\t\t\toperator = string[0]\n\t\t\tif operator == \"=\":\n\t\t\t\toperator = \"==\"\n\t\t\tstring = string[1:].strip()\n\t\telif string[0:2] == \"!=\":\n\t\t\toperator = \"!=\"\n\t\t\tstring = string[2:].strip()\n\t\telif len(string.split(\":\")) == 2:\n\t\t\tpre, post = string.split(\":\")\n\t\t\ttry:\n\t\t\t\treturn True if float(pre) <= value and float(post) >= value else False\n\t\t\texcept ValueError:\n\t\t\t\treturn False if pre.strip() else True\n\t\telse:\n\t\t\treturn string in str(value)\n\n\ttry:\n\t\tnum = float(string) if string.strip() else 0\n\t\treturn frappe.safe_eval(f\"{value} {operator} {num}\")\n\texcept ValueError:\n\t\tif o" }, { "id": 304430, "commit_id": "3a3f41f3df932368791d3ee3f5fbae5fb3b38bfe", "repo": "core", "path": "homeassistant/components/egardia/binary_sensor.py", "file_name": "binary_sensor.py", "fun_name": "update", "commit_message": "Improve entity type hints [e] (#77041)", "code": "def update(self) -> None:\n \n egardia_input = self._egardia_system.getsensorstate(self._id)\n self._state = STATE_ON if egardia_input else STATE_OFF\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 35, "n_words": 
14, "vocab_size": 12, "complexity": 2, "nloc": 4, "token_counts": 29, "n_ast_nodes": 48, "n_identifiers": 9, "random_cut": "def update(self) -> None:\n \n ega" }, { "id": 42826, "commit_id": "841ed271017ff35a3124f1d1a53a5c74730fed60", "repo": "airflow", "path": "tests/providers/google/cloud/operators/test_spanner.py", "file_name": "test_spanner.py", "fun_name": "test_database_create", "commit_message": "Spanner assets & system tests migration (AIP-47) (#23957)", "code": "def test_database_create(self, mock_hook):\n mock_hook.return_value.get_database.return_value = None\n op = SpannerDeployDatabaseInstanceOperator(\n project_id=PROJECT_ID,\n instance_id=INSTANCE_ID,\n database_id=DB_ID,\n ddl_statements=DDL_STATEMENTS,\n task_id=\"id\",\n )\n context = mock.MagicMock()\n result = op.execute(context=context)\n mock_hook.assert_called_once_with(\n gcp_conn_id=\"google_cloud_default\",\n impersonation_chain=None,\n )\n mock_hook.return_value.create_database.assert_called_once_with(\n project_id=PROJECT_ID, instance_id=INSTANCE_ID, database_id=DB_ID, ddl_statements=DDL_STATEMENTS\n )\n mock_hook.return_value.update_database.assert_not_called()\n assert result\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 198, "n_words": 34, "vocab_size": 25, "complexity": 1, "nloc": 20, "token_counts": 106, "n_ast_nodes": 162, "n_identifiers": 27, "random_cut": "def test_database_create(self, mock_hook):\n mock_hook.return_value.get_database.return_value = None\n op = SpannerDeployDatabaseInstanceOperator(\n project_id=PROJECT_ID,\n instance_id=INSTANCE_ID,\n database_id=DB_ID,\n ddl_statements=DDL_STATEMENTS,\n task_id=\"id\",\n )\n context = mock.MagicMock()\n result = op.execute(context=context)\n mock_hook.assert_called_once_with(\n gcp_conn_id=\"google_cloud_default\",\n impersonation_chain=None,\n )\n mock_hook.return_value.create_database.assert_called_once_with(\n project_id=PROJECT_ID, instance_id=INSTANCE_ID, databa" }, { "id": 166384, "commit_id": "ba2fdb14fadd09f03863681d1df1fffa4ba13f9a", "repo": "pandas", "path": "pandas/tests/io/test_common.py", "file_name": "test_common.py", "fun_name": "test_get_attr", "commit_message": "CLN: mmap used by only read_csv (#46967)", "code": "def test_get_attr(self, mmap_file):\n with open(mmap_file) as target:\n wrapper = icom._CSVMMapWrapper(target)\n\n attrs = dir(wrapper.mmap)\n attrs = [attr for attr in attrs if not attr.startswith(\"__\")]\n attrs.append(\"__next__\")\n\n for attr in attrs:\n assert hasattr(wrapper, attr)\n\n assert not hasattr(wrapper, \"foo\")\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 98, "n_words": 35, "vocab_size": 25, "complexity": 4, "nloc": 9, "token_counts": 74, "n_ast_nodes": 124, "n_identifiers": 15, "random_cut": "def test_get_attr(self, mmap_file):\n with open(mmap_file) as target:\n wrapper = icom._CSVMMapWrapper(target)\n\n attrs = dir(wrapper.mmap)\n attrs = " }, { "id": 59829, "commit_id": "30f2d03ef656dd440e658fe99a802adfac257c70", "repo": "prefect", "path": "tests/cli/deployment/test_deployment_cli.py", "file_name": "test_deployment_cli.py", "fun_name": "test_message_with_missing_work_queue_name", "commit_message": "move build tests into separate file", "code": "def test_message_with_missing_work_queue_name(self, patch_import, tmp_path):\n d = Deployment.build_from_flow(\n flow=my_flow,\n name=\"TEST\",\n 
flow_name=\"my_flow\",\n output=str(tmp_path / \"test.yaml\"),\n work_queue_name=None,\n )\n invoke_and_assert(\n [\n \"deployment\",\n \"apply\",\n str(tmp_path / \"test.yaml\"),\n ],\n expected_output_contains=(\n \"This deployment does not specify a work queue name, which means agents \"\n \"will not be able to pick up its runs. To add a work queue, \"\n \"edit the deployment spec and re-run this command, or visit the deployment in the UI.\",\n ),\n )\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 285, "n_words": 69, "vocab_size": 58, "complexity": 1, "nloc": 20, "token_counts": 67, "n_ast_nodes": 112, "n_identifiers": 16, "random_cut": "def test_message_with_missing_work_queue_name(self, patch_import, tmp_path):\n d = Deployment.build_from_flow(\n flow=my_flow,\n name=\"TEST\",\n flow_name=\"my_flow\",\n output=str(tmp_path / \"test.yaml\"),\n work_queue_name=None,\n )\n invoke_and_assert(\n [\n \"deployment\",\n \"apply\",\n str(tmp_path / \"test.yaml\"),\n ],\n expected_output_contains=(\n \"This deployment does not specify a work queue name, which means agents \"\n \"will not be able to pick up its runs. To add a work queue, \"\n \"edit the deployment spec and re-run this command, or visit the deployment " }, { "id": 5874, "commit_id": "4fb8f63181f5153b4f6778c6ef8dad61022c4f3f", "repo": "ludwig", "path": "tests/integration_tests/test_visualization.py", "file_name": "test_visualization.py", "fun_name": "test_visualization_compare_performance_output_saved", "commit_message": "Use tempfile to automatically garbage collect data and modeling artifacts in ludwig integration tests. (#1642)\n\n* Use tmpdir to automatically garbage collect data and modeling artifacts in ludwig integration tests.", "code": "def test_visualization_compare_performance_output_saved(csv_filename):\n \n input_features = [text_feature(encoder=\"parallel_cnn\")]\n output_features = [category_feature()]\n\n # Generate test data\n rel_path = generate_data(input_features, output_features, csv_filename)\n input_features[0][\"encoder\"] = \"parallel_cnn\"\n exp_dir_name = run_experiment_with_visualization(input_features, output_features, dataset=rel_path)\n vis_output_pattern_pdf = os.path.join(exp_dir_name, \"*.pdf\")\n vis_output_pattern_png = os.path.join(exp_dir_name, \"*.png\")\n test_stats = os.path.join(exp_dir_name, TEST_STATISTICS_FILE_NAME)\n\n test_cmd_pdf = [\n \"python\",\n \"-m\",\n \"ludwig.visualize\",\n \"--visualization\",\n \"compare_performance\",\n \"--test_statistics\",\n test_stats,\n test_stats,\n \"-m\",\n \"Model1\",\n \"Model2\",\n \"-od\",\n exp_dir_name,\n ]\n test_cmd_png = test_cmd_pdf.copy() + [\"-ff\", \"png\"]\n\n commands = [test_cmd_pdf, test_cmd_png]\n vis_patterns = [vis_output_pattern_pdf, vis_output_pattern_png]\n\n for command, viz_pattern in zip(commands, vis_patterns):\n result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n figure_cnt = glob.glob(viz_pattern)\n\n assert 0 == result.returncode\n assert 1 == len(figure_cnt)\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 257, "n_words": 90, "vocab_size": 70, "complexity": 2, "nloc": 32, "token_counts": 200, "n_ast_nodes": 326, "n_identifiers": 37, "random_cut": "def test_visualization_compare_performance_output_saved(csv_filename):\n \n input_features = [text_feature(encoder=\"parallel_cnn\")]\n 
output_features = [category_feature()]\n\n # Generate test data\n rel_path = generate_data(input_features, output_features, csv_filename)\n input_features[0][\"encoder\"] = \"parallel_cnn\"\n exp_dir_name = run_experiment_with_visualization(input_features, output_features, dataset=rel_path)\n vis_output_pattern_pdf = os.path.join(exp_dir_name, \"*.pdf\")\n vis_output_pattern_png = os.path.join(exp_dir_name, \"*.png\")\n test_stats = os.path.join(exp_dir_name, TEST_STATISTICS_FILE_NAME)\n\n test_cmd_pdf = [\n \"python\",\n \"-m\",\n \"ludwig.visualize\",\n \"--visualization\",\n \"compare_performance\",\n \"--test_statistics\",\n test_stats,\n test_stats,\n \"-m\",\n \"Model1\",\n \"Model2\",\n \"-od\",\n exp_dir_name,\n ]\n test_cmd_png = test_cmd_pdf.copy() + [\"-ff\", \"png\"]\n\n commands = [test_cmd_pdf, test_cmd_png]\n vis_patterns = [vis_output_pattern_pdf, vis_output_pattern_png]\n\n for command, viz_pattern in zip(commands, vis_patterns):\n result = subprocess.run(command, s" }, { "id": 68678, "commit_id": "ab2d95a74d8beda1d751f7d795f37058826fff18", "repo": "erpnext", "path": "erpnext/manufacturing/doctype/bom_update_log/bom_update_log.py", "file_name": "bom_update_log.py", "fun_name": "resume_bom_cost_update_jobs", "commit_message": "feat: Level-wise BOM cost updation\n\n- Process BOMs level wise and Pause after level is complete\n- Cron job will resume Paused jobs, which will again process the new level and pause at the end\n- This will go on until all BOMs are updated\n- Added Progress section with fields to track updated BOMs in Log\n- Cleanup: Add BOM Updation utils file to contain helper functions/sub-functions\n- Cleanup: BOM Update Log file will only contain functions that are in direct context of the Log\n\nCo-authored-by: Gavin D'souza ", "code": "def resume_bom_cost_update_jobs():\n\t\"Called every 10 minutes via Cron job.\"\n\tpaused_jobs = frappe.db.get_all(\"BOM Update Log\", {\"status\": \"Paused\"})\n\tif not paused_jobs:\n\t\treturn\n\n\tfor job in paused_jobs:\n\t\t# resume from next level\n\t\tprocess_boms_cost_level_wise(update_doc=frappe.get_doc(\"BOM Update Log\", job.name))\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 25, "n_words": 33, "vocab_size": 30, "complexity": 3, "nloc": 7, "token_counts": 46, "n_ast_nodes": 84, "n_identifiers": 10, "random_cut": "def resume_bom_cost_update_jobs():\n\t\"Called every 10 minutes via Cron job.\"\n\tpaused_jobs = frappe.db.get_all(\"BOM Update Log\", {\"status\": \"Paused\"})\n\tif not paused_jobs:\n\t\treturn\n\n\tfor " }, { "id": 271436, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/keras_tensor.py", "file_name": "keras_tensor.py", "fun_name": "__iter__", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def __iter__(self):\n shape = None\n if self.shape.ndims is not None:\n shape = [dim.value for dim in self.shape.dims]\n\n if shape is None:\n raise TypeError(\"Cannot iterate over a Tensor with unknown shape.\")\n if not shape:\n raise TypeError(\"Cannot iterate over a scalar.\")\n if shape[0] is None:\n raise TypeError(\n \"Cannot iterate over a Tensor with unknown first dimension.\"\n )\n return _KerasTensorIterator(self, shape[0])\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 169, "n_words": 58, "vocab_size": 35, 
"complexity": 6, "nloc": 13, "token_counts": 75, "n_ast_nodes": 122, "n_identifiers": 9, "random_cut": "def __iter__(self):\n shape = None\n if self.shape.ndims is not None:\n shape = [dim.value for dim in self.shape.dims]\n\n if shape is None:\n raise TypeError(\"Cannot iterate over a Tensor with unknown shape.\")\n if not shape:\n raise TypeError(\"Cannot iterate over a scalar.\")\n if shape[0] is None:\n raise TypeErr" }, { "id": 73004, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/forms/tests/test_models.py", "file_name": "test_models.py", "fun_name": "setUp", "commit_message": "Reformat with black", "code": "def setUp(self):\n # Create a form page\n self.form_page = make_form_page(to_address=\"to@email.com, another@email.com\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 24, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 28, "n_identifiers": 5, "random_cut": "def setUp(self):\n # Create a form page\n s" }, { "id": 81949, "commit_id": "3bc86ca8cb5e07c8d95ee6e43e1556f2d425d7c6", "repo": "awx", "path": "awx/main/tasks/receptor.py", "file_name": "receptor.py", "fun_name": "write_receptor_config", "commit_message": "Follow up on new execution node creation\n\n- hop nodes are descoped\n- links need to be created on execution node creation\n- expose the 'edit' capabilities on the instance serializer", "code": "def write_receptor_config():\n receptor_config = list(RECEPTOR_CONFIG_STARTER)\n\n instances = Instance.objects.filter(node_type=Instance.Types.EXECUTION)\n for instance in instances:\n peer = {'tcp-peer': {'address': f'{instance.hostname}:{instance.listener_port}', 'tls': 'tlsclient'}}\n receptor_config.append(peer)\n\n lock = FileLock(__RECEPTOR_CONF_LOCKFILE)\n with lock:\n with open(__RECEPTOR_CONF, 'w') as file:\n yaml.dump(receptor_config, file, default_flow_style=False)\n\n receptor_ctl = get_receptor_ctl()\n\n attempts = 10\n backoff = 1\n for attempt in range(attempts):\n try:\n receptor_ctl.simple_command(\"reload\")\n break\n except ValueError:\n logger.warning(f\"Unable to reload Receptor configuration. 
{attempts-attempt} attempts left.\")\n time.sleep(backoff)\n backoff += 1\n else:\n raise RuntimeError(\"Receptor reload failed\")\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 205, "n_words": 68, "vocab_size": 55, "complexity": 4, "nloc": 23, "token_counts": 138, "n_ast_nodes": 262, "n_identifiers": 38, "random_cut": "def write_receptor_config():\n receptor_config = list(RECEPTOR_CONFIG_STARTER)\n\n instances = Instance.objects.filter(node_type=Instance.Types.EXECUTION)\n for instance in instances:\n peer = {'tcp-peer': {'address': f'{instance.hostname}:{instance.listener_port}', 'tls': 'tlsclient'}}\n receptor_config.append(peer)\n\n lock = FileLock(__RECEPTOR_CONF_LOCKFILE)\n with lock:\n with open(__RECEPTOR_CONF, 'w') as file:\n yam" }, { "id": 305682, "commit_id": "420733a064286cfe6fc5cf11483835d15ff83462", "repo": "core", "path": "homeassistant/components/nmbs/sensor.py", "file_name": "sensor.py", "fun_name": "update", "commit_message": "Improve entity type hints [n] (#77824)", "code": "def update(self) -> None:\n \n liveboard = self._api_client.get_liveboard(self._station)\n\n if liveboard is None or not liveboard.get(\"departures\"):\n return\n\n next_departure = liveboard[\"departures\"][\"departure\"][0]\n\n self._attrs = next_departure\n self._state = (\n f\"Track {next_departure['platform']} - {next_departure['station']}\"\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 100, "n_words": 29, "vocab_size": 24, "complexity": 3, "nloc": 10, "token_counts": 59, "n_ast_nodes": 122, "n_identifiers": 10, "random_cut": "def update(self) -> None:\n \n liveboard = self._api_client.get_liveboard(self._station)\n\n if liv" }, { "id": 108421, "commit_id": "4d6568c8220e9141103a947837423144e694f312", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_backends_interactive.py", "file_name": "test_backends_interactive.py", "fun_name": "test_cross_Qt_imports", "commit_message": "TST: forgive more failures on pyside2 / pyside6 cross imports\n\ncloses #23004", "code": "def test_cross_Qt_imports():\n qt5_bindings = [\n dep for dep in ['PyQt5', 'PySide2']\n if importlib.util.find_spec(dep) is not None\n ]\n qt6_bindings = [\n dep for dep in ['PyQt6', 'PySide6']\n if importlib.util.find_spec(dep) is not None\n ]\n if len(qt5_bindings) == 0 or len(qt6_bindings) == 0:\n pytest.skip('need both QT6 and QT5 bindings')\n\n for qt5 in qt5_bindings:\n for qt6 in qt6_bindings:\n for pair in ([qt5, qt6], [qt6, qt5]):\n try:\n _run_helper(_impl_test_cross_Qt_imports,\n *pair,\n timeout=_test_timeout)\n except subprocess.CalledProcessError as ex:\n # if segfault, carry on. 
We do try to warn the user they\n # are doing something that we do not expect to work\n if ex.returncode == -11:\n continue\n # We got the abort signal which is likely because the Qt5 /\n # Qt6 cross import is unhappy, carry on.\n elif ex.returncode == -6:\n continue\n raise\n\n\n@pytest.mark.skipif('TF_BUILD' in os.environ,\n reason=\"this test fails an azure for unknown reasons\")\n@pytest.mark.skipif(os.name == \"nt\", reason=\"Cannot send SIGINT on Windows.\")", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "@pytest.mark.skipif('TF_BUILD' in os.environ,\n reason=\"this test fails an azure for unknown reasons\")\n@pytest.mark.skipif(os.name == \"nt\", reason=\"Cannot send SIGINT on Windows.\")", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 522, "n_words": 144, "vocab_size": 101, "complexity": 13, "nloc": 24, "token_counts": 141, "n_ast_nodes": 287, "n_identifiers": 27, "random_cut": "def test_cross_Qt_imports():\n qt5_bindings = [\n dep for dep in ['PyQt5', 'PySide2']\n if importlib.util.find_spec(dep) is not None\n ]\n qt6_bindings = [\n dep for dep in ['PyQt6', 'PySide6']\n if importlib" }, { "id": 66088, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/employee_advance/test_employee_advance.py", "file_name": "test_employee_advance.py", "fun_name": "get_advances_for_claim", "commit_message": "style: format code with black", "code": "def get_advances_for_claim(claim, advance_name, amount=None):\n\tadvances = get_advances(claim.employee, advance_name)\n\n\tfor entry in advances:\n\t\tif amount:\n\t\t\tallocated_amount = amount\n\t\telse:\n\t\t\tallocated_amount = flt(entry.paid_amount) - flt(entry.claimed_amount)\n\n\t\tclaim.append(\n\t\t\t\"advances\",\n\t\t\t{\n\t\t\t\t\"employee_advance\": entry.name,\n\t\t\t\t\"posting_date\": entry.posting_date,\n\t\t\t\t\"advance_account\": entry.advance_account,\n\t\t\t\t\"advance_paid\": entry.paid_amount,\n\t\t\t\t\"unclaimed_amount\": allocated_amount,\n\t\t\t\t\"allocated_amount\": allocated_amount,\n\t\t\t},\n\t\t)\n\n\treturn claim\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 23, "n_words": 42, "vocab_size": 38, "complexity": 3, "nloc": 19, "token_counts": 93, "n_ast_nodes": 150, "n_identifiers": 16, "random_cut": "def get_advances_for_claim(claim, advance_name, amount=None):\n\tadvances = get_advances(claim.employee, advance_name)\n\n\tfor entry in advances:\n\t\tif" }, { "id": 255342, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/test/automatic_upgrade_test.py", "file_name": "automatic_upgrade_test.py", "fun_name": "test_MaxPool_2", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix 
type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def test_MaxPool_2(self) -> None:\n self._test_op_upgrade('MaxPool', 8, [[1, 1, 5, 5]], [[1, 1, 4, 4], [1, 1, 4, 4]],\n output_types=[TensorProto.FLOAT, TensorProto.INT64],\n attrs={'kernel_shape': [2, 2]}\n )\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 59, "n_words": 24, "vocab_size": 20, "complexity": 1, "nloc": 5, "token_counts": 73, "n_ast_nodes": 99, "n_identifiers": 8, "random_cut": "def test_MaxPool_2(self) -> None:\n self._test_op_upgrade('MaxPool', 8, [[1, 1, 5, 5]" }, { "id": 75654, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/search/tests/test_backends.py", "file_name": "test_backends.py", "fun_name": "test_autocomplete_with_fields_arg", "commit_message": "Reformat with black", "code": "def test_autocomplete_with_fields_arg(self):\n results = self.backend.autocomplete(\"Georg\", models.Author, fields=[\"name\"])\n self.assertUnsortedListEqual(\n [r.name for r in results],\n [\n \"George R.R. Martin\",\n ],\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 87, "n_words": 19, "vocab_size": 19, "complexity": 2, "nloc": 8, "token_counts": 45, "n_ast_nodes": 72, "n_identifiers": 11, "random_cut": "def test_autocomplete_with_fields_arg(self):\n results = self.backend.autocomplete(\"Georg\", models.Author, fields=[\"name\"])\n self.assertUnsortedListEqual(\n [r.name for r in results],\n [\n \"George R.R. Martin\",\n ],\n )\n" }, { "id": 199605, "commit_id": "fc2261c30ec672e27c61066c838f21baf173781e", "repo": "sympy", "path": "sympy/functions/special/gamma_functions.py", "file_name": "gamma_functions.py", "fun_name": "_eval_is_positive", "commit_message": "functions: Espinosa–Moll generalised polygamma function\n\nOlivier Espinosa and Victor Hugo Moll in \"A Generalized Polygamma\nFunction\" (http://www.math.tulane.edu/~vhm/papers_html/genoff.pdf)\ndefine the titular function as\n\n ζ'(s+1, z) + (ψ(-s) + γ) ζ(s+1, z)\nψ(s, z) = ──────────────────────────────────\n Γ(-s)\n\nwhich is defined iff z is not a nonpositive integer. Either directly or\nin the limit this expression reproduces the classical polygamma\nfunctions when s is a nonnegative integer AND for any complex s always\nsatisfies the fundamental identity\n\n∂\n──(ψ(s, z)) = ψ(s+1, z)\n∂z\n\nbut the definition implies ψ(-1, z) = logΓ(z) - log(2π)/2,\nnot just logΓ(z), which is fine.\n\nMathematica's PolyGamma[] also accepts complex arguments but does not\nproduce the same results as the above function, instead implementing a\ngeneralisation by Grossman (1976). 
mpmath does not include the\ngeneralisation, but Fredrik Johansson's other library Arb does:\nhttps://arblib.org/acb.html?highlight=polygamma#c.acb_polygamma", "code": "def _eval_is_positive(self):\n n, z = self.args\n if n.is_positive:\n if n.is_odd and z.is_real:\n return True\n if n.is_even and z.is_positive:\n return False\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 85, "n_words": 20, "vocab_size": 16, "complexity": 6, "nloc": 7, "token_counts": 39, "n_ast_nodes": 62, "n_identifiers": 9, "random_cut": "def _eval_is_positive(self):\n n, z = self.args\n if n.is_positive:\n if n.is_odd and z.is_real:\n return True\n if n.is_even and z.is_positive:\n " }, { "id": 36709, "commit_id": "ae189ef99199d07bc7c6dfb79c46b411e05dfe61", "repo": "transformers", "path": "src/transformers/models/gptj/modeling_gptj.py", "file_name": "modeling_gptj.py", "fun_name": "duplicate_interleave", "commit_message": "Add support for exporting GPT-J to ONNX-TRT (#16492)\n\nAdd support for exporting GPT-J to ONNX-TRT\r\n\r\nCo-authored-by: Tomer Stav ", "code": "def duplicate_interleave(m):\n \n dim0 = m.shape[0]\n m = m.view(-1, 1) # flatten the matrix\n m = m.repeat(1, 2) # repeat all elements into the 2nd dimension\n m = m.view(dim0, -1) # reshape into a matrix, interleaving the copy\n return m\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 60, "n_words": 39, "vocab_size": 28, "complexity": 1, "nloc": 6, "token_counts": 48, "n_ast_nodes": 80, "n_identifiers": 6, "random_cut": "def duplicate_interleave(m):\n \n dim0 = m.shape[0]\n m = m.view(-1, 1) # flatten the matrix\n m = m.repeat(1, 2) # repeat all elements into the 2nd dimension\n m = m.view(dim0, -1) # reshape into a matrix, interleaving th" }, { "id": 290068, "commit_id": "82151bfd40f7b64044a5a9f82f020411430df97b", "repo": "core", "path": "tests/components/hassio/test_init.py", "file_name": "test_init.py", "fun_name": "test_setup_api_push_api_data_default", "commit_message": "Create repairs for unsupported and unhealthy (#80747)", "code": "async def test_setup_api_push_api_data_default(hass, aioclient_mock, hass_storage):\n \n with patch.dict(os.environ, MOCK_ENVIRON):\n result = await async_setup_component(hass, \"hassio\", {\"http\": {}, \"hassio\": {}})\n assert result\n\n assert aioclient_mock.call_count == 16\n assert not aioclient_mock.mock_calls[1][2][\"ssl\"]\n assert aioclient_mock.mock_calls[1][2][\"port\"] == 8123\n refresh_token = aioclient_mock.mock_calls[1][2][\"refresh_token\"]\n hassio_user = await hass.auth.async_get_user(\n hass_storage[STORAGE_KEY][\"data\"][\"hassio_user\"]\n )\n assert hassio_user is not None\n assert hassio_user.system_generated\n assert len(hassio_user.groups) == 1\n assert hassio_user.groups[0].id == GROUP_ID_ADMIN\n assert hassio_user.name == \"Supervisor\"\n for token in hassio_user.refresh_tokens.values():\n if token.token == refresh_token:\n break\n else:\n assert False, \"refresh token not found\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 165, "n_words": 74, "vocab_size": 52, "complexity": 3, "nloc": 21, "token_counts": 174, "n_ast_nodes": 283, "n_identifiers": 27, "random_cut": "async def test_setup_api_push_api_data_default(hass, aioclient_mock, hass_storage):\n \n with patch.dict(os.environ, 
MOCK_ENVIRON):\n result = await async_setup_component(hass, \"hassio\", {\"http\": {}, \"hassio\": {}})\n assert result\n\n assert aioclient_mock.call_count == 16\n assert not aioclient_mock.mock_calls[1][2][\"ssl\"]\n assert aioclient_mock.mock_calls[1][2][\"port\"] == 8123\n refresh_token = aioclient_mock.mock_calls[1][2][\"refresh_token\"]\n hassio_user = await hass.auth.async_get_user(\n hass_storage[STORAGE_KEY][\"data\"][\"hassio_user\"]\n )\n assert hassio_user is not None\n assert hassio_user.system_generated\n assert len(hassio_user.groups) == 1\n assert hassio_user.groups[0].id == GROUP_ID_ADMIN\n assert hassio_user.name == \"Supervisor\"\n for token in hassio_user.refresh_tokens.values():\n if token.token == refresh_token:\n break\n else:\n assert False, \"refresh token not found\"\n\n" }, { "id": 314050, "commit_id": "3824703a64cde109a27a5b9b0b684ee309b160d7", "repo": "core", "path": "homeassistant/components/homematicip_cloud/cover.py", "file_name": "cover.py", "fun_name": "async_close_cover_tilt", "commit_message": "Fix homematicip cloud cover tilt position (#73410)\n\n* cover slats fixed set tilt position\r\n\r\n* Update cover.py\r\n\r\n* Adjust tests\r\n\r\nCo-authored-by: Erik Montnemery ", "code": "async def async_close_cover_tilt(self, **kwargs) -> None:\n \n await self._device.set_slats_level(\n slatsLevel=HMIP_SLATS_CLOSED, channelIndex=self._channel\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 43, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 28, "n_ast_nodes": 47, "n_identifiers": 9, "random_cut": "async def async_close_cover_tilt(self, **kwargs) -> None:\n \n await self." }, { "id": 117291, "commit_id": "13d267c409bf1cc65fca366d1aa4fc51438cbf71", "repo": "mindsdb", "path": "tests/integration_tests/flows/test_http.py", "file_name": "test_http.py", "fun_name": "test_special_queries", "commit_message": "It http test refactoring (#3959)\n\n* HTTP and company independent tests refactoring", "code": "def test_special_queries(self):\n # \"show databases;\",\n # \"show schemas;\",\n # \"show tables;\",\n # \"show tables from mindsdb;\",\n # \"show full tables from mindsdb;\",\n # \"show variables;\",\n # \"show session status;\",\n # \"show global variables;\",\n # \"show engines;\",\n # \"show warnings;\",\n # \"show charset;\",\n # \"show collation;\",\n # \"show models;\",\n # \"show function status where db = 'mindsdb';\",\n # \"show procedure status where db = 'mindsdb';\",\n empty_table = [\n \"show function status\",\n \"show function status where db = 'mindsdb'\",\n \"show procedure status\",\n \"show procedure status where db = 'mindsdb'\",\n \"show warnings\"\n ]\n for query in empty_table:\n try:\n print(query)\n resp = self.sql_via_http(query, RESPONSE_TYPE.TABLE)\n assert len(resp['data']) == 0\n except Exception:\n print(f'Error in query: {query}')\n raise\n\n not_empty_table = [\n \"show databases\",\n \"show schemas\",\n \"show variables\",\n \"show session status\",\n \"show global variables\",\n \"show engines\",\n \"show charset\",\n \"show collation\"\n ]\n for query in not_empty_table:\n try:\n print(query)\n resp = self.sql_via_http(query, RESPONSE_TYPE.TABLE)\n assert len(resp['data']) > 0\n except Exception:\n print(f'Error in query: {query}')\n raise\n\n # show database should be same as show schemas\n try:\n query = 'show databases'\n resp = self.sql_via_http(query, RESPONSE_TYPE.TABLE)\n assert 
len(resp['column_names']) == 1\n assert resp['column_names'][0] == 'Database'\n db_names = [x[0].lower() for x in resp['data']]\n assert 'information_schema' in db_names\n assert 'mindsdb' in db_names\n assert 'files' in db_names\n except Exception:\n print(f'Error in query: {query}')\n raise\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 820, "n_words": 206, "vocab_size": 81, "complexity": 7, "nloc": 46, "token_counts": 198, "n_ast_nodes": 373, "n_identifiers": 15, "random_cut": "def test_special_queries(self):\n # \"show databases;\",\n # \"show schemas;\",\n # \"show tables;\",\n # \"show tables from mindsdb;\",\n # \"show full tables from mindsdb;\",\n # \"show variables;\",\n # \"show session status;\",\n # \"show global variables;\",\n # \"s" }, { "id": 281152, "commit_id": "f40ba0d256a78ab2b8461f0df3a9a52ca7dc5704", "repo": "OpenBBTerminal", "path": "discordbot/economy/glbonds.py", "file_name": "glbonds.py", "fun_name": "glbonds_command", "commit_message": "Bot logging fix (#1105)\n\n* Write bot logs to stdout instead of a file\r\nHeroku's logging uses the stdout and has problems with files\r\n\r\n* Send \"you snooze you lose\" only if debug flag is enabled\r\n\r\n* Replace print statements with logger entries in the economy menu\r\n\r\n* Add logging to bot menu command calls\r\n\r\n* Silence bandit warnings about the REPLACE_ME token\r\n\r\n* Organize imports and update logging in economy menu\r\n\r\n* Organize imports and update logging in dps menu\r\n\r\n* Organize imports and update logging in dd menu\r\n\r\n* Organize imports and update logging in gov menu\r\n\r\n* Organize imports and update logging in options menu\r\n\r\n* Organize imports and update logging in screener menu\r\n\r\n* Organize imports and update logging in ta menu\r\n\r\n* Revert automatic import sorting\r\n\r\n* Add logging to the options reaction helper", "code": "async def glbonds_command(ctx):\n \n\n try:\n # Retrieve data\n df_data = wsj_model.global_bonds()\n\n # Debug user output\n if cfg.DEBUG:\n logger.debug(df_data.to_string())\n\n # Output data\n if df_data.empty:\n df_data_str = \"No global bonds data available\"\n else:\n df_data_str = \"```\" + df_data.to_string(index=False) + \"```\"\n\n embed = discord.Embed(\n title=\"Economy: [WSJ] Global Bonds\",\n description=df_data_str,\n colour=cfg.COLOR,\n )\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n\n await ctx.send(embed=embed)\n\n except Exception as e:\n embed = discord.Embed(\n title=\"ERROR Economy: [WSJ] Global Bonds\",\n colour=cfg.COLOR,\n description=e,\n )\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n\n await ctx.send(embed=embed)\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 345, "n_words": 74, "vocab_size": 48, "complexity": 4, "nloc": 30, "token_counts": 153, "n_ast_nodes": 256, "n_identifiers": 28, "random_cut": "async def glbonds_command(ctx):\n \n\n try:\n # Retrieve data\n df_data = wsj_model.global_bonds()\n\n # Debug user output\n if cfg.DEBUG:\n logger.debug(df_data.to_string())\n\n # Output data\n if df_data.empty:\n df_data_str = \"No global bonds data available\"\n else:\n df_data_str = \"" }, { "id": 94185, "commit_id": "b85fe479194b2d0c607ef082c740c8cd90ff573a", "repo": "sentry", "path": 
"tests/sentry/migrations/test_0311_backfill_slack_settings.py", "file_name": "test_0311_backfill_slack_settings.py", "fun_name": "setup_before_migration", "commit_message": "feat(slack-notifications): adds backfill for slack notification settings (#37321)", "code": "def setup_before_migration(self, apps):\n self.slack_integration = Integration(\n provider=\"slack\",\n external_id=\"1\",\n name=\"Team 1\",\n )\n self.github_integration = Integration(\n provider=\"github\",\n external_id=\"3\",\n name=\"Team 1\",\n )\n self.slack_integration.save()\n self.user1 = self.create_user(date_joined=DEFAULT_JOIN_DATE)\n self.user2 = self.create_user(date_joined=DEFAULT_JOIN_DATE)\n self.user3 = self.create_user(date_joined=DEFAULT_JOIN_DATE)\n self.user4 = self.create_user(\n date_joined=START_DATE_DEFAULT_SLACK_NOTIFICATION + timedelta(days=1)\n )\n self.user5 = self.create_user(date_joined=DEFAULT_JOIN_DATE)\n self.orgA = self.create_organization(owner=self.user1)\n self.orgB = self.create_organization(owner=self.user5)\n self.slack_integration.add_organization(self.orgA)\n self.github_integration.add_organization(self.orgB)\n self.teamA_1 = self.create_team(\n organization=self.orgA, members=[self.user1, self.user2, self.user3, self.user4]\n )\n self.teamA_2 = self.create_team(organization=self.orgA)\n self.teamA_3 = self.create_team(organization=self.orgA)\n self.projectA_1 = self.create_project(\n organization=self.orgA, teams=[self.teamA_1, self.teamA_2, self.teamA_3]\n )\n # setup identities and providers\n slack_provider = self.create_identity_provider(self.slack_integration)\n # skip user 3 intentionally\n for user in [self.user1, self.user2, self.user4]:\n self.create_identity(\n user=user, identity_provider=slack_provider, external_id=str(user.id) + \"stuff\"\n )\n ExternalActor.objects.create(\n actor=self.teamA_2.actor,\n organization=self.orgA,\n integration=self.slack_integration,\n provider=ExternalProviders.SLACK.value,\n external_name=\"test\",\n )\n\n github_provider = self.create_identity_provider(self.github_integration)\n # make a Github identity\n self.create_identity(\n user=self.user5,\n identity_provider=github_provider,\n external_id=str(self.user5.id) + \"stuff\",\n )\n\n # populate settings\n\n # user2 should have one always setting and one default setting\n NotificationSetting.objects.create(\n scope_type=NotificationScopeType.USER.value,\n target_id=self.user2.actor_id,\n provider=ExternalProviders.SLACK.value,\n type=NotificationSettingTypes.ISSUE_ALERTS.value,\n scope_identifier=self.user2.id,\n value=NotificationSettingOptionValues.ALWAYS.value,\n )\n\n # default should be wiped out\n NotificationSetting.objects.create(\n scope_type=NotificationScopeType.USER.value,\n target_id=self.user2.actor_id,\n provider=ExternalProviders.SLACK.value,\n type=NotificationSettingTypes.WORKFLOW.value,\n scope_identifier=self.user2.id,\n value=NotificationSettingOptionValues.DEFAULT.value,\n )\n\n # teamA_2 is like user 2\n NotificationSetting.objects.create(\n scope_type=NotificationScopeType.TEAM.value,\n target_id=self.teamA_2.actor_id,\n provider=ExternalProviders.SLACK.value,\n type=NotificationSettingTypes.ISSUE_ALERTS.value,\n scope_identifier=self.teamA_2.id,\n value=NotificationSettingOptionValues.ALWAYS.value,\n )\n\n # a default to overwrite\n NotificationSetting.objects.create(\n scope_type=NotificationScopeType.TEAM.value,\n target_id=self.teamA_2.actor_id,\n provider=ExternalProviders.SLACK.value,\n type=NotificationSettingTypes.WORKFLOW.value,\n 
scope_identifier=self.teamA_2.id,\n value=NotificationSettingOptionValues.DEFAULT.value,\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 976, "n_words": 181, "vocab_size": 110, "complexity": 2, "nloc": 81, "token_counts": 606, "n_ast_nodes": 917, "n_identifiers": 67, "random_cut": "def setup_before_migration(self, apps):\n self.slack_integration = Integration(\n provider=\"slack\",\n external_id=\"1\",\n name=\"Team 1\",\n )\n self.github_integration = Integration(\n provider=\"github\",\n external_id=\"3\",\n name=\"Team 1\",\n )\n self.slack_integration.save()\n self.user1 = self.create_user(date_joined=DEFAULT_JOIN_DATE)\n self.user2 = self.create_user(date_joined=DEFAULT_JOIN_DATE)\n self.user3 = self.create_user(date_joined=DEFAULT_JOIN_DATE)\n self.user4 = self.create_user(\n " }, { "id": 66574, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v11_0/update_allow_transfer_for_manufacture.py", "file_name": "update_allow_transfer_for_manufacture.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"item\")\n\tfrappe.db.sql(\n\t\t\n\t)\n\n\tfor doctype in [\"BOM Item\", \"Work Order Item\", \"BOM Explosion Item\"]:\n\t\tfrappe.reload_doc(\"manufacturing\", \"doctype\", frappe.scrub(doctype))\n\n\t\tfrappe.db.sql(\n\t\t\t.format(\n\t\t\t\tdoctype\n\t\t\t)\n\t\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 15, "n_words": 26, "vocab_size": 20, "complexity": 2, "nloc": 18, "token_counts": 61, "n_ast_nodes": 112, "n_identifiers": 8, "random_cut": "def execute():\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"item\")\n\tfrappe.db.sql(\n\t\t\n\t)\n\n\tfor doctype in [\"BOM Item\", \"Work Order Item\", \"BOM Explosion Item\"]:\n\t\tfrappe.reload_doc(\"manufacturing\", \"doctype\", frappe.scrub(doctype))\n\n\t\tfrappe.db.sql(\n\t\t\t.fo" }, { "id": 153430, "commit_id": "be9d382e35a9b87565499c029056afe1ddce6f37", "repo": "modin", "path": "modin/pandas/resample.py", "file_name": "resample.py", "fun_name": "sum", "commit_message": "REFACTOR-#4093: Refactor base to be smaller (#4220)\n\nSigned-off-by: jeffreykennethli ", "code": "def sum(self, _method=\"sum\", min_count=0, *args, **kwargs):\n if self.resample_kwargs[\"axis\"] == 0:\n result = self.__groups.sum(min_count=min_count, *args, **kwargs)\n else:\n result = self.__groups.sum(min_count=min_count, *args, **kwargs).T\n return result\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 65, "n_words": 23, "vocab_size": 17, "complexity": 2, "nloc": 6, "token_counts": 71, "n_ast_nodes": 110, "n_identifiers": 10, "random_cut": "def sum(self, _method=\"sum\", min_count=0, *args, **kwargs):\n " }, { "id": 178930, "commit_id": "abfb99b0a05dd76d2ecc6ebc20732a271857c6c8", "repo": "Nuitka", "path": "nuitka/plugins/standard/PySidePyQtPlugin.py", "file_name": "PySidePyQtPlugin.py", "fun_name": "considerDataFiles", "commit_message": "Plugins: Massive cleanup of data file handling\n\n* Move data file handling out of standalone only, allowing support\n for other modes as well.\n\n* Attach logger and tags to data file objects.", "code": "def considerDataFiles(self, module):\n full_name = module.getFullName()\n\n if 
full_name == self.binding_name and (\n \"qml\" in self.getQtPluginsSelected() or \"all\" in self.getQtPluginsSelected()\n ):\n qml_plugin_dir = self._getQmlDirectory()\n qml_target_dir = self._getQmlTargetDir()\n\n self.info(\"Including Qt plugins 'qml' below '%s'.\" % qml_target_dir)\n\n for filename in self._getQmlFileList(dlls=False):\n filename_relative = os.path.relpath(filename, qml_plugin_dir)\n\n yield self.makeIncludedDataFile(\n source_path=filename,\n dest_path=os.path.join(\n qml_target_dir,\n filename_relative,\n ),\n reason=\"Qt QML datafile\",\n tags=\"qml\",\n )\n elif self.isQtWebEngineModule(full_name) and not self.webengine_done_data:\n self.webengine_done_data = True\n\n # TODO: This is probably wrong/not needed on macOS\n if not isMacOS():\n yield self.makeIncludedGeneratedDataFile(\n data=,\n dest_path=\"qt6.conf\" if \"6\" in self.binding_name else \"qt.conf\",\n reason=\"QtWebEngine needs Qt configuration file\",\n )\n\n resources_dir = self._getResourcesPath()\n\n for filename, filename_relative in listDir(resources_dir):\n yield self.makeIncludedDataFile(\n source_path=filename,\n dest_path=os.path.join(\n self._getResourcesTargetDir(), filename_relative\n ),\n reason=\"Qt resources\",\n )\n\n if not self.no_qt_translations:\n translations_path = self._getTranslationsPath()\n\n for filename in getFileList(translations_path):\n filename_relative = os.path.relpath(filename, translations_path)\n dest_path = self._getTranslationsTargetDir()\n\n yield self.makeIncludedDataFile(\n source_path=filename,\n dest_path=os.path.join(dest_path, filename_relative),\n reason=\"Qt translation\",\n tags=\"translation\",\n )\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 882, "n_words": 134, "vocab_size": 90, "complexity": 12, "nloc": 50, "token_counts": 286, "n_ast_nodes": 467, "n_identifiers": 39, "random_cut": "def considerDataFiles(self, module):\n " }, { "id": 191968, "commit_id": "28f72f16b7716f6914cc2dad2e6977b6de58aaab", "repo": "vision", "path": "torchvision/prototype/datasets/_builtin/cub200.py", "file_name": "cub200.py", "fun_name": "_make_info", "commit_message": "add CUB200 prototype datasets (#5154)\n\n* add CUB200 prototype datasets\r\n\r\n* address review comments", "code": "def _make_info(self) -> DatasetInfo:\n return DatasetInfo(\n \"cub200\",\n type=DatasetType.IMAGE,\n homepage=\"http://www.vision.caltech.edu/visipedia/CUB-200-2011.html\",\n dependencies=(\"scipy\",),\n valid_options=dict(\n split=(\"train\", \"test\"),\n year=(\"2011\", \"2010\"),\n ),\n )\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 126, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 11, "token_counts": 52, "n_ast_nodes": 86, "n_identifiers": 12, "random_cut": "def _make_info(self) -> DatasetInfo:\n return DatasetInfo(\n \"cub200\",\n type=DatasetType.IMAGE,\n homepage=\"http://www.vision.caltech.edu/visipedia/CUB-200-2011.html\",\n dependencies=(\"scipy\",),\n valid_options=dict(\n split=(\"train\", \"test\"),\n year=(\"2011\", \"2010\"),\n ),\n )\n" }, { "id": 138214, "commit_id": "7146df617a0dc47bc5a69449a00206e80aceddf7", "repo": "ray", "path": "python/ray/tune/tests/test_searchers.py", "file_name": "test_searchers.py", "fun_name": "testBlendSearch", "commit_message": "[Tune] Fix AxSearch save and nan/inf result handling (#31147)\n\nThis PR fixes AxSearch saving and handles trials 
that produce nan/inf metrics properly.\r\n\r\nSigned-off-by: Justin Yu ", "code": "def testBlendSearch(self):\n from ray.tune.search.flaml import BlendSearch\n\n with self.check_searcher_checkpoint_errors_scope():\n out = tune.run(\n _invalid_objective,\n search_alg=BlendSearch(\n points_to_evaluate=[\n {\"report\": 1.0},\n {\"report\": 2.1},\n {\"report\": 3.1},\n {\"report\": 4.1},\n ]\n ),\n config=self.config,\n metric=\"_metric\",\n mode=\"max\",\n num_samples=16,\n reuse_actors=False,\n )\n self.assertCorrectExperimentOutput(out)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 323, "n_words": 31, "vocab_size": 28, "complexity": 1, "nloc": 20, "token_counts": 101, "n_ast_nodes": 147, "n_identifiers": 19, "random_cut": "def testBlendSearch(self):\n from ray.tune.search.flaml import BlendSearch\n\n with self.check_searcher_checkpoint_errors_scope():\n out = tune.run(\n _invalid_objective,\n search_alg=BlendSearch(\n points_to_evaluate=[\n {\"report\": 1.0},\n {\"report\": 2.1},\n {\"report\": 3.1},\n {\"report\": 4.1},\n ]\n ),\n config=self.config,\n metric=\"_metric\"," }, { "id": 56614, "commit_id": "4c4a47c7365d5c1719c5c632adcfd1731654b920", "repo": "prefect", "path": "src/prefect/orion/database/migrations/versions/postgresql/2022_06_20_123921_7296741dff68_add_system_column_for_block_types.py", "file_name": "2022_06_20_123921_7296741dff68_add_system_column_for_block_types.py", "fun_name": "downgrade", "commit_message": "is_system_block_type → is_protected", "code": "def downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table(\"block_type\", schema=None) as batch_op:\n batch_op.drop_column(\"is_protected\")\n # ### end Alembic commands ###\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 40, "n_words": 25, "vocab_size": 19, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 48, "n_identifiers": 6, "random_cut": "def downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_a" }, { "id": 145256, "commit_id": "36a31cb6fde95d490c81c6de5d9f911b4cac8af2", "repo": "ray", "path": "python/ray/_private/usage/usage_lib.py", "file_name": "usage_lib.py", "fun_name": "_generate_cluster_metadata", "commit_message": "[Usage Stats] Implement usage stats report \"Turned off by default\". (#22249)\n\nThis is the second PR to implement usage stats on Ray. Please refer to the file usage_lib.py for more details.\r\n\r\nThe full specification is here https://docs.google.com/document/d/1ZT-l9YbGHh-iWRUC91jS-ssQ5Qe2UQ43Lsoc1edCalc/edit#heading=h.17dss3b9evbj.\r\n\r\nThis adds a dashboard module to enable usage stats. **Usage stats report is turned off by default** after this PR. We can control the report (enablement, report period, and URL. Note that URL is strictly for testing) using the env variable. \r\n\r\n## NOTE\r\nThis requires us to add `requests` to the default library. `requests` must be okay to be included because\r\n1. it is extremely lightweight. It is implemented only with built-in libs.\r\n2. It is really stable. The project basically claims they are \"deprecated\", meaning no new features will be added there.\r\n\r\ncc @edoakes @richardliaw for the approval\r\n\r\nFor the HTTP request, I was alternatively considered httpx, but it was not as lightweight as `requests`. 
So I decided to implement async requests using the thread pool.", "code": "def _generate_cluster_metadata():\n \n ray_version, python_version = ray._private.utils.compute_version_info()\n # These two metadata is necessary although usage report is not enabled\n # to check version compatibility.\n metadata = {\n \"ray_version\": ray_version,\n \"python_version\": python_version,\n }\n # Additional metadata is recorded only when usage stats are enabled.\n if _usage_stats_enabled():\n metadata.update(\n {\n \"schema_version\": usage_constant.SCHEMA_VERSION,\n \"source\": os.getenv(\"RAY_USAGE_STATS_SOURCE\", \"OSS\"),\n \"session_id\": str(uuid.uuid4()),\n \"git_commit\": ray.__commit__,\n \"os\": sys.platform,\n \"session_start_timestamp_ms\": int(time.time() * 1000),\n }\n )\n return metadata\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 232, "n_words": 65, "vocab_size": 53, "complexity": 2, "nloc": 18, "token_counts": 97, "n_ast_nodes": 173, "n_identifiers": 22, "random_cut": "def _generate_cluster_metadata():\n \n ray_version, python_version = ray._private.utils.compute_version_info()\n # These two metadata is necessary although usage report is not enabled\n # to check version compatibility.\n metadata = {\n \"ray_version\": ray_version,\n \"python_version\": python_version,\n }\n # Additional metadata is recorded only when usage s" }, { "id": 205992, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/forms/models.py", "file_name": "models.py", "fun_name": "construct_instance", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def construct_instance(form, instance, fields=None, exclude=None):\n \n from django.db import models\n\n opts = instance._meta\n\n cleaned_data = form.cleaned_data\n file_field_list = []\n for f in opts.fields:\n if (\n not f.editable\n or isinstance(f, models.AutoField)\n or f.name not in cleaned_data\n ):\n continue\n if fields is not None and f.name not in fields:\n continue\n if exclude and f.name in exclude:\n continue\n # Leave defaults for fields that aren't in POST data, except for\n # checkbox inputs because they don't appear in POST data if not checked.\n if (\n f.has_default()\n and form[f.name].field.widget.value_omitted_from_data(\n form.data, form.files, form.add_prefix(f.name)\n )\n and cleaned_data.get(f.name) in form[f.name].field.empty_values\n ):\n continue\n # Defer saving file-type fields until after the other fields, so a\n # callable upload_to can use the values from other fields.\n if isinstance(f, models.FileField):\n file_field_list.append(f)\n else:\n f.save_form_data(instance, cleaned_data[f.name])\n\n for f in file_field_list:\n f.save_form_data(instance, cleaned_data[f.name])\n\n return instance\n\n\n# ModelForms #################################################################\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 406, "n_words": 134, "vocab_size": 87, "complexity": 14, "nloc": 31, "token_counts": 204, "n_ast_nodes": 316, "n_identifiers": 29, "random_cut": "def construct_instance(form, instance, fields=None, exclude=None):\n \n from django.db import models\n\n opts = instance._meta\n\n cleaned_data = form.cleaned_data\n file_field_list = []\n for f in opts.fields:\n if (\n not f.editable\n or isinstance(f, models.AutoField)\n or f.name not in cleaned_data\n ):\n 
continue\n if fields is not None and f.name not in fields:\n continue\n if exclude and f.name in exclude:\n continue\n # Leave defaults for fields that aren't in POST data, except for\n # checkbox inputs because they don" }, { "id": 87187, "commit_id": "5462ee11ad11ebb9a50323befcd286816d7898c8", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_project_details.py", "file_name": "test_project_details.py", "fun_name": "test_rule_config_serializer", "commit_message": "feat(ds): Support new DS behaviour in project_details endpoint (#40387)\n\nSupports new adaptive dynamic sampling behaviour alongside\r\nthe deprecated dynamic sampling behaviour and achieves that\r\nthrough feature flag differentiation\r\n\r\nThis PR achieve that through the following:\r\n- Introducing a new `DynamicSamplingBiasSerializer` which is composed of\r\nid representing the bias name and a boolean flag indicating whether that\r\nparticular flag is active or not\r\n- Modifies current existing behavior for both old sampling flag and new\r\nsampling flag. Essentially the new setup entails that to be on the old\r\ndynamic sampling, the following flags need to be enabled\r\n\"organizations:server-side-sampling\" and\r\n\"organizations:server-side-sampling-ui\", and to be on the new dynamic\r\nsampling configurations, you need the following flags to be enabled\r\n\"organizations:dynamic-sampling-basic\" and\r\n\"organizations:server-side-sampling\"\r\nP.S. 1: These flags will be replaced \r\n\"organizations:server-side-sampling-ui\" ->\r\n\"organizations:dynamic-sampling-deprecated\"\r\n\"organizations:server-side-sampling-basic\" ->\r\n\"organizations:dynamic-sampling\"\r\nHence, these feature flags need to be updated once this PR lands\r\nhttps://github.com/getsentry/sentry/pull/40388\r\nP.S. 2: If a project is on the new plan and the old plan, the new plan\r\ntakes precedence\r\n- Introduces default biases that are enabled by default and can be\r\noverwritten. The motivation to do this is to be able to add new biases\r\nthat are enabled by default, and both the GET and PUT request honor this\r\nlist\r\n- `GET` and `POST` endpoint does a dictionary update of user's stored\r\nbiases on the default biases that are hardcoded, and returns them to the\r\nUI/ relay. 
This means that the introduced project option\r\n\"sentry:dynamic_sampling_biases\" might not have all the toggles\r\nenabled/disabled through the UI but only the ones that a customer chose\r\nto modify\r\n\r\n\r\nFollowup:\r\n- This new feature flag behaviour needs to be reflected in ProjectConfig\r\ncomputations", "code": "def test_rule_config_serializer(self):\n data = {\n \"rules\": [\n {\n \"sampleRate\": 0.7,\n \"type\": \"trace\",\n \"active\": False,\n \"id\": 1,\n \"condition\": {\n \"op\": \"and\",\n \"inner\": [\n {\"op\": \"eq\", \"name\": \"field1\", \"value\": [\"val\"]},\n {\"op\": \"glob\", \"name\": \"field1\", \"value\": [\"val\"]},\n ],\n },\n },\n {\n \"sampleRate\": 0.7,\n \"type\": \"trace\",\n \"active\": False,\n \"id\": 2,\n \"condition\": {\n \"op\": \"and\",\n \"inner\": [],\n },\n },\n ],\n \"next_id\": 3,\n }\n\n serializer = DynamicSamplingSerializer(\n data=data,\n context={\"project\": self.create_project(), \"request\": self.make_request()},\n )\n assert serializer.is_valid()\n assert data == serializer.validated_data\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 626, "n_words": 73, "vocab_size": 45, "complexity": 1, "nloc": 35, "token_counts": 161, "n_ast_nodes": 287, "n_identifiers": 10, "random_cut": "def test_rule_config_serializer(self):\n data = {\n \"rules\": [\n {\n \"sampleRate\": 0.7,\n \"type\": \"trace\",\n \"active\": False,\n \"id\": 1,\n \"condition\": {\n \"op\": \"and\",\n \"inner\": [\n {\"op\": \"eq\", \"name\": \"field1\", \"value\": [\"val\"]},\n {\"op\": \"glob\", \"name\": \"field1\", \"value\": [\"val\"]},\n ],\n },\n },\n {\n \"sampleRate\": 0.7,\n \"type\": \"trace\",\n \"active\": False,\n \"id\": 2,\n " }, { "id": 283302, "commit_id": "b71abcfbf4d7e8ac1855522aff0378e13c8b5362", "repo": "OpenBBTerminal", "path": "openbb_terminal/stocks/stocks_controller.py", "file_name": "stocks_controller.py", "fun_name": "call_fa", "commit_message": "Updating some names (#1575)\n\n* quick econ fix\r\n\r\n* black\r\n\r\n* keys and feature flags\r\n\r\n* terminal name :eyes:\r\n\r\n* some more replacements\r\n\r\n* some more replacements\r\n\r\n* edit pyproject\r\n\r\n* gst -> openbb\r\n\r\n* add example portfolios back to git\r\n\r\n* Update api from gst\r\n\r\n* sorry. 
skipping some tests\r\n\r\n* another round of names\r\n\r\n* another round of test edits\r\n\r\n* Missed some .gst refs and update timezone\r\n\r\n* water mark stuff\r\n\r\n* Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS\r\n\r\n* fix more GST to OpenBB Terminal\r\n\r\n* Logging : merge conflicts with main\r\n\r\n* Revert wrong files\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA ", "code": "def call_fa(self, _):\n \n if self.ticker:\n from openbb_terminal.stocks.fundamental_analysis import fa_controller\n\n self.queue = self.load_class(\n fa_controller.FundamentalAnalysisController,\n self.ticker,\n self.start,\n self.interval,\n self.suffix,\n self.queue,\n )\n else:\n console.print(\"Use 'load ' prior to this command!\", \"\\n\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 183, "n_words": 28, "vocab_size": 28, "complexity": 2, "nloc": 13, "token_counts": 64, "n_ast_nodes": 100, "n_identifiers": 16, "random_cut": "def call_fa(self, _):\n \n if self.ticker:\n from openbb_terminal.stocks.fundamental_analysis import fa_controller\n\n self.queue = self.load_class(\n fa_controller.FundamentalAnalysisController,\n self.ticker,\n" }, { "id": 334496, "commit_id": "f4ee3498b325ae0b2dbb4e4f7b7a3294a6185113", "repo": "diffusers", "path": "src/diffusers/models/vqvae.py", "file_name": "vqvae.py", "fun_name": "forward", "commit_message": "add vqvae", "code": "def forward(self, x, temb):\n h = x\n h = self.norm1(h)\n h = nonlinearity(h)\n h = self.conv1(h)\n\n if temb is not None:\n h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]\n\n h = self.norm2(h)\n h = nonlinearity(h)\n h = self.dropout(h)\n h = self.conv2(h)\n\n if self.in_channels != self.out_channels:\n if self.use_conv_shortcut:\n x = self.conv_shortcut(x)\n else:\n x = self.nin_shortcut(x)\n\n return x + h\n\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 197, "n_words": 58, "vocab_size": 31, "complexity": 4, "nloc": 17, "token_counts": 128, "n_ast_nodes": 203, "n_identifiers": 17, "random_cut": "def forward(self, x, temb):\n h = x\n h = self.norm1(h)\n h = nonlinearity(h)\n h = self.conv1(h)\n\n if temb is not None:\n h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]\n\n h = self.norm2(h)\n h = nonlinearity(h)\n" }, { "id": 252253, "commit_id": "83e543c3e66654b952f1979c0adaa62df91b2832", "repo": "mitmproxy", "path": "test/mitmproxy/addons/test_proxyauth.py", "file_name": "test_proxyauth.py", "fun_name": "test_is_http_proxy", "commit_message": "add multi proxy mode\n\nThis commit makes it possible for mitmproxy to spawn multiple\nTCP/UDP proxy servers at the same time, see\nhttps://github.com/mitmproxy/mitmproxy/discussions/5288", "code": "def test_is_http_proxy(mode, expected):\n f = tflow.tflow()\n f.client_conn.proxy_mode = ProxyMode.parse(mode)\n assert proxyauth.is_http_proxy(f) == expected\n\n\n@pytest.mark.parametrize(\n \"is_http_proxy, expected\",\n [\n (True, \"Proxy-Authorization\"),\n (False, \"Authorization\"),\n ],\n)", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"is_http_proxy, expected\",\n [\n (True, \"Proxy-Authorization\"),\n (False, \"Authorization\"),\n ],\n)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 52, "n_words": 
23, "vocab_size": 22, "complexity": 1, "nloc": 4, "token_counts": 35, "n_ast_nodes": 96, "n_identifiers": 14, "random_cut": "def test_is_http_proxy(mode, expected):\n f = tflow.tflow()\n f.client_conn.proxy_mode = ProxyMode.parse(mode)\n assert proxyauth.is_http_proxy(f) == expected\n\n\n@pytest.mark.parametrize(\n \"is_http_pr" }, { "id": 207203, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_inlines/tests.py", "file_name": "tests.py", "fun_name": "test_stacked_inline_hidden_field_with_view_only_permissions", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_stacked_inline_hidden_field_with_view_only_permissions(self):\n \n self.client.force_login(self.view_only_user)\n url = reverse(\n \"stacked_inline_hidden_field_in_group_admin:admin_inlines_someparentmodel_change\",\n args=(self.parent.pk,),\n )\n response = self.client.get(url)\n # The whole line containing name + position fields is not hidden.\n self.assertContains(\n response, '
    '\n )\n # The div containing the position field is hidden.\n self.assertInHTML(\n '
    '\n ''\n '
    0
    ',\n response.rendered_content,\n )\n self.assertInHTML(\n '
    '\n ''\n '
    1
    ',\n response.rendered_content,\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 274, "n_words": 62, "vocab_size": 41, "complexity": 1, "nloc": 22, "token_counts": 78, "n_ast_nodes": 139, "n_identifiers": 15, "random_cut": "def test_stacked_inline_hidden_field_with_view_only_permissions(self):\n \n self.client.force_login(self.view_only_user)\n url = reverse(\n \"stacked_inline_hidden_field_in_group_admin:admin_inlines_someparentmodel_change\",\n args=(self.parent.pk,),\n )\n response = self.client.get(url)\n # The whole line containing name + position fields is not hidden.\n self.assertContains(\n response, '
    '\n )\n # The div containing the position field is hidden.\n self.assertInHTML(\n '
    3, Contains(t, Interval.Lopen(3, 4))).simplify() == \\\n 1 - lowergamma(g, 3*l)/gamma(g) # equivalent to P(X(1)>3)\n\n\n #test issue 20078\n assert (2*X(t) + 3*X(t)).simplify() == 5*X(t)\n assert (2*X(t) - 3*X(t)).simplify() == -X(t)\n assert (2*(0.25*X(t))).simplify() == 0.5*X(t)\n assert (2*X(t) * 0.25*X(t)).simplify() == 0.5*X(t)**2\n assert (X(t)**2 + X(t)**3).simplify() == (X(t) + 1)*X(t)**2", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 297, "n_words": 165, "vocab_size": 116, "complexity": 1, "nloc": 25, "token_counts": 546, "n_ast_nodes": 833, "n_identifiers": 35, "random_cut": "def test_GammaProcess_symbolic():\n t, d, x, y, g, l = symbols('t d x y g l', positive=True)\n X = GammaProcess(\"X\", l, g)\n\n raises(NotImplementedError, lambda: X[t])\n raises(IndexError, lambda: X(-1))\n assert isinstance(X(t), RandomIndexedSymbol)\n assert X.state_space == Interval(0, oo)\n assert X.distribution(t) == GammaDistribution(g*t, 1/l)\n with warns_deprecated_sympy():\n X.distribution(X(t))\n assert X.joint_distribution(5, X(3)) == JointDistributionHandmade(Lambda(\n (X(5), X(3)), l**(8*g)*exp(-l*X(3))*exp(-l*X(5))*X(3)**(3*g - 1)*X(5)**(5*g\n - 1)/(gamma(3*g)*gamma(5*g))))\n # property of the gamma process at any given timestamp\n assert E(X(t)) == g*t/l\n assert variance(X(t)).simplify() == g*t/l**2\n\n # Equivalent to E(2*X(1)) + E(X(1)**2) + E(X(1)**3), where E(X(1)) == g/l\n assert E(X(t)**2 + X(d)*2 + X(y)**3, Contains(t, Interval.Lopen(0, 1))\n & Contains(d, Interval.Lopen(1, 2)) & Contains(y, Interval.Ropen(3, 4))) == \\\n 2*g/l + (g**2 + g)/l**2 + (g**3 + 3" }, { "id": 187001, "commit_id": "ab1ee5e5f323edc1b4c6b3c8e357771b820161ea", "repo": "streamlink", "path": "src/streamlink/plugins/mildom.py", "file_name": "mildom.py", "fun_name": "_get_live_streams", "commit_message": "plugins.mildom: get token for livestream (#4375)", "code": "def _get_live_streams(self, channel_id):\n # Get quality info and check if user is live1\n data = self.session.http.get(\n \"https://cloudac.mildom.com/nonolive/gappserv/live/enterstudio\",\n params={\n \"__platform\": \"web\",\n \"user_id\": channel_id,\n },\n headers={\"Accept-Language\": \"en\"},\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"code\": int,\n validate.optional(\"message\"): str,\n validate.optional(\"body\"): {\n validate.optional(\"status\"): int,\n \"anchor_live\": int,\n validate.optional(\"live_type\"): int,\n \"ext\": {\n \"cmode_params\": [{\n \"cmode\": str,\n \"name\": str,\n }],\n validate.optional(\"live_mode\"): int,\n },\n },\n },\n )\n )\n log.trace(f\"{data!r}\")\n if data[\"code\"] != 0:\n log.debug(data.get(\"message\", \"Mildom API returned an error\"))\n return\n if data[\"body\"][\"anchor_live\"] != 11:\n log.debug(\"User doesn't appear to be live\")\n return\n qualities = []\n for quality_info in data[\"body\"][\"ext\"][\"cmode_params\"]:\n qualities.append((quality_info[\"name\"], \"_\" + quality_info[\"cmode\"] if quality_info[\"cmode\"] != \"raw\" else \"\"))\n\n # Get token\n data = self.session.http.post(\n \"https://cloudac.mildom.com/nonolive/gappserv/live/token\",\n params={\n \"__platform\": \"web\",\n \"__guest_id\": \"pc-gp-{}\".format(uuid4()),\n },\n headers={\"Accept-Language\": \"en\"},\n json={\"host_id\": channel_id, \"type\": \"hls\"},\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"code\": int,\n validate.optional(\"message\"): str,\n validate.optional(\"body\"): {\n \"data\": [\n 
{\"token\": str, }\n ],\n }\n }\n )\n )\n log.trace(f\"{data!r}\")\n if data[\"code\"] != 0:\n log.debug(data.get(\"message\", \"Mildom API returned an error\"))\n return\n token = data[\"body\"][\"data\"][0][\"token\"]\n\n # Create stream URLs\n data = self.session.http.get(\n \"https://cloudac.mildom.com/nonolive/gappserv/live/liveserver\",\n params={\n \"__platform\": \"web\",\n \"user_id\": channel_id,\n \"live_server_type\": \"hls\",\n },\n headers={\"Accept-Language\": \"en\"},\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"code\": int,\n validate.optional(\"message\"): str,\n validate.optional(\"body\"): {\n \"stream_server\": validate.url(),\n }\n }\n )\n )\n log.trace(f\"{data!r}\")\n if data[\"code\"] != 0:\n log.debug(data.get(\"message\", \"Mildom API returned an error\"))\n return\n base_url = url_concat(data[\"body\"][\"stream_server\"], f\"{channel_id}{{}}.m3u8?{token}\")\n self.session.http.headers.update({\"Referer\": \"https://www.mildom.com/\"})\n for quality in qualities:\n yield quality[0], HLSStream(self.session, base_url.format(quality[1]))\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 1502, "n_words": 208, "vocab_size": 108, "complexity": 8, "nloc": 91, "token_counts": 538, "n_ast_nodes": 963, "n_identifiers": 33, "random_cut": "def _get_live_streams(self, channel_id):\n # Get quality info and check if user is live1\n data = self.session.http.get(\n \"https://cloudac.mildom.com/nonolive/gappserv/live/enterstudio\",\n params={\n \"__platform\": \"web\",\n \"user_id\": channel_id,\n },\n headers={\"Accept-Language\": \"en\"},\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"code\": int,\n validate.optional(\"message\"): str,\n validate.optional(\"body\"): {\n " }, { "id": 258117, "commit_id": "1a60e2113753f3337dc7d52017f949c0aa29e6f4", "repo": "haystack", "path": "test/nodes/test_document_merger.py", "file_name": "test_document_merger.py", "fun_name": "test_document_merger_run", "commit_message": "refactor: simplify Summarizer, add Document Merger (#3452)\n\n* remove generate_single_summary\r\n\r\n* update schemas\r\n\r\n* remove unused import\r\n\r\n* fix mypy\r\n\r\n* fix mypy\r\n\r\n* test: summarizer doesnt change content\r\n\r\n* other test correction\r\n\r\n* move test_summarizer_translation to test_extractor_translation\r\n\r\n* fix test\r\n\r\n* first try for doc merger\r\n\r\n* reintroduce and deprecate generate_single_summary\r\n\r\n* progress in document merger\r\n\r\n* document merger!\r\n\r\n* mypy, pylint fixes\r\n\r\n* use generator\r\n\r\n* added test that will fail in 1.12\r\n\r\n* adapt to review\r\n\r\n* extended deprecation docstring\r\n\r\n* Update test/nodes/test_extractor_translation.py\r\n\r\n* Update test/nodes/test_summarizer.py\r\n\r\n* Update test/nodes/test_summarizer.py\r\n\r\n* black\r\n\r\n* documents fixture\r\n\r\nCo-authored-by: Sara Zan ", "code": "def test_document_merger_run():\n separator = \"|\"\n dm = DocumentMerger(separator=separator)\n result = dm.run(documents)\n\n assert len(result[0][\"documents\"]) == 1\n assert result[0][\"documents\"][0].content == separator.join([doc[\"content\"] for doc in doc_dicts])\n assert result[0][\"documents\"][0].meta == {\"flat_field\": 1, \"nested_field\": {1: 2, \"c\": {\"3\": 3}}}\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 51, "n_words": 34, 
"vocab_size": 28, "complexity": 2, "nloc": 7, "token_counts": 100, "n_ast_nodes": 166, "n_identifiers": 13, "random_cut": "def test_document_merger_run():\n separator = \"|\"\n dm = DocumentMerger(separator=" }, { "id": 298481, "commit_id": "a0c7fca0033daf7dd04ce9ef2d54aada529d81b3", "repo": "core", "path": "homeassistant/components/homekit_controller/light.py", "file_name": "light.py", "fun_name": "supported_color_modes", "commit_message": "Use ColorMode enum in homekit_controller (#70503)", "code": "def supported_color_modes(self) -> set[ColorMode | str] | None:\n \n color_modes: set[ColorMode | str] = set()\n\n if self.service.has(CharacteristicsTypes.HUE) or self.service.has(\n CharacteristicsTypes.SATURATION\n ):\n color_modes.add(ColorMode.HS)\n\n if self.service.has(CharacteristicsTypes.COLOR_TEMPERATURE):\n color_modes.add(ColorMode.COLOR_TEMP)\n\n if not color_modes and self.service.has(CharacteristicsTypes.BRIGHTNESS):\n color_modes.add(ColorMode.BRIGHTNESS)\n\n if not color_modes:\n color_modes.add(ColorMode.ONOFF)\n\n return color_modes\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 147, "n_words": 36, "vocab_size": 26, "complexity": 7, "nloc": 14, "token_counts": 115, "n_ast_nodes": 186, "n_identifiers": 17, "random_cut": "def supported_color_modes(self) -> set[ColorMode | str] | None:\n \n colo" }, { "id": 200402, "commit_id": "24f1e7730119fe958cc8e28411f790c9a5ec04eb", "repo": "sympy", "path": "sympy/holonomic/tests/test_holonomic.py", "file_name": "test_holonomic.py", "fun_name": "test_evalf_rk4", "commit_message": "Fix various typos\n\nFound via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`", "code": "def test_evalf_rk4():\n x = symbols('x')\n R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')\n\n # log(1+x)\n p = HolonomicFunction((1 + x)*Dx**2 + Dx, x, 0, [0, 1])\n\n # path taken is a straight line from 0 to 1, on the real axis\n r = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]\n s = '0.693146363174626' # approx. equal to log(2) i.e. 
0.693147180559945\n assert sstr(p.evalf(r)[-1]) == s\n\n # path taken is a triangle 0-->1+i-->2\n r = [0.1 + 0.1*I]\n for i in range(9):\n r.append(r[-1]+0.1+0.1*I)\n for i in range(10):\n r.append(r[-1]+0.1-0.1*I)\n\n # close to the exact solution 1.09861228866811\n # imaginary part also close to zero\n s = '1.098616 + 1.36083e-7*I'\n assert sstr(p.evalf(r)[-1].n(7)) == s\n\n # sin(x)\n p = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1])\n s = '0.90929463522785 + 1.52655665885959e-16*I'\n assert sstr(p.evalf(r)[-1]) == s\n\n # computing sin(pi/2) using this method\n # using a linear path from 0 to pi/2\n r = [0.1]\n for i in range(14):\n r.append(r[-1] + 0.1)\n r.append(pi/2)\n s = '0.999999895088917' # close to 1.0 (exact solution)\n assert sstr(p.evalf(r)[-1]) == s\n\n # trying different path, a rectangle (0-->i-->pi/2 + i-->pi/2)\n # computing the same value sin(pi/2) using different path\n r = [0.1*I]\n for i in range(9):\n r.append(r[-1]+0.1*I)\n for i in range(15):\n r.append(r[-1]+0.1)\n r.append(pi/2+I)\n for i in range(10):\n r.append(r[-1]-0.1*I)\n\n # close to 1.0\n s = '1.00000003415141 + 6.11940487991086e-16*I'\n assert sstr(p.evalf(r)[-1]) == s\n\n # cos(x)\n p = HolonomicFunction(Dx**2 + 1, x, 0, [1, 0])\n # compute cos(pi) along 0-->pi\n r = [0.05]\n for i in range(61):\n r.append(r[-1]+0.05)\n r.append(pi)\n # close to -1 (exact answer)\n s = '-0.999999993238714'\n assert sstr(p.evalf(r)[-1]) == s\n\n # a rectangular path (0 -> i -> 2+i -> 2)\n r = [0.1*I]\n for i in range(9):\n r.append(r[-1]+0.1*I)\n for i in range(20):\n r.append(r[-1]+0.1)\n for i in range(10):\n r.append(r[-1]-0.1*I)\n\n p = HolonomicFunction(Dx**2 + 1, x, 0, [1,1]).evalf(r)\n s = '0.493152791638442 - 1.41553435639707e-15*I'\n assert sstr(p[-1]) == s\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 534, "n_words": 302, "vocab_size": 138, "complexity": 11, "nloc": 50, "token_counts": 599, "n_ast_nodes": 862, "n_identifiers": 20, "random_cut": "def test_evalf_rk4():\n x = symbols('x')\n R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')\n\n # log(1+x)\n p = HolonomicFunction((1 + x)*Dx**2 + Dx, x, 0, [0, 1])\n\n # path taken is a straight line from 0 to 1, on the real axis\n r = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]\n s = '0.693146363174626' # approx. equal to log(2) i.e. 
0.693147180559945\n assert sstr(p.evalf(r)[-1]) == s\n\n # path taken is a triangle 0-->1+i-->2\n r = [0.1 + 0.1*I]\n for i in range(9):\n r.append(r[-1]+0.1+0.1*I)\n for i in range(10):\n r.append(r[-1]+0.1-0.1*I)\n\n # close to the exact solution 1.09861228866811\n # imaginary part also close to zero\n s = '1.098616 + 1.36083e-7*I'\n assert sstr(p.evalf(r)[-1].n(7)) == s\n\n # sin(x)\n p = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1])\n s = '0.90929463522785 + 1.52655665885959e-16*I'\n assert sstr(p.evalf(r)[-1]) == s\n\n # computing sin(pi/2) using this method\n # using a linear path from 0 to pi/2\n r = [0.1]\n for i in range(14):\n r.append(r[-1] + 0.1)\n r.append(pi/2)\n s = '0.999999895088917' # close to 1.0 (exact solution)\n assert sstr(p.evalf(r)[-1]) == s\n\n # trying different path, a rectangle (0-->i-->pi/2 + i-->pi/2)\n # computing the same value sin(pi/2) us" }, { "id": 258872, "commit_id": "1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe", "repo": "scikit-learn", "path": "sklearn/feature_extraction/tests/test_image.py", "file_name": "test_image.py", "fun_name": "test_grid_to_graph", "commit_message": "MNT Update black to stable version (#22474)", "code": "def test_grid_to_graph():\n # Checking that the function works with graphs containing no edges\n size = 2\n roi_size = 1\n # Generating two convex parts with one vertex\n # Thus, edges will be empty in _to_graph\n mask = np.zeros((size, size), dtype=bool)\n mask[0:roi_size, 0:roi_size] = True\n mask[-roi_size:, -roi_size:] = True\n mask = mask.reshape(size**2)\n A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)\n assert connected_components(A)[0] == 2\n\n # check ordering\n mask = np.zeros((2, 3), dtype=bool)\n mask[0, 0] = 1\n mask[:, 2] = 1\n graph = grid_to_graph(2, 3, 1, mask=mask.ravel()).todense()\n desired = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])\n np.testing.assert_array_equal(graph, desired)\n\n # Checking that the function works whatever the type of mask is\n mask = np.ones((size, size), dtype=np.int16)\n A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)\n assert connected_components(A)[0] == 1\n\n # Checking dtype of the graph\n mask = np.ones((size, size))\n A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=bool)\n assert A.dtype == bool\n A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=int)\n assert A.dtype == int\n A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float64)\n assert A.dtype == np.float64\n\n\n@ignore_warnings(category=DeprecationWarning) # scipy deprecation inside face", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@ignore_warnings(category=DeprecationWarning)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 261, "n_words": 172, "vocab_size": 91, "complexity": 1, "nloc": 25, "token_counts": 342, "n_ast_nodes": 512, "n_identifiers": 31, "random_cut": "def test_grid_to_graph():\n # Checking that the function works with graphs containing no edges\n size = 2\n roi_size = 1\n # Generating two convex parts with one vertex\n # Thus, edges will be empty in _to_graph\n mask = np.zeros((size, size), dtype=bool)\n mask[0:roi_size, 0:roi_size] = True\n mask[-roi_size:, -roi_size:] = True\n mask = mask.reshape(size**2)\n A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)\n assert connected_components(A)[0] == 2\n\n # check ordering\n mask = np.zeros((2, 3), dtype=bool)\n mask[0, 0] = 1\n mask[:, 2] = 1\n graph = grid_to_graph(2, 3, 1, 
mask=mask.ravel()).todense()\n desired = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])\n np.testing.assert_array_equal(graph, desired)\n\n # Checking that the function works whatever the type of mask is\n mask = np.ones((size, size), dtype=np.int16)\n A = grid_to_graph(n_x=size, n_y=size, n_" }, { "id": 269380, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/applications/imagenet_utils.py", "file_name": "imagenet_utils.py", "fun_name": "correct_pad", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def correct_pad(inputs, kernel_size):\n \n img_dim = 2 if backend.image_data_format() == \"channels_first\" else 1\n input_size = backend.int_shape(inputs)[img_dim : (img_dim + 2)]\n if isinstance(kernel_size, int):\n kernel_size = (kernel_size, kernel_size)\n if input_size[0] is None:\n adjust = (1, 1)\n else:\n adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)\n correct = (kernel_size[0] // 2, kernel_size[1] // 2)\n return (\n (correct[0] - adjust[0], correct[0]),\n (correct[1] - adjust[1], correct[1]),\n )\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 128, "n_words": 66, "vocab_size": 49, "complexity": 4, "nloc": 14, "token_counts": 136, "n_ast_nodes": 214, "n_identifiers": 12, "random_cut": "def correct_pad(inputs, kernel_size):\n \n img_dim = 2 if backend.image_data_format() == \"channels_first\" else 1\n input_size = backend.int_shape(inputs)[img_dim : (img_dim + 2)]\n if isinstance(kernel_size, int):\n kern" }, { "id": 122570, "commit_id": "e4757e8410a6e40bc081085e2a1bef2ee6423d9c", "repo": "jax", "path": "jax/_src/checkify.py", "file_name": "checkify.py", "fun_name": "_get_current_traceback", "commit_message": "Rewrite Checkify to support tracking different error types.\n\nIn general, behavior should remain the same and this is not a breaking\nchange.\n\nThere are some minor changes to the API:\n - checkify.ErrorCategory has changed type: it's no longer an Enum, but\n the JaxException type. 
These have not been exposed as part of the\n public API.\n - some attributes on Error have changed and made private\n - The raised error has changed type (JaxRuntimeError), and will have a\n different traceback (pointing to the origin of the error + where the\n error value was raised).\n - `checkify.check` now supports formating error message with variable\n size runtime info!\n\nCo-authored-by: Sharad Vikram ", "code": "def _get_current_traceback(skip_frames = 0) -> Optional[types.TracebackType]:\n # TODO(lenamartens): use c++ version from XLA?\n tb = None\n import inspect\n for frame_info in inspect.stack():\n frame = frame_info.frame\n if skip_frames:\n skip_frames -= 1\n elif not traceback_util.include_frame(frame):\n continue\n else:\n tb = types.TracebackType(tb, frame, frame.f_lasti, frame.f_lineno)\n return tb\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 74, "n_words": 43, "vocab_size": 38, "complexity": 4, "nloc": 12, "token_counts": 71, "n_ast_nodes": 113, "n_identifiers": 14, "random_cut": "def _get_current_traceback(skip_frames = 0) -> Optional[types.TracebackType]:\n # TODO(lenamartens): use c++ version from XLA?\n tb = None\n import inspect\n for frame_info in inspect.stack():\n frame = frame_info.frame\n if skip_frames:\n skip_frames -= 1\n elif not traceback_util.include_frame(frame):\n continue\n else:\n tb = types.TracebackType(tb, frame, frame.f_lasti, frame.f_lineno)\n return tb\n" }, { "id": 129685, "commit_id": "d363c370789479eb1f87ce014920ab2052cfbf36", "repo": "ray", "path": "python/ray/tests/test_multi_node_3.py", "file_name": "test_multi_node_3.py", "fun_name": "redis_proc", "commit_message": "[Core] Stop Ray stop from killing redis that's not started by Ray (#21805)\n\nCurrently, `ray stop` logic is vulnerable, and it kills Redis server that's not started by Ray. This PR fixes the issue by better checking the executable name of redis-server (If it is redis-server created by Ray, it should contain Ray specific path copied while wheels are built).\r\n\r\nI originally tried to obtain ppid and kill a redis-server only when it is created from the same parent, but it turns out all processes started by ray start has no ppid. \r\n\r\nWhile the best solution is to have some \"process manager\" that we can detect redis server started by us, I think there's no need to put lots of efforts here right now since Redis will be removed soon. We will eventually move to a better direction (process manager) to handle this sort of issues.", "code": "def redis_proc():\n \n REDIS_SERVER_PATH = \"core/src/ray/thirdparty/redis/src/redis-server\"\n full_path = Path(ray.__file__).parents[0] / REDIS_SERVER_PATH\n check_call_subprocess([\"cp\", f\"{full_path}\", \"redis-server\"])\n proc = subprocess.Popen([\"./redis-server\", \"--port\", \"7999\"])\n yield proc\n subprocess.check_call([\"ray\", \"stop\"])\n os.kill(proc.pid, 9)\n subprocess.check_call([\"rm\", \"-rf\", \"redis-server\"])\n\n\n@pytest.mark.skipif(\n sys.platform == \"win32\",\n reason=(\"Feature not supported Windows because Redis \"\n \"is not officially supported by Windows. \"\n \"(There cannot be external Redis in Windows)\"))", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.skipif(\n sys.platform == \"win32\",\n reason=(\"Feature not supported Windows because Redis \"\n \"is not officially supported by Windows. 
\"\n \"(There cannot be external Redis in Windows)\"))", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 106, "n_words": 52, "vocab_size": 43, "complexity": 1, "nloc": 9, "token_counts": 82, "n_ast_nodes": 193, "n_identifiers": 21, "random_cut": "def redis_proc():\n \n REDIS_SERVER_PATH = \"core/src/ray/thirdparty/redis/src/redis-server\"\n full_path = Path(ray.__file__).parents[0] / REDIS_SERVER_PATH\n check_call_subprocess([\"cp\", f\"{full_path}\", \"redis-server\"])\n proc = subprocess.Popen([\"./redis-server\", \"--port\", \"7999\"])\n yield proc\n subprocess.check_call([\"ray\", \"stop\"])\n os.kill(proc.pid, 9)\n subprocess.check_call([\"rm\", \"-rf\", \"redis-server\"])\n\n\n@pytest.mark.skipif(\n sys.platform == \"win32\",\n reason=(\"Feature not supported Windows because Redis \"\n \"is not officially supported by Windows. \"\n \"(There cannot be external Redis in Windows)\"))" }, { "id": 135105, "commit_id": "432f023642731bf53aac9b6c778f9dd7b1d82a57", "repo": "ray", "path": "rllib/algorithms/sac/tests/test_sac.py", "file_name": "test_sac.py", "fun_name": "_sac_loss_helper", "commit_message": "[RLlib] Deprecate `AlgorithmConfig.framework(\"tfe\")`: Use `tf2` instead. (#29755)", "code": "def _sac_loss_helper(self, train_batch, weights, ks, log_alpha, fw, gamma, sess):\n \n # ks:\n # 0=log_alpha\n # 1=target log-alpha (not used)\n\n # 2=action hidden bias\n # 3=action hidden kernel\n # 4=action out bias\n # 5=action out kernel\n\n # 6=Q hidden bias\n # 7=Q hidden kernel\n # 8=Q out bias\n # 9=Q out kernel\n\n # 14=target Q hidden bias\n # 15=target Q hidden kernel\n # 16=target Q out bias\n # 17=target Q out kernel\n alpha = np.exp(log_alpha)\n # cls = TorchSquashedGaussian if fw == \"torch\" else SquashedGaussian\n cls = TorchDirichlet if fw == \"torch\" else Dirichlet\n model_out_t = train_batch[SampleBatch.CUR_OBS]\n model_out_tp1 = train_batch[SampleBatch.NEXT_OBS]\n target_model_out_tp1 = train_batch[SampleBatch.NEXT_OBS]\n\n # get_policy_output\n action_dist_t = cls(\n fc(\n relu(fc(model_out_t, weights[ks[1]], weights[ks[0]], framework=fw)),\n weights[ks[9]],\n weights[ks[8]],\n ),\n None,\n )\n policy_t = action_dist_t.deterministic_sample()\n log_pis_t = action_dist_t.logp(policy_t)\n if sess:\n log_pis_t = sess.run(log_pis_t)\n policy_t = sess.run(policy_t)\n log_pis_t = np.expand_dims(log_pis_t, -1)\n\n # Get policy output for t+1.\n action_dist_tp1 = cls(\n fc(\n relu(fc(model_out_tp1, weights[ks[1]], weights[ks[0]], framework=fw)),\n weights[ks[9]],\n weights[ks[8]],\n ),\n None,\n )\n policy_tp1 = action_dist_tp1.deterministic_sample()\n log_pis_tp1 = action_dist_tp1.logp(policy_tp1)\n if sess:\n log_pis_tp1 = sess.run(log_pis_tp1)\n policy_tp1 = sess.run(policy_tp1)\n log_pis_tp1 = np.expand_dims(log_pis_tp1, -1)\n\n # Q-values for the actually selected actions.\n # get_q_values\n q_t = fc(\n relu(\n fc(\n np.concatenate([model_out_t, train_batch[SampleBatch.ACTIONS]], -1),\n weights[ks[3]],\n weights[ks[2]],\n framework=fw,\n )\n ),\n weights[ks[11]],\n weights[ks[10]],\n framework=fw,\n )\n\n # Q-values for current policy in given current state.\n # get_q_values\n q_t_det_policy = fc(\n relu(\n fc(\n np.concatenate([model_out_t, policy_t], -1),\n weights[ks[3]],\n weights[ks[2]],\n framework=fw,\n )\n ),\n weights[ks[11]],\n weights[ks[10]],\n framework=fw,\n )\n\n # Target q network evaluation.\n # target_model.get_q_values\n if fw == \"tf\":\n q_tp1 = fc(\n relu(\n fc(\n np.concatenate([target_model_out_tp1, policy_tp1], -1),\n weights[ks[7]],\n 
weights[ks[6]],\n framework=fw,\n )\n ),\n weights[ks[15]],\n weights[ks[14]],\n framework=fw,\n )\n else:\n assert fw == \"tf2\"\n q_tp1 = fc(\n relu(\n fc(\n np.concatenate([target_model_out_tp1, policy_tp1], -1),\n weights[ks[7]],\n weights[ks[6]],\n framework=fw,\n )\n ),\n weights[ks[9]],\n weights[ks[8]],\n framework=fw,\n )\n\n q_t_selected = np.squeeze(q_t, axis=-1)\n q_tp1 -= alpha * log_pis_tp1\n q_tp1_best = np.squeeze(q_tp1, axis=-1)\n dones = train_batch[SampleBatch.DONES]\n rewards = train_batch[SampleBatch.REWARDS]\n if fw == \"torch\":\n dones = dones.float().numpy()\n rewards = rewards.numpy()\n q_tp1_best_masked = (1.0 - dones) * q_tp1_best\n q_t_selected_target = rewards + gamma * q_tp1_best_masked\n base_td_error = np.abs(q_t_selected - q_t_selected_target)\n td_error = base_td_error\n critic_loss = [\n np.mean(\n train_batch[\"weights\"] * huber_loss(q_t_selected_target - q_t_selected)\n )\n ]\n target_entropy = -np.prod((1,))\n alpha_loss = -np.mean(log_alpha * (log_pis_t + target_entropy))\n actor_loss = np.mean(alpha * log_pis_t - q_t_det_policy)\n\n return critic_loss, actor_loss, alpha_loss, td_error\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1853, "n_words": 359, "vocab_size": 170, "complexity": 6, "nloc": 110, "token_counts": 709, "n_ast_nodes": 1058, "n_identifiers": 61, "random_cut": "def _sac_loss_helper(self, train_batch, weights, ks, log_alpha, fw, gamma, sess):\n \n # ks:\n # 0=log_alpha\n # 1=target log-alpha (not used)\n\n # 2=action hidden bias\n # 3=action hidde" }, { "id": 103503, "commit_id": "1e56d39777876ecf91852563557818049dc2e6f9", "repo": "kitty", "path": "kitty/constants.py", "file_name": "constants.py", "fun_name": "wakeup_io_loop", "commit_message": "Wakeup main loop when refreshing window as well as io loop", "code": "def wakeup_io_loop() -> None:\n from .fast_data_types import get_boss\n b = get_boss()\n if b is not None:\n b.child_monitor.wakeup()\n\n\nterminfo_dir = os.path.join(kitty_base_dir, 'terminfo')\nlogo_png_file = os.path.join(kitty_base_dir, 'logo', 'kitty.png')\nbeam_cursor_data_file = os.path.join(kitty_base_dir, 'logo', 'beam-cursor.png')\nshell_integration_dir = os.path.join(kitty_base_dir, 'shell-integration')\ntry:\n shell_path = pwd.getpwuid(os.geteuid()).pw_shell or '/bin/sh'\nexcept KeyError:\n with suppress(Exception):\n print('Failed to read login shell via getpwuid() for current user, falling back to /bin/sh', file=sys.stderr)\n shell_path = '/bin/sh'\n# Keep this short as it is limited to 103 bytes on macOS\n# https://github.com/ansible/ansible/issues/11536#issuecomment-153030743\nssh_control_master_template = 'kssh-{kitty_pid}-{ssh_placeholder}'\n\n", "url": "https://github.com/kovidgoyal/kitty.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 103, "n_words": 81, "vocab_size": 62, "complexity": 2, "nloc": 5, "token_counts": 29, "n_ast_nodes": 221, "n_identifiers": 27, "random_cut": "def wakeup_io_loop() -> None:\n from .fast_data_types import get_boss\n b = get_boss()\n if b is not None:\n b.child_monitor.wakeup()\n\n\nterminfo_dir = os.path.join(kitty_base_dir, 'terminfo')\nlogo_png_file = os.path.join(kitty_base_dir, 'logo', 'kitty.png')\nbeam_cursor_data_file = os.path.join(kitty_base_dir, 'logo', 'beam-cursor.png')\nshell_integration_dir = os.path.join(kitty_base_dir, 'shell-integration')\ntry:\n shell_path = pwd.getpwuid(os.geteuid()).pw_shell or 
'/bin/sh'\nexcept KeyError:\n with suppress(Exception):\n print('Failed to read login shell via getpwuid() for current user, falling back to /bin/sh', file=sys.stderr)\n shell_path = '/bin/sh'\n# Keep this short as it is limited to 103 bytes on macOS\n# https://github.com/ansible/ansible/issues/11536#issuec" }, { "id": 151977, "commit_id": "3395c29127e2dfc4467f04b40b2aec7ef3ec1196", "repo": "stable-diffusion-webui", "path": "webui.py", "file_name": "webui.py", "fun_name": "flag", "commit_message": "added prompt matrix feature\nall images in batches now have proper seeds, not just the first one\nadded code to remove bad characters from filenames\nadded code to flag output which writes it to csv and saves images\nrenamed some fields in UI for clarity", "code": "def flag(self, flag_data, flag_option=None, flag_index=None, username=None) -> int:\r\n os.makedirs(\"log/images\", exist_ok=True)\r\n\r\n # those must match the \"dream\" function\r\n prompt, ddim_steps, sampler_name, use_GFPGAN, prompt_matrix, ddim_eta, n_iter, n_samples, cfg_scale, request_seed, height, width, images, seed, comment = flag_data\r\n\r\n filenames = []\r\n\r\n with open(\"log/log.csv\", \"a\", encoding=\"utf8\", newline='') as file:\r\n import time\r\n import base64\r\n\r\n at_start = file.tell() == 0\r\n writer = csv.writer(file)\r\n if at_start:\r\n writer.writerow([\"prompt\", \"seed\", \"width\", \"height\", \"cfgs\", \"steps\", \"filename\"])\r\n\r\n filename_base = str(int(time.time() * 1000))\r\n for i, filedata in enumerate(images):\r\n filename = \"log/images/\"+filename_base + (\"\" if len(images) == 1 else \"-\"+str(i+1)) + \".png\"\r\n\r\n if filedata.startswith(\"data:image/png;base64,\"):\r\n filedata = filedata[len(\"data:image/png;base64,\"):]\r\n\r\n with open(filename, \"wb\") as imgfile:\r\n imgfile.write(base64.decodebytes(filedata.encode('utf-8')))\r\n\r\n filenames.append(filename)\r\n\r\n writer.writerow([prompt, seed, width, height, cfg_scale, ddim_steps, filenames[0]])\r\n\r\n print(\"Logged:\", filenames[0])\r\n\r\n\r\ndream_interface = gr.Interface(\r\n dream,\r\n inputs=[\r\n gr.Textbox(label=\"Prompt\", placeholder=\"A corgi wearing a top hat as an oil painting.\", lines=1),\r\n gr.Slider(minimum=1, maximum=150, step=1, label=\"Sampling Steps\", value=50),\r\n gr.Radio(label='Sampling method', choices=[\"DDIM\", \"PLMS\", \"k-diffusion\"], value=\"k-diffusion\"),\r\n gr.Checkbox(label='Fix faces using GFPGAN', value=False, visible=GFPGAN is not None),\r\n gr.Checkbox(label='Create prompt matrix (separate multiple prompts using |, and get all combinations of them)', value=False),\r\n gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label=\"DDIM ETA\", value=0.0, visible=False),\r\n gr.Slider(minimum=1, maximum=16, step=1, label='Batch count (how many batches of images to generate)', value=1),\r\n gr.Slider(minimum=1, maximum=4, step=1, label='Batch size (how many images are in a batch; memory-hungry)', value=1),\r\n gr.Slider(minimum=1.0, maximum=15.0, step=0.5, label='Classifier Free Guidance Scale (how strongly should the image follow the prompt)', value=7.0),\r\n gr.Number(label='Seed', value=-1),\r\n gr.Slider(minimum=64, maximum=2048, step=64, label=\"Height\", value=512),\r\n gr.Slider(minimum=64, maximum=2048, step=64, label=\"Width\", value=512),\r\n ],\r\n outputs=[\r\n gr.Gallery(label=\"Images\"),\r\n gr.Number(label='Seed'),\r\n gr.Textbox(label=\"Copy-paste generation parameters\"),\r\n ],\r\n title=\"Stable Diffusion Text-to-Image K\",\r\n description=\"Generate images from text 
with Stable Diffusion (using K-LMS)\",\r\n flagging_callback=Flagging()\r\n)\r", "url": "https://github.com/AUTOMATIC1111/stable-diffusion-webui.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 616, "n_words": 247, "vocab_size": 202, "complexity": 5, "nloc": 21, "token_counts": 262, "n_ast_nodes": 911, "n_identifiers": 77, "random_cut": "def flag(self, flag_data, flag_option=None, flag_index=None, username=None) -> int:\r\n os.makedirs(\"log/images\", exist_ok=True)\r\n\r\n # those must match the \"dream\" function\r\n prompt, ddim_steps, sampler_name, use_GFPGAN, prompt_matrix, ddim_eta, n_iter, n_samples, cfg_scale, request_seed, height, width, images, seed, comment = flag_data\r\n\r\n filenames = []\r\n\r\n with open(\"log/log.csv\", \"a\", encoding=\"utf8\", newline='') as file:\r\n import time\r\n import base64\r\n\r\n at_start = file.tell() == 0\r\n writer = csv.writer(file)\r\n if at_start:\r\n writer.writerow([\"prompt\", \"seed\", \"width\", \"height\", \"cfgs\", \"steps\", \"filename\"])\r\n\r\n filename_base = str(int(time.time() * 1000))\r\n for i, filedata in enumerate(images):\r\n filename = \"log/images/\"+filename_base + (\"\" if len(images) == 1 else \"-\"+str(i+1)) + \".png\"\r\n\r\n if filedata.startswith(\"data:image/png;base64,\"):\r\n filedata = filedata[len(\"data:image/png;base64,\"):]\r\n\r\n with open(filename, \"wb\") as imgfile:\r\n imgfile.write(base64.de" }, { "id": 34366, "commit_id": "3fefee99108de855f5659679c9d034a3be5ad0f4", "repo": "transformers", "path": "src/transformers/pipelines/automatic_speech_recognition.py", "file_name": "automatic_speech_recognition.py", "fun_name": "preprocess", "commit_message": "Make chuking smartly (long files) work on asr ctc_with_lm. 
(#15219)\n\n* [WIP] Make chuking smartly (long files) work on asr ctc_with_lm.\r\n\r\n* Slow test with functionality.\r\n\r\n* Fixing regular test.\r\n\r\n* fix for batch size 1\r\n\r\n* Handling batch outside `rescale_Stride`.\r\n\r\n- Renamed to `rescale_stride`.\r\n\r\n* Disable equality in the test.\r\n\r\n* Remove print.\r\n\r\nCo-authored-by: Patrick von Platen ", "code": "def preprocess(self, inputs, chunk_length_s=0, stride_length_s=None):\n if isinstance(inputs, str):\n with open(inputs, \"rb\") as f:\n inputs = f.read()\n\n if isinstance(inputs, bytes):\n inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)\n\n if not isinstance(inputs, np.ndarray):\n raise ValueError(\"We expect a numpy ndarray as input\")\n if len(inputs.shape) != 1:\n raise ValueError(\"We expect a single channel audio input for AutomaticSpeechRecognitionPipeline\")\n\n if chunk_length_s:\n if stride_length_s is None:\n stride_length_s = chunk_length_s / 6\n\n chunk_len = int(round(chunk_length_s * self.feature_extractor.sampling_rate))\n\n if isinstance(stride_length_s, (int, float)):\n stride_length_s = [stride_length_s, stride_length_s]\n\n stride_left = int(round(stride_length_s[0] * self.feature_extractor.sampling_rate))\n stride_right = int(round(stride_length_s[1] * self.feature_extractor.sampling_rate))\n\n if self.type not in {\"ctc\", \"ctc_with_lm\"}:\n raise ValueError(\n \"`chunk_length_s` is only valid for CTC models, use other chunking options for other models\"\n )\n if chunk_len < stride_left + stride_right:\n raise ValueError(\"Chunk length must be superior to stride length\")\n\n # make sure that\n for item in chunk_iter(inputs, self.feature_extractor, chunk_len, stride_left, stride_right):\n yield item\n else:\n processed = self.feature_extractor(\n inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors=\"pt\"\n )\n yield {\"is_last\": True, **processed}\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 503, "n_words": 147, "vocab_size": 103, "complexity": 11, "nloc": 31, "token_counts": 254, "n_ast_nodes": 407, "n_identifiers": 30, "random_cut": "def preprocess(self, inputs, chunk_length_s=0, stride_length_s=None):\n if isinstance(inputs, str):\n with open(inputs, \"rb\") as f:\n inputs = f.read()\n\n if isinstance(inputs, bytes):\n inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)\n\n if not isinstance(inputs, np.ndarray):\n raise ValueError(\"We expect a numpy ndarray as input\")\n if len(inputs.shape) != 1:\n raise ValueError(\"We expect a single channel audio input for AutomaticSpeechRecognitionPipeline\")\n\n if chunk_length_s:\n if stride_length_s is None:\n stride_length_s = chunk_length_s / 6\n\n chunk_len = int(round(chunk_length_s * self.feature_extractor.sampling_rate))\n\n if isinstance(stride_length_s, (int, float)):\n stride_length_s = [stride_length_s, stride_length_s]\n\n stride_left = int(round(stride_length_s[0] * self.feature_extractor.sampling_rate))\n stride_right = int(round(stride_length_s[1] * self.feature_extractor.sampling_rate))\n\n if self.type not in {\"ctc\", \"ctc_with_lm\"}:\n raise ValueError(\n \"`chunk_length_s` is only valid for CTC models, use other chunking options for other models\"\n )\n if chunk_len < stride_left + stride_right:\n raise ValueError(\"Chunk length must be superior to stride length\")\n\n # make sure that\n for item in chunk_iter(inputs, self." 
}, { "id": 19109, "commit_id": "d3ddd59a2333b6cf88e719157453aeeb8d9c2632", "repo": "mlflow", "path": "tests/store/artifact/utils/test_model_utils.py", "file_name": "test_model_utils.py", "fun_name": "test_get_model_name_and_version_with_version", "commit_message": "Support specifying 'latest' in model URI to get the latest version of a model regardless of the stage (#5027)\n\n* Support specifying 'latest' in model URI to get the latest version of a model regardless of the stage\r\n\r\nSigned-off-by: Chenran Li ", "code": "def test_get_model_name_and_version_with_version():\n with mock.patch.object(\n MlflowClient, \"get_latest_versions\", return_value=[]\n ) as mlflow_client_mock:\n assert get_model_name_and_version(MlflowClient(), \"models:/AdsModel1/123\") == (\n \"AdsModel1\",\n \"123\",\n )\n mlflow_client_mock.assert_not_called()\n\n", "url": "https://github.com/mlflow/mlflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 74, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 9, "token_counts": 44, "n_ast_nodes": 79, "n_identifiers": 9, "random_cut": "def test_get_model_name_and_version_with_version():\n with mock.patch.object(\n MlflowClient, \"get_latest_versions\", return_value=[]\n ) as mlflow_client_mock:\n assert get_model_name_and_version(MlflowCl" }, { "id": 42187, "commit_id": "2f70aee390fce0b483dfb63771ef624149f87d06", "repo": "seaborn", "path": "tests/_core/test_plot.py", "file_name": "test_plot.py", "fun_name": "test_y_wrapping", "commit_message": "Fix visibility of internal axis labels with Plot.pair(wrap=...) (#2979)\n\n* Fix visibility of internal axis labels with Plot.pair(wrap=...)\r\n\r\nFixes #2976\r\n\r\n* Fix wrapped row label test\r\n\r\n* Fix access to pair_spec structure entry\r\n\r\n* Fix Plot.pair axis sharing with wrap=1", "code": "def test_y_wrapping(self, long_df):\n\n y_vars = [\"f\", \"x\", \"y\", \"z\"]\n wrap = 3\n p = Plot(long_df, x=\"x\").pair(y=y_vars, wrap=wrap).plot()\n\n n_row, n_col = wrap, len(y_vars) // wrap + 1\n assert_gridspec_shape(p._figure.axes[0], n_row, n_col)\n assert len(p._figure.axes) == len(y_vars)\n label_array = np.empty(n_row * n_col, object)\n label_array[:len(y_vars)] = y_vars\n label_array = label_array.reshape((n_row, n_col), order=\"F\")\n label_array = [y for y in label_array.flat if y is not None]\n for i, ax in enumerate(p._figure.axes):\n label = ax.yaxis.get_label()\n assert label.get_visible()\n assert label.get_text() == label_array[i]\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 182, "n_words": 73, "vocab_size": 53, "complexity": 4, "nloc": 15, "token_counts": 180, "n_ast_nodes": 288, "n_identifiers": 32, "random_cut": "def test_y_wrapping(self, long_df):\n\n y_vars = [\"f\", \"x\", \"y\", \"z\"]\n wrap = 3\n p = Plot(long_df, x=\"x\").pair(y=y_vars, wrap=wrap).plot()\n\n n_row, n_col = wrap, len(y_vars) // wrap + 1\n assert_gridspec_shape(p._figure.axes[0], n_row, n_col)\n assert len(p._figure.axes) == len(y_vars)\n label_array = np.empty(n_row * n_col, object)\n label_array[:len(y_vars)] = y_vars\n label_array = label_array.reshape((n_row, n_col), order=\"F\")\n label_array = [y for y in label_array.flat if y is not None]\n for i, ax in enumerate(p._figure.axes):\n " }, { "id": 58237, "commit_id": "fddd3cf86cb763fb2a9081360762881fae60e300", "repo": "prefect", "path": "tests/cli/test_profile.py", "file_name": "test_profile.py", "fun_name": 
"test_rename_profile_renames_profile", "commit_message": "Switch the name of the active profile on rename if in use (#6164)\n\n* Switch the name of the active profile on rename if in use\r\n\r\nCloses https://github.com/PrefectHQ/prefect/issues/6110\r\n\r\n* Add tests\r\n\r\n* Include negative test coverage for inactive case\r\n\r\n* Fixup comment [ci skip]\r\n\r\nCo-authored-by: Chris Pickett \r\n\r\nCo-authored-by: Chris Pickett ", "code": "def test_rename_profile_renames_profile():\n save_profiles(\n ProfilesCollection(\n profiles=[\n Profile(name=\"foo\", settings={PREFECT_API_KEY: \"foo\"}),\n ],\n active=None,\n )\n )\n\n invoke_and_assert(\n [\"profile\", \"rename\", \"foo\", \"bar\"],\n expected_output=\"Renamed profile 'foo' to 'bar'.\",\n expected_code=0,\n )\n\n profiles = load_profiles()\n assert \"foo\" not in profiles, \"The original profile should not exist anymore\"\n assert profiles[\"bar\"].settings == {\n PREFECT_API_KEY: \"foo\"\n }, \"Settings should be retained\"\n assert profiles.active_name != \"bar\", \"The active profile should not be changed\"\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 177, "n_words": 61, "vocab_size": 48, "complexity": 1, "nloc": 20, "token_counts": 90, "n_ast_nodes": 157, "n_identifiers": 14, "random_cut": "def test_rename_profile_renames_profile():\n save_profiles(\n ProfilesCollection(\n profiles=[\n Profile(name=\"foo\", settings" }, { "id": 45933, "commit_id": "5ace37a16d1773adb71c684450838e4c8e69b581", "repo": "airflow", "path": "tests/dag_processing/test_processor.py", "file_name": "test_processor.py", "fun_name": "test_process_file_should_failure_callback", "commit_message": "Store callbacks in database if standalone_dag_processor config is True. 
(#21731)", "code": "def test_process_file_should_failure_callback(self, monkeypatch, tmp_path):\n callback_file = tmp_path.joinpath(\"callback.txt\")\n callback_file.touch()\n monkeypatch.setenv(\"AIRFLOW_CALLBACK_FILE\", str(callback_file))\n dag_file = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), '../dags/test_on_failure_callback.py'\n )\n dagbag = DagBag(dag_folder=dag_file, include_examples=False)\n dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())\n\n dag = dagbag.get_dag('test_om_failure_callback_dag')\n task = dag.get_task(task_id='test_om_failure_callback_task')\n with create_session() as session:\n dagrun = dag.create_dagrun(\n state=State.RUNNING,\n execution_date=DEFAULT_DATE,\n run_type=DagRunType.SCHEDULED,\n session=session,\n )\n (ti,) = dagrun.task_instances\n ti.refresh_from_task(task)\n\n requests = [\n TaskCallbackRequest(\n full_filepath=dag.fileloc,\n simple_task_instance=SimpleTaskInstance.from_ti(ti),\n msg=\"Message\",\n )\n ]\n dag_file_processor.process_file(dag_file, requests, session=session)\n\n assert \"Callback fired\" == callback_file.read_text()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 374, "n_words": 63, "vocab_size": 53, "complexity": 1, "nloc": 29, "token_counts": 200, "n_ast_nodes": 323, "n_identifiers": 56, "random_cut": "def test_process_file_should_failure_callback(self, monkeypatch, tmp_path):\n callback_file = tmp_path.joinpath(\"callback.txt\")\n callback_file.touch()\n monkeypatch.setenv(\"AIRFLOW_CALLBACK_FILE\", str(callback_file))\n dag_file = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), '../dags/test_on_failure_callback.py'\n )\n dagbag = DagBag(dag_folder=dag_file, include_examples=False)\n dag_file_processor = DagFi" }, { "id": 56691, "commit_id": "50c7ffda5aebdaf4fb258db4b2a4de76557d9c7f", "repo": "prefect", "path": "tests/blocks/test_system.py", "file_name": "test_system.py", "fun_name": "test_datetime", "commit_message": "Add a block that pulls its value from env var", "code": "async def test_datetime():\n await system.DateTime(value=pendulum.datetime(2022, 1, 1)).save(name=\"test\")\n api_block = await system.DateTime.load(\"test\")\n assert api_block.value == pendulum.datetime(2022, 1, 1)\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 25, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 55, "n_ast_nodes": 90, "n_identifiers": 10, "random_cut": "async def test_datetime():\n await system.DateTime" }, { "id": 10002, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "tests/distributed/test_remote_peas/test_remote_peas.py", "file_name": "test_remote_peas.py", "fun_name": "test_async_jinad_client", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* 
fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration 
tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* 
fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore 
non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star 
routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "async def test_async_jinad_client(async_jinad_client, pea_args):\n workspace_id = await async_jinad_client.workspaces.create(paths=[cur_dir])\n assert DaemonID(workspace_id)\n\n success, pea_id = await async_jinad_client.peas.create(\n workspace_id=workspace_id, payload=replace_enum_to_str(vars(pea_args))\n )\n assert success\n assert pea_id\n assert is_pea_ready(pea_args)\n assert await async_jinad_client.peas.delete(pea_id)\n assert not is_pea_ready(pea_args)\n assert await async_jinad_client.workspaces.delete(workspace_id)\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 69, "n_words": 33, "vocab_size": 21, "complexity": 1, "nloc": 12, "token_counts": 87, "n_ast_nodes": 139, "n_identifiers": 17, "random_cut": "async def test_async_jinad_client(async_jinad_client, pea_args):\n workspace_id = await async_jinad_client.workspaces.create(paths=[cur_dir])\n assert DaemonID(workspace_id)\n\n success, pea_id = await async_jinad_client.peas.create(\n workspace_id=workspace_id, payload=replace_enum_to_str(vars(pea_args))\n )\n assert success\n assert pea_id\n assert is_pea_ready(pea_args)\n assert await async_jinad_client.peas.delete(pea_id)\n assert not is_pea_ready(pea_args)\n assert await async_jinad_client.workspaces.delete(workspace_id)\n\n" }, { "id": 289361, "commit_id": "31a787558fd312331b55e5c2c4b33341fc3601fc", "repo": "core", "path": "tests/components/energy/test_websocket_api.py", "file_name": "test_websocket_api.py", "fun_name": "test_fossil_energy_consumption_no_co2", "commit_message": "Ensure recorder test fixture is setup before hass fixture (#80528)\n\n* Ensure recorder test fixture is setup before hass fixture\r\n\r\n* Adjust more tests", "code": "async def test_fossil_energy_consumption_no_co2(recorder_mock, hass, hass_ws_client):\n \n now = dt_util.utcnow()\n later = dt_util.as_utc(dt_util.parse_datetime(\"2022-09-01 00:00:00\"))\n\n await async_setup_component(hass, \"history\", {})\n await async_setup_component(hass, \"sensor\", {})\n await 
async_recorder_block_till_done(hass)\n\n period1 = dt_util.as_utc(dt_util.parse_datetime(\"2021-09-01 00:00:00\"))\n period2 = dt_util.as_utc(dt_util.parse_datetime(\"2021-09-30 23:00:00\"))\n period2_day_start = dt_util.as_utc(dt_util.parse_datetime(\"2021-09-30 00:00:00\"))\n period3 = dt_util.as_utc(dt_util.parse_datetime(\"2021-10-01 00:00:00\"))\n period4 = dt_util.as_utc(dt_util.parse_datetime(\"2021-10-31 23:00:00\"))\n period4_day_start = dt_util.as_utc(dt_util.parse_datetime(\"2021-10-31 00:00:00\"))\n\n external_energy_statistics_1 = (\n {\n \"start\": period1,\n \"last_reset\": None,\n \"state\": 0,\n \"sum\": 2,\n },\n {\n \"start\": period2,\n \"last_reset\": None,\n \"state\": 1,\n \"sum\": 3,\n },\n {\n \"start\": period3,\n \"last_reset\": None,\n \"state\": 2,\n \"sum\": 5,\n },\n {\n \"start\": period4,\n \"last_reset\": None,\n \"state\": 3,\n \"sum\": 8,\n },\n )\n external_energy_metadata_1 = {\n \"has_mean\": False,\n \"has_sum\": True,\n \"name\": \"Total imported energy\",\n \"source\": \"test\",\n \"statistic_id\": \"test:total_energy_import_tariff_1\",\n \"unit_of_measurement\": \"kWh\",\n }\n external_energy_statistics_2 = (\n {\n \"start\": period1,\n \"last_reset\": None,\n \"state\": 0,\n \"sum\": 20,\n },\n {\n \"start\": period2,\n \"last_reset\": None,\n \"state\": 1,\n \"sum\": 30,\n },\n {\n \"start\": period3,\n \"last_reset\": None,\n \"state\": 2,\n \"sum\": 50,\n },\n {\n \"start\": period4,\n \"last_reset\": None,\n \"state\": 3,\n \"sum\": 80,\n },\n )\n external_energy_metadata_2 = {\n \"has_mean\": False,\n \"has_sum\": True,\n \"name\": \"Total imported energy\",\n \"source\": \"test\",\n \"statistic_id\": \"test:total_energy_import_tariff_2\",\n \"unit_of_measurement\": \"kWh\",\n }\n\n async_add_external_statistics(\n hass, external_energy_metadata_1, external_energy_statistics_1\n )\n async_add_external_statistics(\n hass, external_energy_metadata_2, external_energy_statistics_2\n )\n await async_wait_recording_done(hass)\n\n client = await hass_ws_client()\n await client.send_json(\n {\n \"id\": 1,\n \"type\": \"energy/fossil_energy_consumption\",\n \"start_time\": now.isoformat(),\n \"end_time\": later.isoformat(),\n \"energy_statistic_ids\": [\n \"test:total_energy_import_tariff_1\",\n \"test:total_energy_import_tariff_2\",\n ],\n \"co2_statistic_id\": \"test:co2_ratio_missing\",\n \"period\": \"hour\",\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\n period2.isoformat(): pytest.approx(33.0 - 22.0),\n period3.isoformat(): pytest.approx(55.0 - 33.0),\n period4.isoformat(): pytest.approx(88.0 - 55.0),\n }\n\n await client.send_json(\n {\n \"id\": 2,\n \"type\": \"energy/fossil_energy_consumption\",\n \"start_time\": now.isoformat(),\n \"end_time\": later.isoformat(),\n \"energy_statistic_ids\": [\n \"test:total_energy_import_tariff_1\",\n \"test:total_energy_import_tariff_2\",\n ],\n \"co2_statistic_id\": \"test:co2_ratio_missing\",\n \"period\": \"day\",\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\n period2_day_start.isoformat(): pytest.approx(33.0 - 22.0),\n period3.isoformat(): pytest.approx(55.0 - 33.0),\n period4_day_start.isoformat(): pytest.approx(88.0 - 55.0),\n }\n\n await client.send_json(\n {\n \"id\": 3,\n \"type\": \"energy/fossil_energy_consumption\",\n \"start_time\": now.isoformat(),\n \"end_time\": later.isoformat(),\n \"energy_statistic_ids\": [\n \"test:total_energy_import_tariff_1\",\n 
\"test:total_energy_import_tariff_2\",\n ],\n \"co2_statistic_id\": \"test:co2_ratio_missing\",\n \"period\": \"month\",\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\n period1.isoformat(): pytest.approx(33.0 - 22.0),\n period3.isoformat(): pytest.approx((55.0 - 33.0) + (88.0 - 55.0)),\n }\n\n\n@pytest.mark.freeze_time(\"2021-08-01 00:00:00+00:00\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.mark.freeze_time(\"2021-08-01 00:00:00+00:00\")", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 1468, "n_words": 323, "vocab_size": 120, "complexity": 1, "nloc": 150, "token_counts": 750, "n_ast_nodes": 1263, "n_identifiers": 33, "random_cut": "async def test_fossil_energy_consumption_no_co2(recorder_mock, hass, hass_ws_client):\n \n now = dt_util.utcnow()\n later = dt_util.as_utc(dt_util.parse_datetime(\"2022-09-01 00:00:00\"))\n\n await async_setup_component(hass, \"history\", {})\n await async_setup_component(hass, \"sensor\", {})\n await async_recorder_block_till_done(hass)\n\n period1 = dt_util.as_utc(dt_util.parse_datetime(\"2021-09-01 00:00:00\"))\n period2 = dt_util.as_utc(dt_util.parse_datetime(\"2021-09-30 23:00:00\"))\n period2_day_start = dt_util.as_utc(dt_util.parse_datetime(\"2021-09-30 00:00:00\"))\n period3 = dt_util.as_utc(dt_util.parse_datetime(\"2021-10-01 00:00:00\"))\n period4 = dt_util.as_utc(dt_util.parse_datetime(\"2021-10-31 23:00:00\"))\n period4_day_start = dt_util.as_utc(dt_util.parse_datetime(\"2021-10-31 00:00:00\"))\n\n external_energy_statistics_1 = (\n {\n \"start\": period1,\n \"last_reset\": None,\n \"state\": 0,\n \"sum\": 2,\n },\n {\n \"start\": period2,\n \"last_reset\": None,\n \"state\": 1,\n \"sum\": 3,\n },\n {\n \"start\": period3,\n \"last_reset\": None,\n \"state\": 2,\n \"sum\": 5,\n },\n {\n \"start\": period4,\n \"last_reset\": None,\n \"state\": 3,\n \"sum\": 8,\n },\n )\n external_energy_metadata_1 = {\n \"has_mean\": False,\n \"has_sum\": True,\n \"name\": \"Total imported energy\",\n \"source\": \"test\",\n \"statistic_id\": \"test:total_energy_import_tariff_1\",\n \"unit_of_measurement\": \"kWh\",\n }\n external_energy_statistics_2 = (\n {\n \"start\": period1,\n \"last_reset\": None,\n \"state\": 0,\n \"sum\": 20,\n },\n {\n \"start\": period2,\n \"last_reset\": None,\n \"state\": 1,\n \"sum\": 30,\n },\n {\n \"start\": period3,\n \"last_reset\": None,\n \"state\": 2,\n \"sum\": 50,\n },\n {\n \"start\": period4,\n \"last_reset\": None,\n \"state\": 3,\n \"sum\": 80,\n },\n )\n external_energy_metadata_2 = {\n \"has_mean\": False,\n \"has_sum\": True,\n \"name\": \"Total imported energy\",\n \"source\": \"test\",\n \"statistic_id\": \"test:total_energy_import_tariff_2\",\n \"unit_of_measurement\": \"kWh\",\n }\n\n async_add_external_statistics(\n hass, external_energy_metadata_1, external_energy_statistics_1\n )\n async_add_external_statistics(\n hass, external_energy_metadata_2, external_energy_statistics_2\n )\n await async_wait_recording_done(hass)\n\n client = await hass_ws_client()\n await client.send_json(\n {\n \"id\": 1,\n \"type\": \"energy" }, { "id": 7187, "commit_id": "aa0c63bf2ed825eb3ca8eff8a002d5ccbe395173", "repo": "ludwig", "path": "ludwig/models/gbm.py", "file_name": "gbm.py", "fun_name": "save", "commit_message": "feat: Added model type GBM (LightGBM tree learner), as an alternative to ECD (#2027)", "code": "def save(self, save_path):\n \n if self.lgb_booster is None:\n raise ValueError(\"Model has 
not been trained yet.\")\n\n weights_save_path = os.path.join(save_path, MODEL_WEIGHTS_FILE_NAME)\n self.lgb_booster.save_model(weights_save_path, num_iteration=self.lgb_booster.best_iteration)\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 59, "n_words": 20, "vocab_size": 20, "complexity": 2, "nloc": 5, "token_counts": 48, "n_ast_nodes": 78, "n_identifiers": 13, "random_cut": "def save(self, save_path):\n \n if self.lgb_booster is None:\n raise ValueError(\"Model has not been trained yet.\")\n\n weights_save_path = os.path.join(save_path, MODEL_WEIGHTS_FILE_NAME)\n self.lgb_booster.save_model(weights_save_path, num_iteration=self.lgb_booster.best_iteration)" }, { "id": 130913, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/serve/http_util.py", "file_name": "http_util.py", "fun_name": "set_content_type", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def set_content_type(self, content_type):\n if content_type == \"text\":\n self.raw_headers.append([b\"content-type\", b\"text/plain\"])\n elif content_type == \"text-utf8\":\n self.raw_headers.append([b\"content-type\", b\"text/plain; charset=utf-8\"])\n elif content_type == \"json\":\n self.raw_headers.append([b\"content-type\", b\"application/json\"])\n else:\n raise ValueError(\"Invalid content type {}\".format(content_type))\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 99, "n_words": 28, "vocab_size": 21, "complexity": 4, "nloc": 9, "token_counts": 76, "n_ast_nodes": 129, "n_identifiers": 7, "random_cut": "def set_content_type(self, content_type):\n if content_type == \"text\":\n self.raw_headers.append([b\"content-type\", b\"text/plain\"])\n elif content_type == \"text-utf8\":\n self.raw_headers.append([b\"content-type\", b\"text/plain; charset=utf-8\"])\n elif content_type == \"json\":\n self.raw_headers.append([b\"cont" }, { "id": 252120, "commit_id": "8fce7c7fa3be59f5760653e9e6daccee7f13cee9", "repo": "mitmproxy", "path": "mitmproxy/addons/proxyserver.py", "file_name": "proxyserver.py", "fun_name": "load", "commit_message": "Support specifying the local address for outgoing connections. (#5366)\n\n* allow sockname to specify local-addr\r\n\r\n* set local_addr via command line\r\n\r\n* minor fix for reconfig\r\n\r\n* minor rewording\r\n\r\nCo-authored-by: Maximilian Hils ", "code": "def load(self, loader):\n loader.add_option(\n \"connection_strategy\",\n str,\n \"eager\",\n \"Determine when server connections should be established. When set to lazy, mitmproxy \"\n \"tries to defer establishing an upstream connection as long as possible. This makes it possible to \"\n \"use server replay while being offline. 
When set to eager, mitmproxy can detect protocols with \"\n \"server-side greetings, as well as accurately mirror TLS ALPN negotiation.\",\n choices=(\"eager\", \"lazy\"),\n )\n loader.add_option(\n \"stream_large_bodies\",\n Optional[str],\n None,\n ,\n )\n loader.add_option(\n \"body_size_limit\",\n Optional[str],\n None,\n ,\n )\n loader.add_option(\n \"keep_host_header\",\n bool,\n False,\n ,\n )\n loader.add_option(\n \"proxy_debug\",\n bool,\n False,\n \"Enable debug logs in the proxy core.\",\n )\n loader.add_option(\n \"normalize_outbound_headers\",\n bool,\n True,\n ,\n )\n loader.add_option(\n \"validate_inbound_headers\",\n bool,\n True,\n ,\n )\n loader.add_option(\n \"connect_addr\",\n Optional[str],\n None,\n ,\n )\n loader.add_option(\n \"dns_server\", bool, False, \n )\n loader.add_option(\n \"dns_listen_host\", str, \"\", \n )\n loader.add_option(\"dns_listen_port\", int, 53, )\n loader.add_option(\n \"dns_mode\",\n str,\n \"regular\",\n ,\n )\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 758, "n_words": 134, "vocab_size": 82, "complexity": 1, "nloc": 88, "token_counts": 180, "n_ast_nodes": 302, "n_identifiers": 9, "random_cut": "def load(self, loader):\n loader.add_option(\n \"connection_strategy\",\n str,\n \"eager\",\n \"Determine when server connections should be established. When set to lazy, mitmproxy \"\n \"tries to defer establishing an upstream connection as long as possible. This makes it possible to \"\n \"use server replay while being offline. When set to eager, mitmproxy can detect protocols with \"\n \"server-side greetings, as well as accurately mirror TLS ALPN negotiation.\",\n choices=(\"eager\", \"lazy\"),\n )\n loader.add_option(\n \"stream_large_bodies\",\n Optional[str],\n None,\n ,\n )\n loader.add_option(\n \"body_size_limit\",\n Optional[str],\n None,\n ,\n )\n loader.add_option(\n \"keep_host_header\",\n bool,\n False,\n ,\n )\n loader.add_option(\n \"proxy_debug\",\n bool,\n False,\n \"Enable debug logs in the proxy core.\",\n )\n loader.add_option(\n \"normalize_outbound_headers\",\n bool,\n True,\n ,\n )\n loader.add_option(\n " }, { "id": 209212, "commit_id": "0b030308134905fd9eb5f9064484e48f5312b227", "repo": "scapy", "path": "scapy/contrib/automotive/scanner/enumerator.py", "file_name": "enumerator.py", "fun_name": "execute", "commit_message": "Add count parameter to enumerators in automotive scanners (#3658)", "code": "def execute(self, socket, state, **kwargs):\n # type: (_SocketUnion, EcuState, Any) -> None\n self.check_kwargs(kwargs)\n timeout = kwargs.pop('timeout', 1)\n count = kwargs.pop('count', None)\n execution_time = kwargs.pop(\"execution_time\", 1200)\n\n state_block_list = kwargs.get('state_block_list', list())\n\n if state_block_list and state in state_block_list:\n self._state_completed[state] = True\n log_interactive.debug(\"[i] State %s in block list!\", repr(state))\n return\n\n state_allow_list = kwargs.get('state_allow_list', list())\n\n if state_allow_list and state not in state_allow_list:\n self._state_completed[state] = True\n log_interactive.debug(\"[i] State %s not in allow list!\",\n repr(state))\n return\n\n it = self._get_request_iterator(state, **kwargs)\n\n # log_interactive.debug(\"[i] Using iterator %s in state %s\", it, state)\n\n start_time = time.time()\n log_interactive.debug(\n \"[i] Start execution of enumerator: %s\", time.ctime(start_time))\n\n for req in it:\n res = 
self.sr1_with_retry_on_error(req, socket, state, timeout)\n\n self._store_result(state, req, res)\n\n if self._evaluate_response(state, req, res, **kwargs):\n log_interactive.debug(\"[i] Stop test_case execution because \"\n \"of response evaluation\")\n return\n\n if count is not None:\n if count <= 0:\n log_interactive.debug(\n \"[i] Finished execution count of enumerator\")\n return\n else:\n count -= 1\n\n if (start_time + execution_time) < time.time():\n log_interactive.debug(\n \"[i] Finished execution time of enumerator: %s\",\n time.ctime())\n return\n\n log_interactive.info(\"[i] Finished iterator execution\")\n self._state_completed[state] = True\n log_interactive.debug(\"[i] States completed %s\",\n repr(self._state_completed))\n\n execute.__doc__ = _supported_kwargs_doc\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 734, "n_words": 174, "vocab_size": 103, "complexity": 10, "nloc": 43, "token_counts": 286, "n_ast_nodes": 479, "n_identifiers": 31, "random_cut": "def execute(self, socket, state, **kwargs):\n # type: (_SocketUnion, EcuState, Any) -> None\n self.check_kwargs(kwargs)\n timeout = kwargs.pop('timeout', 1)\n count = kwargs.pop('count', None)\n execution_time = kwargs.pop(\"execution_time\", 1200)\n\n state_block_list = kwargs.get('state_block_list', list())\n\n if state_block_list and state in state_block_list:\n self._state_completed[state] = True\n log_interactive.debug(\"[i] State %s in block list!\", repr(state))\n return\n\n state_allow_list = kwargs.get('state_allow_list', list())\n\n if state_allow_list and state not in state_allow_list:\n self._state_completed[state] = True\n log_interactive.debug(\"[i] State %s not in allow list!\",\n repr(state))\n return\n\n it = self._get_request_iterator(state, **kwargs)\n\n # log_interactive.debug(\"[i] Using iterator %s in state %s\", it, state)\n\n start_time = time.time()\n " }, { "id": 245869, "commit_id": "79c8295801acedee0cbdbf128a01b9fe162646b0", "repo": "mmdetection", "path": "tests/test_models/test_dense_heads/test_condinst_head.py", "file_name": "test_condinst_head.py", "fun_name": "_fake_mask_feature_head", "commit_message": "[Feature]: Support Condinst (#9223)\n\n* [Feature]: support condinst for instance segmentation\r\n\r\n* update\r\n\r\n* update\r\n\r\n* update\r\n\r\n* fix config name and add test unit\r\n\r\n* fix squeeze error\r\n\r\n* add README and chang mask to poly", "code": "def _fake_mask_feature_head():\n mask_feature_head = ConfigDict(\n in_channels=1,\n feat_channels=1,\n start_level=0,\n end_level=2,\n out_channels=8,\n mask_stride=8,\n num_stacked_convs=4,\n norm_cfg=dict(type='BN', requires_grad=True))\n return mask_feature_head\n\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 77, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 11, "token_counts": 51, "n_ast_nodes": 74, "n_identifiers": 14, "random_cut": "def _fake_mask_feature_head():\n mask_fe" }, { "id": 318055, "commit_id": "bbd7041a73572547be49ead53b183aa1e55a6d75", "repo": "core", "path": "homeassistant/components/anthemav/media_player.py", "file_name": "media_player.py", "fun_name": "async_added_to_hass", "commit_message": "Refactor and improve anthemav (#75852)", "code": "async def async_added_to_hass(self) -> None:\n \n self.async_on_remove(\n async_dispatcher_connect(\n self.hass,\n 
f\"{ANTHEMAV_UDATE_SIGNAL}_{self._entry_id}\",\n self.async_write_ha_state,\n )\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 100, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 9, "token_counts": 27, "n_ast_nodes": 56, "n_identifiers": 8, "random_cut": "async def async_added_to_hass(self) -> None:\n" }, { "id": 168141, "commit_id": "d6563c53e6f5b324af815af60548344a70ed9397", "repo": "pandas", "path": "pandas/tests/apply/test_series_apply.py", "file_name": "test_series_apply.py", "fun_name": "test_map_abc_mapping_with_missing", "commit_message": "CLN: Fixing invalid method signatures (#47438)", "code": "def test_map_abc_mapping_with_missing(non_dict_mapping_subclass):\n # https://github.com/pandas-dev/pandas/issues/29733\n # Check collections.abc.Mapping support as mapper for Series.map", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 4, "n_whitespaces": 17, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 8, "token_counts": 67, "n_ast_nodes": 11, "n_identifiers": 2, "random_cut": "def test_map_abc_mapping_with_missing(non_dict_mapping_subclass):\n # https://github.com/pandas-dev/pandas/issues/29733\n # Check col" }, { "id": 104730, "commit_id": "8f4c95630a17cb53ac908c3b50a1c1c7ef65494e", "repo": "datasets", "path": "datasets/conceptual_captions/conceptual_captions.py", "file_name": "conceptual_captions.py", "fun_name": "_split_generators", "commit_message": "Add Google Conceptual Captions Dataset (#1459)\n\n* Add Google Conceptual Captions Dataset\r\n\r\n* fix\r\n\r\n* fix\r\n\r\n* fix\r\n\r\n* Update README.md\r\n\r\n* Improve script\r\n\r\n* Generate info\r\n\r\n* Fix dummy data\r\n\r\n* Update card\r\n\r\n* Minor import fix\r\n\r\n* Minor fix in SBU Captions\r\n\r\n* Remove extra newline\r\n\r\n* Delete Image_Labels_Subset_Train_GCC-Labels-training.tsv%3F_ga%3D2.234395421.-20118413.1607637118\r\n\r\nCo-authored-by: abhishek thakur \r\nCo-authored-by: mariosasko ", "code": "def _split_generators(self, dl_manager):\n downloaded_data = dl_manager.download(_URLS[self.config.name])\n splits = [\n datasets.SplitGenerator(\n name=datasets.Split.TRAIN,\n gen_kwargs={\"annotations_file\": downloaded_data[\"train\"]},\n ),\n ]\n if self.config.name == \"unlabeled\":\n splits += [\n datasets.SplitGenerator(\n name=datasets.Split.VALIDATION,\n gen_kwargs={\"annotations_file\": downloaded_data[\"validation\"]},\n ),\n ]\n return splits\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 206, "n_words": 30, "vocab_size": 22, "complexity": 2, "nloc": 16, "token_counts": 91, "n_ast_nodes": 145, "n_identifiers": 15, "random_cut": "def _split_generators(self, dl_manager):\n downloaded_data = dl_manager.download(_URLS[self.config.name])\n splits = [\n datasets.SplitGenerator(\n name=datasets.Split.TRAIN,\n gen_kwargs={\"annotations_file\": do" }, { "id": 224189, "commit_id": "372384d8102ddb4be6360f44d1bfddb8b45435a4", "repo": "mkdocs", "path": "mkdocs/tests/structure/toc_tests.py", "file_name": "toc_tests.py", "fun_name": "test_nested_anchor", "commit_message": "Some manual changes ahead of formatting code with Black", "code": "def test_nested_anchor(self):\n md = dedent(\n \n )\n expected = dedent(\n \n )\n toc = get_toc(get_markdown_toc(md))\n self.assertEqual(str(toc).strip(), expected)\n 
self.assertEqual(len(toc), 2)\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 89, "n_words": 17, "vocab_size": 13, "complexity": 1, "nloc": 22, "token_counts": 52, "n_ast_nodes": 89, "n_identifiers": 12, "random_cut": "def test_nested_anchor(self):\n md = dedent(\n \n )\n expected = dedent(\n \n )\n toc = ge" }, { "id": 153579, "commit_id": "605efa618e7994681f57b11d04d417f353ef8d50", "repo": "modin", "path": "modin/pandas/base.py", "file_name": "base.py", "fun_name": "transform", "commit_message": "DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333)\n\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Alexander Myskov ", "code": "def transform(self, func, axis=0, *args, **kwargs): # noqa: PR01, RT01, D200\n \n kwargs[\"is_transform\"] = True\n self._validate_function(func)\n try:\n result = self.agg(func, axis=axis, *args, **kwargs)\n except TypeError:\n raise\n except Exception as err:\n raise ValueError(\"Transform function failed\") from err\n try:\n assert len(result) == len(self)\n except Exception:\n raise ValueError(\"transforms cannot produce aggregated results\")\n return result\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 169, "n_words": 50, "vocab_size": 42, "complexity": 4, "nloc": 14, "token_counts": 88, "n_ast_nodes": 146, "n_identifiers": 14, "random_cut": "def transform(self, func, axis=0, *args, **kwargs): # noqa: PR01, RT01, D200\n \n kwargs[\"is_transform\"] = True\n self._validate_function(func)\n try:\n result = self.agg(func, axis=axis, *args, **kwargs)\n except TypeError:\n raise\n except Exception as err:\n raise ValueError(\"Transform function failed\") from err\n try:\n assert len(result) == len(self)\n except Exception:\n raise ValueError(\"transforms cannot produce aggregated results\")\n " }, { "id": 243085, "commit_id": "317286d260488e1e60a3d25b4d6c3fce83ba12f8", "repo": "Pillow", "path": "Tests/test_file_pcx.py", "file_name": "test_file_pcx.py", "fun_name": "test_sanity", "commit_message": "Pad palette to 768 bytes", "code": "def test_sanity(tmp_path):\n for mode in (\"1\", \"L\", \"P\", \"RGB\"):\n _roundtrip(tmp_path, hopper(mode))\n\n # Test a palette with less than 256 colors\n im = Image.new(\"P\", (1, 1))\n im.putpalette((255, 0, 0))\n _roundtrip(tmp_path, im)\n\n # Test an unsupported mode\n f = str(tmp_path / \"temp.pcx\")\n im = hopper(\"RGBA\")\n with pytest.raises(ValueError):\n im.save(f)\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 86, "n_words": 46, "vocab_size": 38, "complexity": 2, "nloc": 10, "token_counts": 87, "n_ast_nodes": 149, "n_identifiers": 15, "random_cut": "def test_sanity(tmp_path):\n for mode in (\"1\", \"L\", \"P\", \"RGB\"):\n _roundtrip(tmp_path, hopper(mode))\n\n # Test a palette with less than 256 colors\n im = Image.new(\"P\", (1, 1))\n" }, { "id": 277925, "commit_id": "406774b60ac6b505ae9bf7e8728b00a1523ad4a3", "repo": "keras", "path": "keras/optimizers/optimizer_v1.py", "file_name": "optimizer_v1.py", "fun_name": "get_updates", "commit_message": "resolve line-too-long in optimizer", "code": "def get_updates(self, loss, params):\n if tf.distribute.has_strategy():\n self.updates = []\n\n if not params:\n # After the model vars have been created, the second call to\n # get_updates is called with params as 
an empty list. This\n # ensures that we call compute_gradients with params=None.\n grads = self.optimizer.compute_gradients(loss)\n else:\n grads = self.optimizer.compute_gradients(loss, params)\n global_step = tf.compat.v1.train.get_global_step()\n opt_update = self.optimizer.apply_gradients(grads, global_step)\n else:\n if not params:\n self.updates = [tf.compat.v1.assign_add(self.iterations, 1)]\n return self.updates\n\n # Updates list starts out empty because the iterations variable is\n # incremented in optimizer.apply_gradients()\n self.updates = []\n grads = self.optimizer.compute_gradients(loss, params)\n opt_update = self.optimizer.apply_gradients(\n grads, global_step=self.iterations\n )\n\n self.updates.append(opt_update)\n return self.updates\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 378, "n_words": 99, "vocab_size": 65, "complexity": 4, "nloc": 20, "token_counts": 153, "n_ast_nodes": 245, "n_identifiers": 21, "random_cut": "def get_updates(self, loss, params):\n if tf.distribute.has_strategy():\n self.updates = []\n\n if not params:\n # After the model vars have been created, the second call to\n # get_updates is called with params as an empty list. This\n # ensures that we call compute_gradi" }, { "id": 305059, "commit_id": "481205535c3230385573df12ec2814be2d7468dd", "repo": "core", "path": "tests/components/prusalink/conftest.py", "file_name": "conftest.py", "fun_name": "mock_printer_api", "commit_message": "Add PrusaLink integration (#77429)\n\nCo-authored-by: Martin Hjelmare ", "code": "def mock_printer_api(hass):\n \n resp = {\n \"telemetry\": {\n \"temp-bed\": 41.9,\n \"temp-nozzle\": 47.8,\n \"print-speed\": 100,\n \"z-height\": 1.8,\n \"material\": \"PLA\",\n },\n \"temperature\": {\n \"tool0\": {\"actual\": 47.8, \"target\": 0.0, \"display\": 0.0, \"offset\": 0},\n \"bed\": {\"actual\": 41.9, \"target\": 0.0, \"offset\": 0},\n },\n \"state\": {\n \"text\": \"Operational\",\n \"flags\": {\n \"operational\": True,\n \"paused\": False,\n \"printing\": False,\n \"cancelling\": False,\n \"pausing\": False,\n \"sdReady\": False,\n \"error\": False,\n \"closedOnError\": False,\n \"ready\": True,\n \"busy\": False,\n },\n },\n }\n with patch(\"pyprusalink.PrusaLink.get_printer\", return_value=resp):\n yield resp\n\n\n@pytest.fixture", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 392, "n_words": 72, "vocab_size": 48, "complexity": 1, "nloc": 31, "token_counts": 158, "n_ast_nodes": 267, "n_identifiers": 7, "random_cut": "def mock_printer_api(hass):\n \n resp = {\n \"telemetry\": {\n \"temp-bed\": 41.9" }, { "id": 164992, "commit_id": "03fef5f0e35200aa5828975b62782bcf11faa0d2", "repo": "pandas", "path": "pandas/tests/plotting/test_hist_method.py", "file_name": "test_hist_method.py", "fun_name": "test_hist_layout", "commit_message": "TST: Clean tests/plotting (#45992)", "code": "def test_hist_layout(self, hist_df):\n df = hist_df\n msg = \"The 'layout' keyword is not supported when 'by' is None\"\n with pytest.raises(ValueError, match=msg):\n df.height.hist(layout=(1, 1))\n\n with pytest.raises(ValueError, match=msg):\n df.height.hist(layout=[1, 1])\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 77, "n_words": 28, "vocab_size": 23, "complexity": 1, "nloc": 7, 
"token_counts": 65, "n_ast_nodes": 105, "n_identifiers": 12, "random_cut": "def test_hist_layout(self, hist_df):\n df = hist_df\n msg = \"The 'layout' keyword is not supported when 'by' is None\"\n with pytest.raises(Val" }, { "id": 97136, "commit_id": "876a24001e607b14f8d87e5999616dd6a0e3dcb8", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events_spans_histogram.py", "file_name": "test_organization_events_spans_histogram.py", "fun_name": "setUp", "commit_message": "feat(perf): Add base spans histogram endpoint (#32529)\n\n* feat(perf): Add events spans histogram url\r\n\r\n* feat(spans): Add spans histogram endpoint\r\n\r\n* fix typos\r\n\r\n* Add spans histogram query\r\n\r\n* clean up the spans histogram query\r\n\r\n* clean up test data\r\n\r\n* make endpoint private\r\n\r\n* add base tests\r\n\r\n* udpate tests\r\n\r\n* exclude spans histogram endpoint from the public list\r\n\r\n* make endpoint private\r\n\r\n* update codeowners\r\n\r\n* use spans.exclusive_time in SnQL Function for spans histograms\r\n\r\n* clear selected_columns\r\n\r\n* document spans histogram query params\r\n\r\n* handle quotes and backslashes in span ops\r\n\r\n* rename args in span histogram snql function\r\n\r\n* add proper tests\r\n\r\n* add spans histogram function to the query builder\r\n\r\n* more tests", "code": "def setUp(self):\n super().setUp()\n self.features = {}\n self.login_as(user=self.user)\n self.org = self.create_organization(owner=self.user)\n self.project = self.create_project(organization=self.org)\n self.url = reverse(\n self.URL,\n kwargs={\"organization_slug\": self.org.slug},\n )\n\n self.min_ago = before_now(minutes=1).replace(microsecond=0)\n self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 113, "n_words": 29, "vocab_size": 24, "complexity": 1, "nloc": 12, "token_counts": 125, "n_ast_nodes": 197, "n_identifiers": 27, "random_cut": "def setUp(self):\n super().setUp()\n self.features = {}\n self.login_as(user=self.user)\n self.org = self.create_organization(owner=self.user)\n self.project = self.create_project(organization=self.org)\n self.url = reverse(\n self.URL,\n kwargs={\"organization_slug\": self.org.slug},\n )\n\n " }, { "id": 211923, "commit_id": "ada85ff1dc6dc1d5857141b3202733870de5c809", "repo": "bokeh", "path": "bokeh/colors/color.py", "file_name": "color.py", "fun_name": "brightness", "commit_message": "Bump min sphinx version (#11973)\n\n* Bump min sphinx version\r\n\r\n* checkpoint\r\n\r\n* comment for fully qualified names", "code": "def brightness(self) -> float:\n \n # http://alienryderflex.com/hsp.html\n r, g, b = self.r, self.g, self.b\n return sqrt(0.299*r**2 + 0.587*g**2 + 0.114*b**2)/255\n", "url": "https://github.com/bokeh/bokeh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 47, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 4, "token_counts": 54, "n_ast_nodes": 76, "n_identifiers": 7, "random_cut": "def brightness(self) -> float:\n \n # http://alienryderflex.com/hsp.html\n r, g, b = self.r, self.g, self.b\n return sqrt(0.299*r**2 + 0.587*g**2 + 0.114*b**2)/255\n" }, { "id": 75491, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/search/backends/database/mysql/mysql.py", "file_name": "mysql.py", "fun_name": "reset_index", "commit_message": "Reformat 
with black", "code": "def reset_index(self):\n for connection in [\n connection\n for connection in connections.all()\n if connection.vendor == \"mysql\"\n ]:\n IndexEntry._default_manager.using(connection.alias).delete()\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 74, "n_words": 17, "vocab_size": 13, "complexity": 4, "nloc": 7, "token_counts": 40, "n_ast_nodes": 65, "n_identifiers": 11, "random_cut": "def reset_index(self):\n for connection in [\n connection\n for connection in connections.all()\n if connecti" }, { "id": 223337, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/tests/test_unixccompiler.py", "file_name": "test_unixccompiler.py", "fun_name": "setUp", "commit_message": "add python 3.10.4 for windows", "code": "def setUp(self):\n self._backup_platform = sys.platform\n self._backup_get_config_var = sysconfig.get_config_var\n self._backup_config_vars = dict(sysconfig._config_vars)", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 11, "vocab_size": 9, "complexity": 1, "nloc": 7, "token_counts": 44, "n_ast_nodes": 47, "n_identifiers": 11, "random_cut": "def setUp(self):\n self._backup_platform = sys.platform\n self._backup_get_config_var = sysconfig.get_config_var\n self._backup_config_vars = dict(sy" }, { "id": 56649, "commit_id": "e89deece8df49321ef8e5bdc5a3798ea7a7ac1f2", "repo": "prefect", "path": "tests/orion/api/test_flow_run_notification_policies.py", "file_name": "test_flow_run_notification_policies.py", "fun_name": "test_create_policy_with_message", "commit_message": "Enable custom message templates", "code": "async def test_create_policy_with_message(self, client, notifier_block):\n response = await client.post(\n \"/flow_run_notification_policies/\",\n json=dict(\n schemas.actions.FlowRunNotificationPolicyCreate(\n name=\"My Success Policy\",\n state_names=[\"Completed\"],\n tags=[],\n block_document_id=notifier_block.id,\n message_template=\"Hello there {flow_run_name}\",\n ).dict(json_compatible=True),\n ),\n )\n policy = FlowRunNotificationPolicy.parse_obj(response.json())\n assert policy.message_template == \"Hello there {flow_run_name}\"\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 218, "n_words": 33, "vocab_size": 31, "complexity": 1, "nloc": 15, "token_counts": 83, "n_ast_nodes": 134, "n_identifiers": 21, "random_cut": "async def test_create_policy_with_message(self, client, notifier_block):\n response = await client.post(\n \"/flow_run_notification_policies/\",\n json=dict(\n schemas.actions.FlowRunNotificationPolicyCreate(\n name=\"My Success Policy\",\n state_names=[\"Completed\"],\n tags=[],\n " }, { "id": 163009, "commit_id": "3dfed3fcd552dcbf4daf7f78c82a87638f896512", "repo": "pandas", "path": "pandas/tests/io/test_sql.py", "file_name": "test_sql.py", "fun_name": "test_keyword_as_column_names", "commit_message": "ENH: to_sql returns rowcount (#45137)", "code": "def test_keyword_as_column_names(self):\n df = DataFrame({\"From\": np.ones(5)})\n assert sql.to_sql(df, con=self.conn, name=\"testkeywords\", index=False) == 5\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 26, "n_words": 13, 
"vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 43, "n_ast_nodes": 69, "n_identifiers": 12, "random_cut": "def test_keyword_as_column_names(self):\n df = DataFrame({\"From\": np.ones(5)})\n assert sql.to_sql(df, con=self.conn, name=\"testkeywords\", index=Fal" }, { "id": 186272, "commit_id": "765dd138c74d754a599a46aab043d19a78dec657", "repo": "textual", "path": "tests/test_visibility_change.py", "file_name": "test_visibility_change.py", "fun_name": "compose", "commit_message": "Fix changes to visibility needing an explicit refresh to take effect\n\nFixes #1355.", "code": "def compose(self) -> ComposeResult:\n yield Vertical(\n Widget(id=\"keep\"), Widget(id=\"hide-via-code\"), Widget(id=\"hide-via-css\")\n )\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 34, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 31, "n_ast_nodes": 55, "n_identifiers": 6, "random_cut": "def compose(self) -> ComposeResult:\n yield " }, { "id": 2395, "commit_id": "58da94aae4e66576a7f40b55f2de2d69693bfe20", "repo": "PySyft", "path": "packages/syft/src/syft/core/node/common/node_manager/user_manager.py", "file_name": "user_manager.py", "fun_name": "get_all_applicant", "commit_message": "added user application manager,\nreverted default signup on user creation", "code": "def get_all_applicant(self) -> List[NoSQLUserApplication]:\n \n return self.user_application_manager.all()\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 7, "token_counts": 19, "n_ast_nodes": 33, "n_identifiers": 6, "random_cut": "def get_all_applicant(self) -> List[NoSQLUserApplication]:\n \n return" }, { "id": 69623, "commit_id": "7d09440579da5865f8b6832829527b3aeab92f85", "repo": "erpnext", "path": "erpnext/assets/doctype/asset_depreciation_schedule/asset_depreciation_schedule.py", "file_name": "asset_depreciation_schedule.py", "fun_name": "cancel_asset_depr_schedules", "commit_message": "chore: more refactoring", "code": "def cancel_asset_depr_schedules(asset_doc):\n\tfor row in asset_doc.get(\"finance_books\"):\n\t\tasset_depr_schedule_name = get_asset_depr_schedule_name(asset_doc.name, row.finance_book)\n\n\t\tif not asset_depr_schedule_name:\n\t\t\treturn\n\n\t\tasset_depr_schedule_doc = frappe.get_doc(\"Asset Depreciation Schedule\", asset_depr_schedule_name)\n\n\t\tasset_depr_schedule_doc.cancel()\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 14, "n_words": 21, "vocab_size": 20, "complexity": 3, "nloc": 7, "token_counts": 47, "n_ast_nodes": 79, "n_identifiers": 12, "random_cut": "def cancel_asset_depr_schedules(asset_doc):\n\tfor row in asset_doc.get(\"finance_books\"):\n\t\tasset_depr_schedule_name = get_asset_depr_schedule_name(asset_doc.name, row.finance_book)\n\n\t\tif not asset_depr_schedule_name:\n\t\t\treturn\n\n\t\tasset_depr_schedule_do" }, { "id": 147137, "commit_id": "909cdea3cdbebb11ea2e62355b99f8bc3008c3ac", "repo": "ray", "path": "python/ray/_private/import_thread.py", "file_name": "import_thread.py", "fun_name": "_do_importing", "commit_message": "[Python Worker] add feature flag to support forking from workers (#23260)\n\nMake sure Python dependencies can be imported on demand, without the background importer thread. 
Use cases are:\r\n\r\nIf the pubsub notification for a new export is lost, importing can still be done.\r\nAllow not running the background importer thread, without affecting Ray's functionalities.\r\nAdd a feature flag to support forking from Python workers, by\r\n\r\nEnable fork support in gRPC.\r\nDisable importer thread and only leave the main thread in the Python worker. The importer thread will not run after forking anyway.", "code": "def _do_importing(self):\n while True:\n with self._lock:\n export_key = ray._private.function_manager.make_export_key(\n self.num_imported + 1, self.worker.current_job_id\n )\n key = self.gcs_client.internal_kv_get(\n export_key, ray_constants.KV_NAMESPACE_FUNCTION_TABLE\n )\n if key is not None:\n self._process_key(key)\n self.num_imported += 1\n else:\n break\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 233, "n_words": 31, "vocab_size": 27, "complexity": 3, "nloc": 14, "token_counts": 69, "n_ast_nodes": 111, "n_identifiers": 17, "random_cut": "def _do_importing(self):\n while True:\n with self._lock:\n export_key = ray._private.function_manager.ma" }, { "id": 312310, "commit_id": "8245ff7473a4242faad007a53f610d9dc98b46b0", "repo": "core", "path": "tests/components/mqtt/test_init.py", "file_name": "test_init.py", "fun_name": "test_debug_info_filter_same", "commit_message": "Log transmitted MQTT messages (#65550)", "code": "async def test_debug_info_filter_same(hass, mqtt_mock):\n \n config = {\n \"device\": {\"identifiers\": [\"helloworld\"]},\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"state_topic\": \"sensor/#\",\n \"unique_id\": \"veryunique\",\n }\n\n registry = dr.async_get(hass)\n\n data = json.dumps(config)\n async_fire_mqtt_message(hass, \"homeassistant/sensor/bla/config\", data)\n await hass.async_block_till_done()\n\n device = registry.async_get_device({(\"mqtt\", \"helloworld\")})\n assert device is not None\n\n debug_info_data = debug_info.info_for_device(hass, device.id)\n assert len(debug_info_data[\"entities\"][0][\"subscriptions\"]) >= 1\n assert {\"topic\": \"sensor/#\", \"messages\": []} in debug_info_data[\"entities\"][0][\n \"subscriptions\"\n ]\n\n dt1 = datetime(2019, 1, 1, 0, 0, 0)\n dt2 = datetime(2019, 1, 1, 0, 0, 1)\n with patch(\"homeassistant.util.dt.utcnow\") as dt_utcnow:\n dt_utcnow.return_value = dt1\n async_fire_mqtt_message(hass, \"sensor/abc\", \"123\")\n async_fire_mqtt_message(hass, \"sensor/abc\", \"123\")\n dt_utcnow.return_value = dt2\n async_fire_mqtt_message(hass, \"sensor/abc\", \"123\")\n\n debug_info_data = debug_info.info_for_device(hass, device.id)\n assert len(debug_info_data[\"entities\"][0][\"subscriptions\"]) == 1\n assert len(debug_info_data[\"entities\"][0][\"subscriptions\"][0][\"messages\"]) == 2\n assert {\n \"topic\": \"sensor/#\",\n \"messages\": [\n {\n \"payload\": \"123\",\n \"qos\": 0,\n \"retain\": False,\n \"time\": dt1,\n \"topic\": \"sensor/abc\",\n },\n {\n \"payload\": \"123\",\n \"qos\": 0,\n \"retain\": False,\n \"time\": dt2,\n \"topic\": \"sensor/abc\",\n },\n ],\n } == debug_info_data[\"entities\"][0][\"subscriptions\"][0]\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 492, "n_words": 137, "vocab_size": 78, "complexity": 1, "nloc": 49, "token_counts": 333, "n_ast_nodes": 580, "n_identifiers": 25, "random_cut": "async def test_debug_info_filter_same(hass, mqtt_mock):\n 
\n config = {\n \"device\": {\"identifiers\": [\"helloworld\"]},\n \"platform\": \"mqtt\",\n \"name\": \"test\",\n \"state_topic\": \"sensor/#\",\n \"unique_id\": \"veryunique\",\n }\n\n registry = dr.async_get(hass)\n\n data = json.dumps(config)\n async_fire_mqtt_message(hass, \"homeassistant/sensor/bla/config\", data)\n await hass.async_block_till_done()\n\n device = registry.async_get_device({(\"mqtt\", \"helloworld\")})\n assert device is not None\n\n debug_info_data = debug_info.info_for_device(hass, device.id)\n assert len(debug_info_data[\"entities\"][0][\"subscriptions\"]) >= 1\n assert {\"topic\": \"sensor/#\", \"messages\": []} in debug_info_data[\"entities\"][0][\n \"subscriptions\"\n ]\n\n dt1 = datetime(2019, 1, 1, 0, 0, 0)\n dt2 = datetime(2019, 1, 1, 0, 0, 1)\n with patch(\"homeassistant.util.dt.utcnow\") as dt_utcnow:\n dt_utcnow.return_value = dt1\n async_fire_mqtt_message(hass, \"sensor/abc\", \"123\")\n async_fire_mqtt_message(hass, \"sensor/abc\", \"123\")\n dt_utcnow.return_value = dt2\n async_fire_mqtt_message(hass, \"sensor/abc\", \"123\")\n\n debug_info_data = debug_info.info_for_device(hass, device.id)\n assert len(debug_info_data[\"entities\"][0][\"subscriptions\"]) == 1\n assert len(debug_info_data[\"entities\"][0][\"subscriptions\"][0][\"messages\"]) == 2\n assert {\n \"topic\": \"sensor/#\",\n \"messages\": [\n {\n \"payload\": \"123\",\n \"qos\": 0,\n \"retain\": False,\n \"time\": dt1,\n \"topic\": \"sensor/abc\",\n },\n {\n " }, { "id": 266344, "commit_id": "97104f1221b64ef36cf42cb90c5a0eff263a2adb", "repo": "ansible", "path": "test/units/parsing/test_dataloader.py", "file_name": "test_dataloader.py", "fun_name": "test_get_real_file_not_a_path", "commit_message": "Avoid deprecated TestCase functions in unit tests. (#76678)\n\n* Avoid deprecated TestCase functions in unit tests.\r\n* Add assertRaisesRegex for Python 2.7.\r\n* Fix indentation.", "code": "def test_get_real_file_not_a_path(self):\n self.assertRaisesRegex(AnsibleParserError, 'Invalid filename', self._loader.get_real_file, None)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 33, "n_identifiers": 6, "random_cut": "def test_get_real_file_not_a_path(self):\n self.assertRaisesRegex(AnsibleParserError, 'Invalid filename', self._loader.get_real_file, None)\n" }, { "id": 97160, "commit_id": "358b3618ff9fba04d12be3b5077b85b475e1a9ba", "repo": "sentry", "path": "tests/sentry/middleware/test_access_log_middleware.py", "file_name": "test_access_log_middleware.py", "fun_name": "test_concurrent_request_finishes", "commit_message": "ref(ratelimits): surface concurrent requests to access logs (#32301)\n\n### Context\r\nAll context on this project and reasoning for the implementation can be found [here](https://www.notion.so/sentry/Concurrent-Rate-Limiter-ebd5b7562aed4d01b102d83774a06b11)\r\n\r\nThis is the first step of concurrent rate limits in sentry. In order to understand what to rate limit by, we fist need to understand what the concurrent usage of our API is. 
\r\n\r\n### This PR does the following:\r\n\r\n* Implements the concurrent rate limiting mechanism which will be used in sentry\r\n* Surfaces the data to the access logs\r\n* Does NOT enforce any concurrent rate limits\r\n* Surfaces concurrent limit data in headers\r\n\r\nSome unrelated but otherwise important things:\r\n* Makes the rate limit middleware fail open. As it is, an exception in that middleware would shut down the request", "code": "def test_concurrent_request_finishes(self):\n self._caplog.set_level(logging.INFO, logger=\"api.access\")\n self.get_success_response()\n # these requests were done in succession, so we should not have any\n # rate limiting\n self.assert_access_log_recorded()\n assert self.captured_logs[0].token_type == \"None\"\n assert self.captured_logs[0].concurrent_requests == \"1\"\n assert self.captured_logs[0].concurrent_limit == \"1\"\n assert self.captured_logs[0].rate_limit_type == \"RateLimitType.NOT_LIMITED\"\n self.get_success_response()\n assert self.captured_logs[1].token_type == \"None\"\n assert self.captured_logs[1].concurrent_requests == \"1\"\n assert self.captured_logs[1].concurrent_limit == \"1\"\n assert self.captured_logs[1].rate_limit_type == \"RateLimitType.NOT_LIMITED\"\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 152, "n_words": 55, "vocab_size": 34, "complexity": 1, "nloc": 13, "token_counts": 122, "n_ast_nodes": 205, "n_identifiers": 14, "random_cut": "def test_concurrent_request_finishes(self):\n " }, { "id": 36204, "commit_id": "ee27b3d7df397a44dc88324e5aa639a20bf67e53", "repo": "transformers", "path": "tests/generation/test_generation_flax_logits_process.py", "file_name": "test_generation_flax_logits_process.py", "fun_name": "test_temperature_dist_warper", "commit_message": "Replace all deprecated `jax.ops` operations with jnp's `at` (#16078)\n\n* Replace all deprecated `jax.ops` operations with jnp's `at`\r\n\r\n* np to jnp scores\r\n\r\n* suggested changes", "code": "def test_temperature_dist_warper(self):\n input_ids = None\n length = 20\n\n scores = self._get_uniform_logits(batch_size=2, length=length)\n\n # tweak scores to not be uniform anymore\n scores = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch\n scores = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch\n\n # compute softmax\n probs = jax.nn.softmax(scores, axis=-1)\n\n temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)\n temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)\n\n warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)\n warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)\n\n # uniform distribution stays uniform\n self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))\n self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))\n\n # sharp peaks get higher, valleys get lower\n self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())\n self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())\n\n # smooth peaks get lower, valleys get higher\n self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())\n self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, 
"n_whitespaces": 264, "n_words": 116, "vocab_size": 69, "complexity": 1, "nloc": 17, "token_counts": 324, "n_ast_nodes": 480, "n_identifiers": 30, "random_cut": "def test_temperature_dist_warper(self):\n input_ids = None\n length = 20\n\n scores = self._get_unifor" }, { "id": 94708, "commit_id": "cdca9910c03df835f35dd02a09120fdbd96df22b", "repo": "sentry", "path": "src/sentry/utils/performance_issues/performance_detection.py", "file_name": "performance_detection.py", "fun_name": "_is_blocking_render", "commit_message": "feat(perf): Add detection for render-blocking asset performance issues (#37826)\n\n* feat(perf): Add detection for render-blocking asset performance issues\r\n\r\nTag transactions that have slow asset load spans before a slow FCP as having\r\nrender-blocking assets. The thresholds are configurable, but currently we're\r\nlooking for transactions with an FCP between 2s and 10s, where an asset load\r\ntakes up at least 25% of that time.\r\n\r\nThe thresholds will be tuned before we start generating\r\nactual Performance Issues from this data - tagging the transactions will let us\r\nsee what we're detecting it and validate/tune it before it becomes visible to\r\nusers.\r\n\r\nThis detector's use of event properties is a little awkward given the current\r\n`PerformanceDetector` interface, but I thought it would be better to get the\r\nrest of our planned detectors in before we refactor too much.\r\n\r\nFixes PERF-1677", "code": "def _is_blocking_render(self, span):\n span_end_timestamp = timedelta(seconds=span.get(\"timestamp\", 0))\n fcp_timestamp = self.transaction_start + self.fcp\n if span_end_timestamp >= fcp_timestamp:\n return False\n\n span_duration = get_span_duration(span)\n fcp_ratio_threshold = self.settings.get(\"fcp_ratio_threshold\")\n return span_duration / self.fcp > fcp_ratio_threshold\n\n\n# Reports metrics and creates spans for detection", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 89, "n_words": 38, "vocab_size": 30, "complexity": 2, "nloc": 8, "token_counts": 62, "n_ast_nodes": 102, "n_identifiers": 14, "random_cut": "def _is_blocking_render(self, span):\n " }, { "id": 288172, "commit_id": "768b83139fe9704c35017f7dd8f9322cb38bc0c5", "repo": "core", "path": "tests/components/bayesian/test_binary_sensor.py", "file_name": "test_binary_sensor.py", "fun_name": "test_sensor_numeric_state", "commit_message": "Add to issue registry if user has mirrored entries for breaking in #67631 (#79208)\n\nCo-authored-by: Diogo Gomes ", "code": "async def test_sensor_numeric_state(hass):\n \n config = {\n \"binary_sensor\": {\n \"platform\": \"bayesian\",\n \"name\": \"Test_Binary\",\n \"observations\": [\n {\n \"platform\": \"numeric_state\",\n \"entity_id\": \"sensor.test_monitored\",\n \"below\": 10,\n \"above\": 5,\n \"prob_given_true\": 0.7,\n \"prob_given_false\": 0.4,\n },\n {\n \"platform\": \"numeric_state\",\n \"entity_id\": \"sensor.test_monitored1\",\n \"below\": 7,\n \"above\": 5,\n \"prob_given_true\": 0.9,\n \"prob_given_false\": 0.2,\n },\n ],\n \"prior\": 0.2,\n }\n }\n\n assert await async_setup_component(hass, \"binary_sensor\", config)\n await hass.async_block_till_done()\n\n hass.states.async_set(\"sensor.test_monitored\", 6)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n assert state.attributes.get(\"occurred_observation_entities\") == [\n \"sensor.test_monitored\"\n ]\n assert 
abs(state.attributes.get(\"probability\") - 0.304) < 0.01\n # A = sensor.test_binary being ON\n # B = sensor.test_monitored in the range [5, 10]\n # Bayes theorum is P(A|B) = P(B|A) * P(A) / P(B|A)*P(A) + P(B|~A)*P(~A).\n # Where P(B|A) is prob_given_true and P(B|~A) is prob_given_false\n # Calculated using P(A) = 0.2, P(B|A) = 0.7, P(B|~A) = 0.4 -> 0.30\n\n hass.states.async_set(\"sensor.test_monitored\", 4)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n assert state.attributes.get(\"occurred_observation_entities\") == [\n \"sensor.test_monitored\"\n ]\n assert abs(state.attributes.get(\"probability\") - 0.111) < 0.01\n # As abve but since the value is equal to 4 then this is a negative observation (~B) where P(~B) == 1 - P(B) because B is binary\n # We therefore want to calculate P(A|~B) so we use P(~B|A) (1-0.7) and P(~B|~A) (1-0.4)\n # Calculated using bayes theorum where P(A) = 0.2, P(~B|A) = 1-0.7 (as negative observation), P(~B|notA) = 1-0.4 -> 0.11\n\n assert state.state == \"off\"\n\n hass.states.async_set(\"sensor.test_monitored\", 6)\n await hass.async_block_till_done()\n hass.states.async_set(\"sensor.test_monitored1\", 6)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n assert state.attributes.get(\"observations\")[0][\"prob_given_true\"] == 0.7\n assert state.attributes.get(\"observations\")[1][\"prob_given_true\"] == 0.9\n assert state.attributes.get(\"observations\")[1][\"prob_given_false\"] == 0.2\n assert abs(state.attributes.get(\"probability\") - 0.663) < 0.01\n # Here we have two positive observations as both are in range. We do a 2-step bayes. The output of the first is used as the (updated) prior in the second.\n # 1st step P(A) = 0.2, P(B|A) = 0.7, P(B|notA) = 0.4 -> 0.304\n # 2nd update: P(A) = 0.304, P(B|A) = 0.9, P(B|notA) = 0.2 -> 0.663\n\n assert state.state == \"on\"\n\n hass.states.async_set(\"sensor.test_monitored1\", 0)\n await hass.async_block_till_done()\n hass.states.async_set(\"sensor.test_monitored\", 4)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n assert abs(state.attributes.get(\"probability\") - 0.0153) < 0.01\n # Calculated using bayes theorum where P(A) = 0.2, P(~B|A) = 0.3, P(~B|notA) = 0.6 -> 0.11\n # 2nd update: P(A) = 0.111, P(~B|A) = 0.1, P(~B|notA) = 0.8\n\n assert state.state == \"off\"\n\n hass.states.async_set(\"sensor.test_monitored\", 15)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n assert state.state == \"off\"\n\n assert len(async_get(hass).issues) == 0\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 898, "n_words": 367, "vocab_size": 174, "complexity": 1, "nloc": 65, "token_counts": 472, "n_ast_nodes": 795, "n_identifiers": 14, "random_cut": "async def test_sensor_numeric_state(hass):\n \n config = {\n \"binary_sensor\": {\n \"platform\": \"bayesian\",\n \"name\": \"Test_Binary\",\n \"observations\": [\n {\n \"platform\": \"numeric_state\",\n \"entity_id\": \"sensor.test_monitored\",\n \"below\": 10,\n \"above\": 5,\n \"prob_given_true\": 0.7,\n \"prob_given_false\": 0.4,\n },\n {\n \"platform\": \"numeric_state\",\n \"entity_id\": \"sensor.test_monitored1\",\n \"below\": 7,\n \"above\": 5,\n \"prob_given_true\": 0.9,\n \"prob_given_false\": 0.2,\n },\n ],\n \"prior\": 0.2,\n }\n }\n\n assert await 
async_setup_component(hass, \"binary_sensor\", config)\n await hass.async_block_till_done()\n\n hass.states.async_set(\"sensor.test_monitored\", 6)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n assert state.attributes.get(\"occurred_observation_entities\") == [\n \"sensor.test_monitored\"\n ]\n assert abs(state.attributes.get(\"probability\") - 0.304) < 0.01\n # A = sensor.test_binary being ON\n # B = sensor.test_monitored in the range [5, 10]\n # Bayes theorum is P(A|B) = P(B|A) * P(A) / P(B|A)*P(A) + P(B|~A)*P(~A).\n # Where P(B|A) is prob_given_true and P(B|~A) is prob_given_false\n # Calculated using P(A) = 0.2, P(B|A) = 0.7, P(B|~A) = 0.4 -> 0.30\n\n hass.states.async_set(\"sensor.test_monitored\", 4)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.test_binary\")\n\n assert state.attributes.get(\"occurred_obser" }, { "id": 167117, "commit_id": "a93e8df36a6c146dfa71057ca68971b181588a7e", "repo": "pandas", "path": "pandas/tests/arrays/test_datetimes.py", "file_name": "test_datetimes.py", "fun_name": "reso", "commit_message": "ENH: support non-nano in DTA._box_func (#47299)\n\n* ENH: support reso in DTA._box_func\r\n\r\n* mypy fixup", "code": "def reso(self, unit):\n \n return {\n \"s\": NpyDatetimeUnit.NPY_FR_s.value,\n \"ms\": NpyDatetimeUnit.NPY_FR_ms.value,\n \"us\": NpyDatetimeUnit.NPY_FR_us.value,\n }[unit]\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 66, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 38, "n_ast_nodes": 63, "n_identifiers": 8, "random_cut": "def reso(self, unit):\n \n return {\n \"s\": NpyDatetimeUnit.NPY_FR_s.value,\n" }, { "id": 58752, "commit_id": "281526fad8d8269463f9d6dafc1ff931d4bc1115", "repo": "prefect", "path": "src/prefect/orion/database/query_components.py", "file_name": "query_components.py", "fun_name": "_join_flow_run_to_work_queue", "commit_message": "Continue refining queries", "code": "def _join_flow_run_to_work_queue(self, flow_run, work_queue):\n \n return sa.and_(flow_run.work_queue_name == work_queue.name)\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 37, "n_identifiers": 8, "random_cut": "def _join_flow_run_to_work_queue(self, flow_run, work_queue):\n \n return sa.and_(flow_run.work_queue_name == work_queue.name)\n" }, { "id": 127226, "commit_id": "d0678b80edf7ab50ffde93c13f4a8cdbd7dbba99", "repo": "ray", "path": "python/ray/train/tests/test_backend.py", "file_name": "test_backend.py", "fun_name": "test_train_failure", "commit_message": "[rfc] [air/tune/train] Improve trial/training failure error printing (#27946)\n\nWhen training fails, the console output is currently cluttered with tracebacks which are hard to digest. This problem is exacerbated when running multiple trials in a tuning run.\r\n\r\nThe main problems here are:\r\n\r\n1. Tracebacks are printed multiple times: In the remote worker and on the driver\r\n2. 
Tracebacks include many internal wrappers\r\n\r\nThe proposed solution for 1 is to only print tracebacks once (on the driver) or never (if configured).\r\n\r\nThe proposed solution for 2 is to shorten the tracebacks to include mostly user-provided code.\r\n\r\n### Deduplicating traceback printing\r\n\r\nThe solution here is to use `logger.error` instead of `logger.exception` in the `function_trainable.py` to avoid printing a traceback in the trainable. \r\n\r\nAdditionally, we introduce an environment variable `TUNE_PRINT_ALL_TRIAL_ERRORS` which defaults to 1. If set to 0, trial errors will not be printed at all in the console (only the error.txt files will exist).\r\n\r\nTo be discussed: We could also default this to 0, but I think the expectation is to see at least some failure output in the console logs per default.\r\n\r\n### Removing internal wrappers from tracebacks\r\n\r\nThe solution here is to introcude a magic local variable `_ray_start_tb`. In two places, we use this magic local variable to reduce the stacktrace. A utility `shorten_tb` looks for the last occurence of `_ray_start_tb` in the stacktrace and starts the traceback from there. This takes only linear time. If the magic variable is not present, the full traceback is returned - this means that if the error does not come up in user code, the full traceback is returned, giving visibility in possible internal bugs. Additionally there is an env variable `RAY_AIR_FULL_TRACEBACKS` which disables traceback shortening.\r\n\r\nSigned-off-by: Kai Fricke ", "code": "def test_train_failure(ray_start_2_cpus):\n config = TestConfig()\n e = BackendExecutor(config, num_workers=2)\n e.start()\n\n with pytest.raises(StartTraceback) as exc:\n e.get_next_results()\n assert isinstance(exc.value.__cause__, TrainBackendError)\n\n with pytest.raises(StartTraceback) as exc:\n e.pause_reporting()\n assert isinstance(exc.value.__cause__, TrainBackendError)\n\n with pytest.raises(StartTraceback) as exc:\n e.finish_training()\n assert isinstance(exc.value.__cause__, TrainBackendError)\n\n e.start_training(lambda: 1, dataset_spec=EMPTY_RAY_DATASET_SPEC)\n\n with pytest.raises(StartTraceback) as exc:\n e.start_training(lambda: 2, dataset_spec=EMPTY_RAY_DATASET_SPEC)\n assert isinstance(exc.value.__cause__, TrainBackendError)\n\n assert e.finish_training() == [1, 1]\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 118, "n_words": 52, "vocab_size": 26, "complexity": 1, "nloc": 18, "token_counts": 160, "n_ast_nodes": 263, "n_identifiers": 22, "random_cut": "def test_train_failure(ray_start_2_cpus):\n config = TestConfig()\n e = BackendExecutor(config, num_workers=2)\n e.start()\n\n with pytest.raises(StartTraceback) as exc:\n e.get_next_results()\n assert isinstance(exc.value.__cause__, TrainBackendError)\n\n with pytest.raises(StartTraceback) as exc:\n e.pause_reporting()\n assert isinstance(exc.value.__cause__, TrainBackendError)\n\n with pytest.raises(StartTraceback) as exc:\n e.finish_training()\n assert isinstance(exc.value.__cause__, TrainBackendError)\n\n e.start_training(lambda: 1, dataset_spec=EMPTY_" }, { "id": 312848, "commit_id": "d574e54fd8212fa85134a49f1cb8971065fd4d65", "repo": "core", "path": "tests/components/fivem/test_config_flow.py", "file_name": "test_config_flow.py", "fun_name": "_mock_fivem_info_invalid", "commit_message": "Fivem code quality improvements (#66086)\n\n* specify config type\r\n\r\n* move coordinator outside try 
block\r\n\r\n* rename gamename to game_name\r\n\r\n* remove log in __init__\r\n\r\n* Remove logging and minify update\r\n\r\n* Add types to parameters\r\n\r\n* Remove name from device\r\n\r\n* Remove update listener\r\n\r\n* Remove status icon\r\n\r\n* Dont allow duplicate entries\r\n\r\n* Use default translation string\r\n\r\n* Remove online and port from coordinator", "code": "def _mock_fivem_info_invalid():\n return {\n \"plugins\": [\n \"sample\",\n ],\n \"data\": {\n \"gamename\": \"gta5\",\n },\n }\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 69, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 9, "token_counts": 23, "n_ast_nodes": 44, "n_identifiers": 1, "random_cut": "def _mock_fivem_info_invalid():\n return {\n \"plugins\": [\n \"sample\",\n ],\n \"data\": {\n \"gamename\": \"gta5\",\n },\n }\n\n" }, { "id": 79929, "commit_id": "5c1c2c8f531d96f4568f6dfa6ce71bc32dd9d16c", "repo": "wagtail", "path": "wagtail/admin/views/pages/delete.py", "file_name": "delete.py", "fun_name": "delete", "commit_message": "Enforce the use of a single string formatting mechanism for translation source strings\n\nClose #9377", "code": "def delete(request, page_id):\n page = get_object_or_404(Page, id=page_id).specific\n if not page.permissions_for_user(request.user).can_delete():\n raise PermissionDenied\n\n wagtail_site_name = getattr(settings, \"WAGTAIL_SITE_NAME\", \"wagtail\")\n with transaction.atomic():\n for fn in hooks.get_hooks(\"before_delete_page\"):\n result = fn(request, page)\n if hasattr(result, \"status_code\"):\n return result\n\n next_url = get_valid_next_url_from_request(request)\n\n pages_to_delete = {page}\n\n # The `construct_translated_pages_to_cascade_actions` hook returns translation and\n # alias pages when the action is set to \"delete\"\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n for fn in hooks.get_hooks(\"construct_translated_pages_to_cascade_actions\"):\n fn_pages = fn([page], \"delete\")\n if fn_pages and isinstance(fn_pages, dict):\n for additional_pages in fn_pages.values():\n pages_to_delete.update(additional_pages)\n\n pages_to_delete = list(pages_to_delete)\n\n if request.method == \"POST\":\n continue_deleting = True\n if (\n request.POST.get(\"confirm_site_name\")\n and request.POST.get(\"confirm_site_name\") != wagtail_site_name\n ):\n messages.error(\n request, f\"Please type '{wagtail_site_name}' to confirm.\"\n )\n continue_deleting = False\n if continue_deleting:\n parent_id = page.get_parent().id\n # Delete the source page.\n action = DeletePageAction(page, user=request.user)\n # Permission checks are done above, so skip them in execute.\n action.execute(skip_permission_checks=True)\n\n # Delete translation and alias pages if they have the same parent page.\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n parent_page_translations = page.get_parent().get_translations()\n for page_or_alias in pages_to_delete:\n if page_or_alias.get_parent() in parent_page_translations:\n action = DeletePageAction(page_or_alias, user=request.user)\n # Permission checks are done above, so skip them in execute.\n action.execute(skip_permission_checks=True)\n\n messages.success(\n request,\n _(\"Page '%(page_title)s' deleted.\")\n % {\"page_title\": page.get_admin_display_title()},\n )\n\n for fn in hooks.get_hooks(\"after_delete_page\"):\n result = fn(request, page)\n if hasattr(result, \"status_code\"):\n return result\n\n if next_url:\n 
return redirect(next_url)\n return redirect(\"wagtailadmin_explore\", parent_id)\n\n descendant_count = page.get_descendant_count()\n return TemplateResponse(\n request,\n \"wagtailadmin/pages/confirm_delete.html\",\n {\n \"page\": page,\n \"descendant_count\": descendant_count,\n \"next\": next_url,\n # if the number of pages ( child pages + current page) exceeds this limit, then confirm before delete.\n \"confirm_before_delete\": (descendant_count + 1)\n >= getattr(settings, \"WAGTAILADMIN_UNSAFE_PAGE_DELETION_LIMIT\", 10),\n \"wagtail_site_name\": wagtail_site_name,\n # note that while pages_to_delete may contain a mix of translated pages\n # and aliases, we count the \"translations\" only, as aliases are similar\n # to symlinks, so they should just follow the source\n \"translation_count\": len(\n [\n translation.id\n for translation in pages_to_delete\n if not translation.alias_of_id and translation.id != page.id\n ]\n ),\n \"translation_descendant_count\": sum(\n [\n translation.get_descendants().filter(alias_of__isnull=True).count()\n for translation in pages_to_delete\n ]\n ),\n },\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 1448, "n_words": 311, "vocab_size": 181, "complexity": 23, "nloc": 77, "token_counts": 457, "n_ast_nodes": 763, "n_identifiers": 62, "random_cut": "def delete(request, page_id):\n page = get_object_or_404(Page, id=page_id).specific\n if not page.permissions_for_user(request.user).can_delete():\n raise PermissionDenied\n\n wagtail_site_name = getattr(settings, \"WAGTAIL_SITE_NAME\", \"wagtail\")\n with transaction.atomic():\n for fn in hooks.get_hooks(\"before_delete_page\"):\n result = fn(request, page)\n if hasattr(result, \"status_code\"):\n return result\n\n next_url = get_valid_next_url_from_request(request)\n\n pages_to_delete = {page}\n\n # The `construct_translated_pages_to_cascade_actions` hook returns translation and\n # alias pages when the action is set to \"delete\"\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n for fn in hooks.get_hooks(\"construct_translated_pages_to_cascade_actions\"):\n fn_pages = fn([page], \"delete\")\n if fn_pages and isinstance(fn_pages, dict):\n for additional_pages in fn_pages.values():\n pages_to_delete.update(additional_pages)\n\n pages_to_delete = list(pages_to_delete)\n\n if request.method == \"POST\":\n continue_deleting = True\n if (\n request.POST.get(\"confirm_site_name\")\n and request.POST.get(\"confirm_site_name\") != wagtail_site_name\n ):\n messages.error(\n request, f\"Please type '{wagtail_site_name}' to confirm.\"\n )\n continue_deleting = False\n if continue_deleting:\n parent_id = page.get_parent().id\n # Delete the source page.\n action = DeletePageAct" }, { "id": 38226, "commit_id": "afe5d42d8d1d80af911ed980c2936bfe887078f6", "repo": "transformers", "path": "src/transformers/models/led/modeling_tf_led.py", "file_name": "modeling_tf_led.py", "fun_name": "_chunk", "commit_message": "Black preview (#17217)\n\n* Black preview\r\n\r\n* Fixup too!\r\n\r\n* Fix check copies\r\n\r\n* Use the same version as the CI\r\n\r\n* Bump black", "code": "def _chunk(hidden_states, window_overlap):\n \n batch_size, seq_length, hidden_dim = shape_list(hidden_states)\n num_output_chunks = 2 * (seq_length // (2 * window_overlap)) - 1\n\n # define frame size and frame stride (similar to convolution)\n frame_hop_size = window_overlap * hidden_dim\n frame_size = 2 * frame_hop_size\n hidden_states = 
tf.reshape(hidden_states, (batch_size, seq_length * hidden_dim))\n\n # chunk with overlap\n chunked_hidden_states = tf.signal.frame(hidden_states, frame_size, frame_hop_size)\n\n if tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(chunked_hidden_states),\n [batch_size, num_output_chunks, frame_size],\n message=(\n \"Make sure chunking is correctly applied. `Chunked hidden states should have output dimension\"\n f\" {[batch_size, frame_size, num_output_chunks]}, but got {shape_list(chunked_hidden_states)}.\"\n ),\n )\n\n chunked_hidden_states = tf.reshape(\n chunked_hidden_states,\n (batch_size, num_output_chunks, 2 * window_overlap, hidden_dim),\n )\n\n return chunked_hidden_states\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 332, "n_words": 98, "vocab_size": 75, "complexity": 2, "nloc": 21, "token_counts": 123, "n_ast_nodes": 219, "n_identifiers": 19, "random_cut": "def _chunk(hidden_states, window_overlap):\n \n batch_size, seq_length, hidden_dim = shape_list(hidden_states)\n num_output_chunks = 2 * (seq_length // (2 * window_overlap)) - 1\n\n # define frame size and frame stride (similar to convolution)\n frame_hop_size = window_overlap * hidden_dim\n frame_size = 2 * frame_hop_size\n hidden_states = tf.reshape(hidden_states, (batch_size, seq_length * hidden_dim))\n\n # chunk with overlap\n chunked_hidden_states = tf.signal.frame(hidden_states, frame_size, frame_hop_size)\n\n if tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(chunked_hidden_states),\n [batch_size, num_output_chunks, frame_size],\n message=(\n \"Make sure chunking is correctly applied. `Chunked hidden states should have output dimension\"\n f\" {[batch_size, frame_size, num_output_chunks]}, but got {shape_list(chunked_hidden_states)}.\"\n ),\n )\n\n chunked_hidden_states = tf.reshape(\n chunked_hidden_states,\n (batch_size, num_output_chunks, 2 * window_overlap, hidden_dim),\n )\n\n return chunk" }, { "id": 270014, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/callbacks_test.py", "file_name": "callbacks_test.py", "fun_name": "test_progbar_infers_steps", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_progbar_infers_steps(self):\n x, y = np.ones((10, 1)), np.ones((10, 1))\n data = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)\n data = data.filter(lambda x, y: True) # Unknown cardinality.\n\n progbar = keras.callbacks.ProgbarLogger(\"steps\")\n model = keras.Sequential([keras.layers.Dense(1)])\n model.compile(\"sgd\", \"mse\")\n self.assertIsNone(progbar.target)\n model.fit(data, epochs=2, callbacks=[progbar])\n self.assertEqual(progbar.target, 5)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 99, "n_words": 36, "vocab_size": 29, "complexity": 1, "nloc": 10, "token_counts": 133, "n_ast_nodes": 211, "n_identifiers": 26, "random_cut": "def test_progbar_infers_steps(self):\n x, y = np.ones((10, 1)), np.ones((10, 1))\n data = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)\n data = data.filter(lambda x, y: True) # Unknown cardinality.\n\n pro" }, { "id": 75193, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/test_admin_views.py", "file_name": "test_admin_views.py", "fun_name": "test_upload", "commit_message": 
"Reformat with black", "code": "def test_upload(self):\n response = self.client.post(\n reverse(\"wagtailimages:chooser_upload\"),\n {\n \"image-chooser-upload-title\": \"Test image\",\n \"image-chooser-upload-file\": SimpleUploadedFile(\n \"test.png\", get_test_image_file().file.getvalue()\n ),\n },\n )\n\n # Check response\n self.assertEqual(response.status_code, 200)\n\n # Check that the image was created\n images = Image.objects.filter(title=\"Test image\")\n self.assertEqual(images.count(), 1)\n\n # Test that size was populated correctly\n image = images.first()\n self.assertEqual(image.width, 640)\n self.assertEqual(image.height, 480)\n\n # Test that the file_size/hash fields were set\n self.assertTrue(image.file_size)\n self.assertTrue(image.file_hash)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 253, "n_words": 59, "vocab_size": 46, "complexity": 1, "nloc": 18, "token_counts": 120, "n_ast_nodes": 203, "n_identifiers": 25, "random_cut": "def test_upload(self):\n response = self.client.post(\n reverse(\"wagtailimages:chooser_upload\"),\n {\n \"image-choo" }, { "id": 290445, "commit_id": "b364ef98a073214aad8deff4ff9b91e9ff041557", "repo": "core", "path": "homeassistant/components/mqtt/vacuum/schema_state.py", "file_name": "schema_state.py", "fun_name": "_setup_from_config", "commit_message": "Use `_attr_` for MQTT vacuum (#81534)\n\n* Use `_attr_` for MQTT vacuum\r\n\r\n* Remove unneeded properties\r\n\r\n* Follow-up comment\r\n\r\n* Remove default value", "code": "def _setup_from_config(self, config):\n \n supported_feature_strings = config[CONF_SUPPORTED_FEATURES]\n self._attr_supported_features = strings_to_services(\n supported_feature_strings, STRING_TO_SERVICE\n )\n self._attr_fan_speed_list = config[CONF_FAN_SPEED_LIST]\n self._command_topic = config.get(CONF_COMMAND_TOPIC)\n self._set_fan_speed_topic = config.get(CONF_SET_FAN_SPEED_TOPIC)\n self._send_command_topic = config.get(CONF_SEND_COMMAND_TOPIC)\n\n self._payloads = {\n key: config.get(key)\n for key in (\n CONF_PAYLOAD_START,\n CONF_PAYLOAD_PAUSE,\n CONF_PAYLOAD_STOP,\n CONF_PAYLOAD_RETURN_TO_BASE,\n CONF_PAYLOAD_CLEAN_SPOT,\n CONF_PAYLOAD_LOCATE,\n )\n }\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 245, "n_words": 41, "vocab_size": 34, "complexity": 2, "nloc": 20, "token_counts": 93, "n_ast_nodes": 140, "n_identifiers": 25, "random_cut": "def _setup_from_config(self, config):\n \n supported_feature_strings = config[CONF_SUPPORTED_FEATURES]\n self._attr_supported_features = strings_to_services(\n supported_feature_strings, STRING_TO_SERVICE\n )\n se" }, { "id": 107419, "commit_id": "7277386c56763b37726bd615cf4e13ba75bc81b1", "repo": "matplotlib", "path": "lib/matplotlib/colorbar.py", "file_name": "colorbar.py", "fun_name": "_reset_locator_formatter_scale", "commit_message": "FIX: minorformatter None", "code": "def _reset_locator_formatter_scale(self):\n \n self._process_values()\n self._locator = None\n self._minorlocator = None\n self._formatter = None\n self._minorformatter = None\n if (self.boundaries is not None or\n isinstance(self.norm, colors.BoundaryNorm)):\n if self.spacing == 'uniform':\n funcs = (self._forward_boundaries, self._inverse_boundaries)\n self._set_scale('function', functions=funcs)\n elif self.spacing == 'proportional':\n self._set_scale('linear')\n elif getattr(self.norm, '_scale', None):\n 
# use the norm's scale (if it exists and is not None):\n self._set_scale(self.norm._scale)\n elif type(self.norm) is colors.Normalize:\n # plain Normalize:\n self._set_scale('linear')\n else:\n # norm._scale is None or not an attr: derive the scale from\n # the Norm:\n funcs = (self.norm, self.norm.inverse)\n self._set_scale('function', functions=funcs)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 325, "n_words": 85, "vocab_size": 53, "complexity": 7, "nloc": 20, "token_counts": 158, "n_ast_nodes": 266, "n_identifiers": 23, "random_cut": "def _reset_locator_formatter_scale(self):\n \n self._process_values()\n self._locator = None\n self._minorlocator = None\n self._formatter = None\n self._minorformatter = None\n if (self.boundaries is not None or\n isinstance(self.norm, colors.BoundaryNorm)):\n if self.spacing == 'uniform':\n funcs = (self._forward_boundaries, self._inverse_boundaries)\n self._set_scale('function', functions=funcs)\n elif self.spacing == 'proportional':\n self._set_scale('linear')\n elif getattr(self.norm, '_scale', None):\n # use the norm's scale (if it exists and is not None):\n self._set_scale(self.norm._scale)\n elif type(self.norm) is colors.Normalize:\n # plain Normalize:\n self._set_scale('linear')\n else:\n " }, { "id": 256346, "commit_id": "a59bca366174d9c692fa19750c24d65f47660ef7", "repo": "haystack", "path": "test/conftest.py", "file_name": "conftest.py", "fun_name": "document_store_dot_product_with_docs", "commit_message": "Apply black formatting (#2115)\n\n* Testing black on ui/\r\n\r\n* Applying black on docstores\r\n\r\n* Add latest docstring and tutorial changes\r\n\r\n* Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too\r\n\r\n* Remove comments\r\n\r\n* Relax constraints on pydoc-markdown\r\n\r\n* Split temporary black from the docs. 
Pydoc-markdown was obsolete and needs a separate PR to upgrade\r\n\r\n* Fix a couple of bugs\r\n\r\n* Add a type: ignore that was missing somehow\r\n\r\n* Give path to black\r\n\r\n* Apply Black\r\n\r\n* Apply Black\r\n\r\n* Relocate a couple of type: ignore\r\n\r\n* Update documentation\r\n\r\n* Make Linux CI run after applying Black\r\n\r\n* Triggering Black\r\n\r\n* Apply Black\r\n\r\n* Remove dependency, does not work well\r\n\r\n* Remove manually double trailing commas\r\n\r\n* Update documentation\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def document_store_dot_product_with_docs(request, test_docs_xs, tmp_path):\n embedding_dim = request.node.get_closest_marker(\"embedding_dim\", pytest.mark.embedding_dim(768))\n document_store = get_document_store(\n document_store_type=request.param,\n embedding_dim=embedding_dim.args[0],\n similarity=\"dot_product\",\n tmp_path=tmp_path,\n )\n document_store.write_documents(test_docs_xs)\n yield document_store\n document_store.delete_documents()\n\n\n@pytest.fixture(params=[\"elasticsearch\", \"faiss\", \"memory\", \"milvus\"])", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "@pytest.fixture(params=[\"elasticsearch\", \"faiss\", \"memory\", \"milvus\"])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 68, "n_words": 24, "vocab_size": 22, "complexity": 1, "nloc": 11, "token_counts": 69, "n_ast_nodes": 140, "n_identifiers": 19, "random_cut": "def document_store_dot_product_with_docs(request, test_docs_xs, tmp_path):\n embedding_dim = request.node.get_closest_marker(\"embedding_dim\", pytest.mark.embedding_dim(768))\n document_store = get_document_store(\n document_store_type=request.param,\n embedding_dim=embedding_dim.args[0],\n similarity=\"dot_product\",\n tmp_path=tmp_path,\n )\n document_store.write_documents(test_docs_xs)\n yield document_store\n document_store.delete_documents()\n\n\n@pyte" }, { "id": 183845, "commit_id": "a5d46d6adc6860389801dce43a8623b6b4b39c3e", "repo": "textual", "path": "sandbox/table.py", "file_name": "table.py", "fun_name": "on_mount", "commit_message": "exit action", "code": "def on_mount(self):\n self.bind(\"d\", \"toggle_dark\")\n self.bind(\"z\", \"toggle_zebra\")\n self.bind(\"x\", \"exit\")\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 28, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 29, "n_ast_nodes": 57, "n_identifiers": 3, "random_cut": "def on_mount(self):\n self.bind(\"d\", \"toggle_dark\")\n self.bind(\"z\", \"toggle_zeb" }, { "id": 170811, "commit_id": "aebd2293b9e80893f4bc6fbf5f870be5ae8c7ce0", "repo": "pandas", "path": "pandas/tests/frame/methods/test_fillna.py", "file_name": "test_fillna.py", "fun_name": "test_ffill", "commit_message": "TST: avoid chained assignment in tests outside of specific tests on chaining (#49474)\n\n* TST: avoid chained assignment in tests outside of specific tests on chaining\r\n\r\n* update to use .loc[index[slice], ..] 
pattern", "code": "def test_ffill(self, datetime_frame):\n datetime_frame.loc[datetime_frame.index[:5], \"A\"] = np.nan\n datetime_frame.loc[datetime_frame.index[-5:], \"A\"] = np.nan\n\n tm.assert_frame_equal(\n datetime_frame.ffill(), datetime_frame.fillna(method=\"ffill\")\n )\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 53, "n_words": 15, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 63, "n_ast_nodes": 101, "n_identifiers": 12, "random_cut": "def test_ffill(self, datetime_frame):\n datetime_frame.loc[datetime_frame.index[:5], \"A\"] = np.nan\n datetime_frame.loc[datetime_frame.index[-5:], \"A\"] = np" }, { "id": 8969, "commit_id": "4b3c8211b3e3eca5f9fdf6553bbd45c9c7587b0d", "repo": "insightface", "path": "body/human_pose/ambiguity_aware/lib/utils/utils.py", "file_name": "utils.py", "fun_name": "euler2rotmat", "commit_message": "update", "code": "def euler2rotmat(eulers):\n # inputs' shape: (N, 3), tensors\n # rotate in the order of z, x, y\n n = eulers.size(0)\n thetax, thetay, thetaz = eulers[:, 0:1], eulers[:, 1:2], eulers[:, 2:3]\n matx = get_rotation_x(thetax)\n maty = get_rotation_y(thetay)\n matz = get_rotation_z(thetaz)\n rotmat = matz.matmul(matx).matmul(maty)\n # rotmat = maty.matmul(matx).matmul(matz)\n return rotmat\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 77, "n_words": 48, "vocab_size": 36, "complexity": 1, "nloc": 8, "token_counts": 78, "n_ast_nodes": 124, "n_identifiers": 15, "random_cut": "def euler2rotmat(eulers):\n # inputs' shape: (N, 3), tensors\n # rotate in the order of z, x, y\n n = eulers.size(0)\n thetax, thetay, thetaz = eulers[:, 0:1], eulers[" }, { "id": 81657, "commit_id": "d42a85714a72defe5abbd28e48367116096131ce", "repo": "awx", "path": "awx/main/tasks/jobs.py", "file_name": "jobs.py", "fun_name": "final_run_hook", "commit_message": "Delete unused playbook profiling code\n\nWe haven't had this feature since pre-AWX 18 (since EEs were introduced) and I cant find any other reference to this.", "code": "def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):\n \n instance.log_lifecycle(\"finalize_run\")\n artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))\n collections_info = os.path.join(artifact_dir, 'collections.json')\n ansible_version_file = os.path.join(artifact_dir, 'ansible_version.txt')\n\n if os.path.exists(collections_info):\n with open(collections_info) as ee_json_info:\n ee_collections_info = json.loads(ee_json_info.read())\n instance.installed_collections = ee_collections_info\n instance.save(update_fields=['installed_collections'])\n if os.path.exists(ansible_version_file):\n with open(ansible_version_file) as ee_ansible_info:\n ansible_version_info = ee_ansible_info.readline()\n instance.ansible_version = ansible_version_info\n instance.save(update_fields=['ansible_version'])\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 207, "n_words": 46, "vocab_size": 34, "complexity": 5, "nloc": 21, "token_counts": 214, "n_ast_nodes": 254, "n_identifiers": 29, "random_cut": "def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):\n \n instance.log_lifecycle(\"finalize_run\"" }, { "id": 42918, "commit_id": 
"2226e64a2263a8166e47d816aa95d211f8fc1c17", "repo": "airflow", "path": "airflow/providers/presto/transfers/presto_to_slack.py", "file_name": "presto_to_slack.py", "fun_name": "_get_slack_hook", "commit_message": "Add `PrestoToSlackOperator` (#23979)\n\n* Add `PrestoToSlackOperator`\r\nAdding the funcitonality to run a single query against presto and send the result as slack message.\r\nSimilar to `SnowflakeToSlackOperator`", "code": "def _get_slack_hook(self) -> SlackWebhookHook:\n return SlackWebhookHook(\n http_conn_id=self.slack_conn_id,\n message=self.slack_message,\n webhook_token=self.slack_token,\n slack_channel=self.slack_channel,\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 68, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 7, "token_counts": 35, "n_ast_nodes": 51, "n_identifiers": 10, "random_cut": "def _get_slack_hook(self) -> SlackWebhookHook:\n return SlackWebhookHook(\n http_conn_id=self.slack_conn_id,\n message=self.slack_" }, { "id": 50663, "commit_id": "a6790a651a12eb391060e533868bf0ba197f6f7e", "repo": "PaddleHub", "path": "modules/image/text_to_image/stable_diffusion/clip/clip/model.py", "file_name": "model.py", "fun_name": "encode_text", "commit_message": "Add stable diffusion module", "code": "def encode_text(self, text):\n x = self.token_embedding(text) # [batch_size, n_ctx, d_model]\n x = x + self.positional_embedding\n x = x.transpose((1, 0, 2)) # NLD -> LND\n x = self.transformer(x)\n x = x.transpose((1, 0, 2)) # LND -> NLD\n x = self.ln_final(x)\n idx = text.numpy().argmax(-1)\n idx = list(idx)\n x = [x[i:i + 1, int(j), :] for i, j in enumerate(idx)]\n x = paddle.concat(x, 0)\n x = paddle.matmul(x, self.text_projection)\n return x\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 153, "n_words": 67, "vocab_size": 37, "complexity": 2, "nloc": 13, "token_counts": 137, "n_ast_nodes": 213, "n_identifiers": 21, "random_cut": "def encode_text(self, text):\n x = self.token_embedding(text) # [batch_size, n_ctx, d_model]\n x = x + self.positional_embedding\n x = x.transpose((1, 0, 2)) # NLD -> LND\n x = self.transformer(x)\n x = x.transpose((1, 0, 2)) # LND -> NLD\n x = self.ln_final(x)\n idx = text.numpy().argmax(-1)\n idx = list(idx)\n x = [x[i:i + 1, int(j), :] for i, j in enume" }, { "id": 301031, "commit_id": "274557361002a168fb14519f30f4590aeac42142", "repo": "core", "path": "homeassistant/components/lutron_caseta/__init__.py", "file_name": "__init__.py", "fun_name": "async_update", "commit_message": "Small cleanups lutron_caseta (#72099)", "code": "async def async_update(self):\n \n self._device = self._smartbridge.get_device_by_id(self.device_id)\n _LOGGER.debug(self._device)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 28, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 28, "n_ast_nodes": 49, "n_identifiers": 8, "random_cut": "async def async_update(self):\n \n self._" }, { "id": 100392, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "plugins/train/trainer/_base.py", "file_name": "_base.py", "fun_name": "_set_tensorboard", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version 
of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def _set_tensorboard(self):\n \n if self._model.state.current_session[\"no_logs\"]:\n logger.verbose(\"TensorBoard logging disabled\")\n return None\n logger.debug(\"Enabling TensorBoard Logging\")\n\n logger.debug(\"Setting up TensorBoard Logging\")\n log_dir = os.path.join(str(self._model.model_dir),\n f\"{self._model.name}_logs\",\n f\"session_{self._model.state.session_id}\")\n tensorboard = tf.keras.callbacks.TensorBoard(log_dir=log_dir,\n histogram_freq=0, # Must be 0 or hangs\n write_graph=get_backend() != \"amd\",\n write_images=False,\n update_freq=\"batch\",\n profile_batch=0,\n embeddings_freq=0,\n embeddings_metadata=None)\n tensorboard.set_model(self._model.model)\n tensorboard.on_train_begin(0)\n logger.verbose(\"Enabled TensorBoard Logging\")\n return tensorboard\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 563, "n_words": 46, "vocab_size": 39, "complexity": 2, "nloc": 21, "token_counts": 131, "n_ast_nodes": 240, "n_identifiers": 32, "random_cut": "def _set_tensorboard(self):\n \n if self._model.state.current_session[\"no_logs\"]:\n logger.verbose(\"TensorBoard logging disabled\")\n return None\n logger.debug(\"Enabling TensorBoard Logging\")\n\n logger.debug(\"Setting up TensorBoard Logging\")\n log_dir = os.path.join(str(self._model.mod" }, { "id": 35902, "commit_id": "b693cbf99c5a180dde8b32ded2fb82ea735aab15", "repo": "transformers", "path": "tests/pipelines/test_pipelines_zero_shot_image_classification.py", "file_name": "test_pipelines_zero_shot_image_classification.py", "fun_name": "test_large_model_pt", "commit_message": "The tests were not updated after the addition of `torch.diag` (#15890)\n\nin the scoring (which is more correct)", "code": "def test_large_model_pt(self):\n image_classifier = pipeline(\n task=\"zero-shot-image-classification\",\n model=\"openai/clip-vit-base-patch32\",\n )\n # This is an image of 2 cats with remotes and no planes\n image = Image.open(\"./tests/fixtures/tests_samples/COCO/000000039769.png\")\n output = image_classifier(image, candidate_labels=[\"cat\", \"plane\", \"remote\"])\n\n self.assertEqual(\n nested_simplify(output),\n [\n {\"score\": 0.511, \"label\": \"remote\"},\n {\"score\": 0.485, \"label\": \"cat\"},\n {\"score\": 0.004, \"label\": \"plane\"},\n ],\n )\n\n output = image_classifier([image] * 5, candidate_labels=[\"cat\", \"plane\", \"remote\"], batch_size=2)\n self.assertEqual(\n nested_simplify(output),\n [\n [\n {\"score\": 0.511, \"label\": \"remote\"},\n {\"score\": 0.485, \"label\": \"cat\"},\n {\"score\": 0.004, \"label\": \"plane\"},\n ],\n ]\n * 5,\n )\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 377, "n_words": 77, "vocab_size": 45, "complexity": 1, "nloc": 27, "token_counts": 169, "n_ast_nodes": 276, "n_identifiers": 14, "random_cut": "def test_large_model_pt(self):\n image_classifier = pipeline(\n task=\"zero-shot-image-classification\",\n model=\"openai/clip-vit-base-patch32\",\n )\n # This is an image of 2 cats with remotes and no planes\n image = Image.open(\"./tests/fixtures/tests_samples/COCO/000000039769.png\")\n output = image_classifier(image, candidate_labels=[\"cat\", \"plane\", \"remote\"])\n\n self.assertEqual(\n nested_simplify(output),\n [\n {\"score\": 0.511, \"label\": \"remote\"},\n {\"score\": 0.485, \"label\": \"cat\"},\n {\"score\": 0.004, \"label\": \"plane\"},\n ],\n )\n\n output = image_classifier([image] * 5, candidate_labels=[\"cat\"," }, { "id": 318390, "commit_id": "fc695896dd8b0169001c438054a79e347053fac6", "repo": "paperless-ngx", "path": "src/documents/management/commands/document_archiver.py", "file_name": "document_archiver.py", "fun_name": "handle", "commit_message": "Format Python code with black", "code": "def handle(self, *args, **options):\n\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n\n overwrite = options[\"overwrite\"]\n\n if options[\"document\"]:\n documents = Document.objects.filter(pk=options[\"document\"])\n else:\n documents = Document.objects.all()\n\n document_ids = list(\n map(\n lambda doc: doc.id,\n filter(lambda d: overwrite or not d.has_archive_version, documents),\n )\n )\n\n # Note to future self: this prevents django from reusing database\n # conncetions between processes, which is bad and does not work\n # with postgres.\n db.connections.close_all()\n\n try:\n\n logging.getLogger().handlers[0].level = logging.ERROR\n with multiprocessing.Pool(processes=settings.TASK_WORKERS) as pool:\n list(\n tqdm.tqdm(\n pool.imap_unordered(handle_document, document_ids),\n total=len(document_ids),\n disable=options[\"no_progress_bar\"],\n )\n )\n except KeyboardInterrupt:\n print(\"Aborting...\")\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 406, "n_words": 79, "vocab_size": 65, "complexity": 4, "nloc": 26, "token_counts": 168, "n_ast_nodes": 277, "n_identifiers": 44, "random_cut": "def handle(self, *args, **options):\n\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n\n overwrite = options[\"overwrite\"]\n\n if options[\"document\"]:\n documents = Document.objects.filter(pk=options[\"document\"])\n else:\n documents = Document.objects.all()\n\n document_ids = list(\n map(\n lambda doc: doc.id,\n filter(lambda d: overwrite or not d.has_archive_version, documents),\n )\n )\n\n " }, { "id": 129780, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "ci/repro-ci.py", "file_name": "repro-ci.py", "fun_name": "prepare_instance", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def prepare_instance(self):\n self.create_new_ssh_client()\n output = self.execute_ssh_command(\"docker ps\", quiet=True)\n if \"CONTAINER ID\" in output:\n self.logger.info(\"Instance already prepared.\")\n return\n\n self.logger.info(\"Preparing instance (installing docker etc.)\")\n commands = [\n \"sudo yum install -y docker\",\n \"sudo service docker start\",\n f\"sudo usermod -aG docker {self.ssh_user}\",\n ]\n self.execute_ssh_commands(commands, quiet=True)\n 
self.create_new_ssh_client()\n self.execute_ssh_command(\"docker ps\", quiet=True)\n self.docker_login()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 171, "n_words": 47, "vocab_size": 38, "complexity": 2, "nloc": 16, "token_counts": 85, "n_ast_nodes": 156, "n_identifiers": 12, "random_cut": "def prepare_instance(self):\n self.create_new_ssh_client()\n output = self.execute_ssh_command(\"docker ps\", quiet=True)\n if \"CONTAINER ID\" in output:\n self.logger.info(\"Instance already prepared.\")\n return\n\n self.logger.info(\"Preparing instance (installing docker etc.)\")\n commands = [\n \"sudo yum install -y docker\",\n \"sudo service docke" }, { "id": 13581, "commit_id": "189bf5e125119e457e8c3ff673b19cffec48bea2", "repo": "jina", "path": "tests/k8s/test_k8s.py", "file_name": "test_k8s.py", "fun_name": "test_flow_with_gpu", "commit_message": "test: print exceptions raised (#5434)", "code": "async def test_flow_with_gpu(k8s_flow_gpu, docker_images, tmpdir, logger):\n try:\n dump_path = os.path.join(str(tmpdir), 'test-flow-with-gpu')\n namespace = f'test-flow-with-gpu'\n k8s_flow_gpu.to_kubernetes_yaml(dump_path, k8s_namespace=namespace)\n\n from kubernetes import client\n\n api_client = client.ApiClient()\n core_client = client.CoreV1Api(api_client=api_client)\n app_client = client.AppsV1Api(api_client=api_client)\n await create_all_flow_deployments_and_wait_ready(\n dump_path,\n namespace=namespace,\n api_client=api_client,\n app_client=app_client,\n core_client=core_client,\n deployment_replicas_expected={\n 'gateway': 1,\n 'test-executor': 1,\n },\n logger=logger,\n )\n resp = await run_test(\n flow=k8s_flow_gpu,\n namespace=namespace,\n core_client=core_client,\n endpoint='/cuda',\n )\n docs = resp[0].docs\n assert len(docs) == 10\n for doc in docs:\n assert doc.tags['resources']['limits'] == {'nvidia.com/gpu:': 1}\n core_client.delete_namespace(namespace)\n except Exception as exc:\n logger.error(f' Exception raised {exc}')\n raise exc\n\n\n@pytest.mark.asyncio\n@pytest.mark.timeout(3600)\n@pytest.mark.parametrize(\n 'docker_images',\n [['test-executor', 'jinaai/jina']],\n indirect=True,\n)", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "@pytest.mark.asyncio\n@pytest.mark.timeout(3600)\n@pytest.mark.parametrize(\n 'docker_images',\n [['test-executor', 'jinaai/jina']],\n indirect=True,\n)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 390, "n_words": 88, "vocab_size": 73, "complexity": 3, "nloc": 35, "token_counts": 190, "n_ast_nodes": 364, "n_identifiers": 41, "random_cut": "async def test_flow_with_gpu(k8s_flow_gpu, docker_images, tmpdir, logger):\n try:\n dump_path = os.path.join(str(tmpdir), 'test-flow-with-gpu')\n namespace = f'test-flow-with-gpu'\n k8s_flow_gpu.to_kubernetes_yaml(dump_path, k8s_namespace=namespace)\n\n from kubernetes import client\n\n api_client = client.ApiClient()\n core_client = client.CoreV1Api(api_client=api_client)\n app_client = client.AppsV1Api(api_client=api_client)\n await create_all_flow_deployments_and_wait_ready(\n dump_path,\n namespace=namespace,\n api_client=api_client,\n app_client=app_client,\n core_client=core_client,\n deployment_replicas_expected={\n 'gateway': 1,\n 'test-executor': 1,\n },\n logger=logger,\n )\n resp = await run_test(\n flow=k8s_flow_gpu,\n namespace=namespace,\n core_client=core_client,\n endpoint='/cuda',\n )\n docs = resp[0].docs\n assert len(docs) == 10\n for doc in docs:\n 
assert doc.tags['resources']['limits'] == {'nvidia.com/gpu:': 1}\n core_client.delete_namespace(namespace)\n except Exception as exc:\n log" }, { "id": 271459, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/node.py", "file_name": "node.py", "fun_name": "input_tensors", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def input_tensors(self):\n if self.is_input:\n return [self.outputs] # Used in `Layer.input`.\n return self.call_args[0]\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 37, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 4, "token_counts": 23, "n_ast_nodes": 37, "n_identifiers": 5, "random_cut": "def input_tensors(self):\n if self.is_input:\n return [self.outputs] # Used in `Layer.input`.\n return self.call_args[0]\n" }, { "id": 88034, "commit_id": "8eedbf267dfaee4fca5960a6985630bd8b2bcbda", "repo": "sentry", "path": "tests/sentry/integrations/bitbucket/test_issues.py", "file_name": "test_issues.py", "fun_name": "test_link_issue", "commit_message": "fix(integration): Calls to get_installation() should check for int (#41063)\n\nSome of our tests were passing an organization or a string instead of an int.", "code": "def test_link_issue(self):\n issue_id = 3\n repo = \"myaccount/myrepo\"\n responses.add(\n responses.GET,\n f\"https://api.bitbucket.org/2.0/repositories/{repo}/issues/{issue_id}\",\n json={\"id\": issue_id, \"title\": \"hello\", \"content\": {\"html\": \"This is the description\"}},\n )\n\n data = {\"repo\": repo, \"externalIssue\": issue_id, \"comment\": \"hello\"}\n\n assert self.integration.get_installation(self.organization.id).get_issue(\n issue_id, data=data\n ) == {\n \"key\": issue_id,\n \"description\": \"This is the description\",\n \"title\": \"hello\",\n \"repo\": repo,\n }\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 192, "n_words": 49, "vocab_size": 37, "complexity": 1, "nloc": 17, "token_counts": 99, "n_ast_nodes": 185, "n_identifiers": 14, "random_cut": "def test_link_issue(self):\n issue_id = 3\n repo = \"myaccount/myrepo\"\n responses.add(\n responses.GET,\n f\"https://api.bitbucket.org/2.0/repositories/{repo}/issues/{issue_id}\",\n json={\"id\": issue_id, \"title\": \"hello\", \"content\": {\"html\": \"This is the description\"}},\n )\n\n data = {\"repo\": repo, \"externalIssue\": issue_id, \"comment\": \"hello\"}\n\n assert self.integratio" }, { "id": 123580, "commit_id": "df4293473d2fb6e887e31522cab5aff95e201581", "repo": "sqlmap", "path": "plugins/dbms/extremedb/enumeration.py", "file_name": "enumeration.py", "fun_name": "getUsers", "commit_message": "Fixing DeprecationWarning (logger.warn)", "code": "def getUsers(self):\n warnMsg = \"on eXtremeDB it is not possible to enumerate the users\"\n logger.warning(warnMsg)\n\n return []\n", "url": "https://github.com/sqlmapproject/sqlmap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 37, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 17, "n_ast_nodes": 31, "n_identifiers": 5, "random_cut": "def getUsers(self):\n warnMsg = \"on eXtremeDB it is not possible to enumerate the users\"\n logger.warning(warnMsg)\n\n return []\n" }, { "id": 283197, "commit_id": "ab4de1dd70fba866930150e440a03e461a6ca6a8", "repo": 
"OpenBBTerminal", "path": "build/pyinstaller/user_agent/base.py", "file_name": "base.py", "fun_name": "fix_chrome_mac_platform", "commit_message": "Create a packaged app bundle with Pyinstaller (#1525)\n\n* Add dashboard widget assets\r\n\r\n* Add ipywidgets and ipyflex to project\r\n\r\n* Add currencies dashboard notebook\r\n\r\n* Update docs and docstrings\r\n\r\n* Add pyinstaller to project deps\r\n\r\n* Add pyinstaller artifacts to gitignore\r\n\r\n* Fix linter errors in terminal.py\r\n\r\n* Update cspell hook and action with a pyinstaller specific word\r\n\r\n* Add pyinstaller specfile and artifacts\r\n\r\n* Add splashscreen image\r\n\r\n* Add app icon\r\n\r\n* adding splash screen support to terminal.spec and terminal.py\r\n\r\n* Restore the conda env build files\r\n\r\n* Sync deps\r\n\r\n* Add border to the splashscreen image\r\n\r\n* Clean up terminal launcher\r\n\r\n* Add support for default feature flags in packages apps\r\n\r\n* Fix types and linting\r\n\r\n* Add splashscreen management to app bootup\r\n\r\n* Check prediction feature flag when entering crypto/pred\r\n\r\n* Update pyinstaller spec file\r\n\r\n* fix .spec file to work for splash and icon - removed the \"..\"\r\n\r\n* Allows to export when using installer (#1568)\r\n\r\n* fix export for packaged apps\r\n\r\n* fix filename\r\n\r\n* Git : replace commit_hash when it is set in config_terminal\r\n\r\n* Add update of the git commit hash in gtff default during build\r\n\r\n* Add packaged app name and feature flag to logs\r\n\r\n* Add platform specific icon assignment\r\n\r\n* Add macOS build assets\r\n\r\n* Add tensorflow to hidden imports\r\n\r\n* Move LOGGING_COMMIT_HASH to gtff\r\n\r\n* Adding files/folders needed to .spec and pyinstaller folder. This will make certain commands work again.\r\n\r\n* Linting\r\n\r\n* Workflow : ignore ./build/pyinstaller from codespell\r\n\r\n* Workflow : exclude ./build/pyinstaller from flake8\r\n\r\n* Poetry + Workflow : add types-six\r\n\r\n* Pyinstaller : remove property_cached, user_agent and vaderSentiment\r\n\r\n* Revert \"Pyinstaller : remove property_cached, user_agent and vaderSentiment\"\r\n\r\nThis reverts commit dbb3e2b81086f97819ebd21457148c7160a4d703.\r\n\r\n* Clean up local paths in specfile\r\n\r\n* Validate deps have correct Jinja version (they do)\r\n\r\n* Fix logging commit hash to be set correctly for the logger to see it\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: didierlopes.eth \r\nCo-authored-by: Chavithra PARANA ", "code": "def fix_chrome_mac_platform(platform):\n \n ver = platform.split(\"OS X \")[1]\n build_range = range(*MACOSX_CHROME_BUILD_RANGE[ver])\n build = randomizer.choice(build_range)\n mac_ver = ver.replace(\".\", \"_\") + \"_\" + str(build)\n return \"Macintosh; Intel Mac OS X %s\" % mac_ver\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 48, "n_words": 30, "vocab_size": 24, "complexity": 1, "nloc": 6, "token_counts": 56, "n_ast_nodes": 101, "n_identifiers": 13, "random_cut": "def fix_chrome_mac_platform(platform):\n \n ver = platform.split(\"OS X \")[1]\n build_range = range(*MACOSX_CHROME_BUILD_RANGE[ver])\n build = randomizer.choice(build_range)\n mac_ver = ver.replace(\".\", \"_\") + \"_\" + str(build)\n return \"Macintosh; Intel Mac OS X %s\" % mac_ver\n\n" }, { "id": 107506, "commit_id": "0f77ba9ca596c8c071eecbec3eea93f4c900dfee", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_tightlayout.py", "file_name": 
"test_tightlayout.py", "fun_name": "test_tight_pads", "commit_message": "MNT: make layout deprecations pending\n\nTST: fix the tests", "code": "def test_tight_pads():\n fig, ax = plt.subplots()\n with pytest.warns(PendingDeprecationWarning,\n match='will be deprecated'):\n fig.set_tight_layout({'pad': 0.15})\n fig.draw_without_rendering()\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 50, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 6, "token_counts": 42, "n_ast_nodes": 72, "n_identifiers": 11, "random_cut": "def test_tight_pads():\n fig, ax = plt.subplots()\n with pytest.warns(PendingDeprecationWarning,\n match='will be deprecated'):\n " }, { "id": 7907, "commit_id": "42cf4f241f3427d6750b7d97a8860d9fa94e1963", "repo": "ludwig", "path": "tests/integration_tests/test_missing_value_strategy.py", "file_name": "test_missing_value_strategy.py", "fun_name": "test_missing_values_fill_with_mean", "commit_message": "[tests] Added more explicit lifecycle management to Ray clusters during tests (#2447)", "code": "def test_missing_values_fill_with_mean(backend, csv_filename, tmpdir, ray_cluster_2cpu):\n data_csv_path = os.path.join(tmpdir, csv_filename)\n\n kwargs = {PREPROCESSING: {\"missing_value_strategy\": FILL_WITH_MEAN}}\n input_features = [\n number_feature(**kwargs),\n binary_feature(),\n category_feature(encoder={\"vocab_size\": 3}),\n ]\n output_features = [binary_feature()]\n training_data_csv_path = generate_data(input_features, output_features, data_csv_path)\n\n config = {\"input_features\": input_features, \"output_features\": output_features, TRAINER: {\"epochs\": 2}}\n\n # run preprocessing\n ludwig_model = LudwigModel(config, backend=backend)\n ludwig_model.preprocess(dataset=training_data_csv_path)\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 97, "n_words": 47, "vocab_size": 40, "complexity": 1, "nloc": 13, "token_counts": 113, "n_ast_nodes": 180, "n_identifiers": 26, "random_cut": "def test_missing_values_fill_with_mean(backend, csv_filename, tmpdir, ray_cluster_2cpu):\n data_csv_path = os.path.join(tmpdir, csv_filename)\n\n kwargs = {PREPROCESSING: {\"missing_value_strategy\": FILL_WITH_MEAN}}\n input_features = [\n number_feature(**kwargs),\n binary_feature(),\n category_feature(encoder={\"vocab_size\": 3}),\n ]\n output_features = [binary_feature()]\n training_data_csv_path = generate_data(input_features, output_features, data_csv_path)\n\n config = {\"input_features\": input_features, " }, { "id": 100271, "commit_id": "444762114c1b1ad2e72c871e825373bd74880aba", "repo": "faceswap", "path": "lib/metal/__init__.py", "file_name": "__init__.py", "fun_name": "get_driver_version", "commit_message": "Initial somewhat working version", "code": "def get_driver_version() -> int:\n # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html\n return Constants.CUDA.DRIVER_VERSION_UNSUPPORTED\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 13, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 21, "n_identifiers": 5, "random_cut": "def get_driver_version() -> int:\n # https://docs.nvidia.com/cuda/cuda-" }, { "id": 200287, "commit_id": "6d2bbf80752549276a968fd4af78231c569d55c5", "repo": "sympy", "path": 
"sympy/testing/runtests.py", "file_name": "runtests.py", "fun_name": "import_error", "commit_message": "runtests.py: Undo auto-formatting, re-add changes to blacklist for scipy, numpy", "code": "def import_error(self, filename, exc_info):\n self._exceptions.append((filename, None, exc_info))\n rel_name = filename[len(self._root_dir) + 1:]\n self.write(rel_name)\n self.write(\"[?] Failed to import\", \"Red\")\n self.write(\" \")\n self.write(\"[FAIL]\", \"Red\", align=\"right\")\n self.write(\"\\n\")\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 74, "n_words": 24, "vocab_size": 24, "complexity": 1, "nloc": 8, "token_counts": 75, "n_ast_nodes": 129, "n_identifiers": 11, "random_cut": "def import_error(self, filename, exc_info):\n self._exceptions.append((filename, None, exc_info))\n rel_name = filena" }, { "id": 71508, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/pages/test_dashboard.py", "file_name": "test_dashboard.py", "fun_name": "test_panel_query_count", "commit_message": "Reformat with black", "code": "def test_panel_query_count(self):\n # fake a request object with bob as the user\n self.client.user = self.bob\n with self.assertNumQueries(4):\n # Instantiating/getting context of RecentEditsPanel should not generate N+1 queries -\n # i.e. any number less than 6 would be reasonable here\n panel = RecentEditsPanel()\n parent_context = {\"request\": self.client}\n panel.get_context_data(parent_context)\n\n # check that the panel is still actually returning results\n html = panel.render_html(parent_context)\n self.assertIn(\"Ameristralia Day\", html)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 159, "n_words": 63, "vocab_size": 54, "complexity": 1, "nloc": 8, "token_counts": 58, "n_ast_nodes": 104, "n_identifiers": 13, "random_cut": "def test_panel_query_count(self):\n # fake a request object with bob as the user\n self.client.user = self.bob\n with self.assertNumQueries(4):\n # Instantiating/getting context of RecentEditsPanel should not generate N+1 queries -\n # i.e. 
any number less than 6 would be reasonable here\n panel = RecentEdit" }, { "id": 254482, "commit_id": "11365a1bd4e6aa83a254e62e051d49b4851cd524", "repo": "d2l-en", "path": "d2l/torch.py", "file_name": "torch.py", "fun_name": "objective", "commit_message": "HPO: Add saved functions to d2l lib", "code": "def objective(batch_size, learning_rate, max_epochs=8):\n \n model = d2l.AlexNet(lr=learning_rate)\n trainer = d2l.Trainer(max_epochs=max_epochs, num_gpus=1)\n data = d2l.FashionMNIST(batch_size=batch_size, resize=(224, 224))\n trainer.fit(model=model, data=data)\n validation_error = trainer.validate(model=model)\n return validation_error\n", "url": "https://github.com/d2l-ai/d2l-en.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 23, "vocab_size": 19, "complexity": 1, "nloc": 7, "token_counts": 78, "n_ast_nodes": 120, "n_identifiers": 17, "random_cut": "def objective(batch_size, learning_rate, max_epochs=8):\n \n model = d2l.AlexNet(lr=learning_rate)\n trainer = d2l.Trainer(max_epochs=max_epochs, num_gpus=1)\n data = d2l.FashionMNIST(batch_size=batch_size, resize=(224, 224))\n tr" }, { "id": 281985, "commit_id": "683a8bdd83c1b931df111a5b2b8b19350930b73a", "repo": "OpenBBTerminal", "path": "tests/gamestonk_terminal/economy/test_alphavantage_model.py", "file_name": "test_alphavantage_model.py", "fun_name": "test_get_inflation", "commit_message": "Tests : Economy + Conftest (#1260)\n\n* Updating tests : economy\r\n\r\n* Updating tests : removing breaklines\r\n\r\n* Updating tests : economy\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : economy", "code": "def test_get_inflation(recorder):\n result_df = alphavantage_model.get_inflation()\n\n recorder.capture(result_df)\n\n\n@pytest.mark.vcr(record_mode=\"none\")", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr(record_mode=\"none\")", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 11, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 51, "n_identifiers": 10, "random_cut": "def test_get_inflation(recorder):\n result_df = alphavantage_model.get_inflation()\n\n recorder.capture(" }, { "id": 212272, "commit_id": "b570c028db1ef739f86edeab1affd1f73467aace", "repo": "bokeh", "path": "tests/unit/bokeh/document/test_document.py", "file_name": "test_document.py", "fun_name": "test_get_model_by_changed_to_none_name", "commit_message": "Fix Comparing singleton primitives with equality checking (#12100)\n\n* Fix issue singleton-equality-checking found at https://codereview.doctor\r\n\r\n* Fix awkward C-esque usage\r\n\r\n* Fix awkward C-esque test (pt 2)\r\n\r\n* Fix awkward C-esque test (pt 3)\r\n\r\nCo-authored-by: Bryan Van de Ven ", "code": "def test_get_model_by_changed_to_none_name(self) -> None:\n d = document.Document()\n m = SomeModelInTestDocument(name=\"bar\")\n d.add_root(m)\n assert d.get_model_by_name(\"bar\") == m\n m.name = None\n assert d.get_model_by_name(\"bar\") is None\n", "url": "https://github.com/bokeh/bokeh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 63, "n_words": 22, "vocab_size": 16, "complexity": 1, "nloc": 7, "token_counts": 51, "n_ast_nodes": 89, "n_identifiers": 10, "random_cut": "def test_get_model_by_changed_to_none_name(self) -> None:\n d = document.Document()\n m = SomeModelInTestDocument(name=\"bar\")\n d.add_root(m)\n assert d.get_model_by_name(\"bar\") == m\n m.name" }, 
{ "id": 314550, "commit_id": "992ceb1a09af00e2a5473729e6d02b287cffbba2", "repo": "core", "path": "homeassistant/components/google_assistant/smart_home.py", "file_name": "smart_home.py", "fun_name": "async_devices_sync_response", "commit_message": "Google Assistant diagnostics and synchronization (#73574)\n\n* Add config flow import for local google assistant\r\n* Add diagnostic with sync response\r\n* Add button for device sync", "code": "async def async_devices_sync_response(hass, config, agent_user_id):\n \n entities = async_get_entities(hass, config)\n instance_uuid = await instance_id.async_get(hass)\n devices = []\n\n for entity in entities:\n if not entity.should_expose():\n continue\n\n try:\n devices.append(entity.sync_serialize(agent_user_id, instance_uuid))\n except Exception: # pylint: disable=broad-except\n _LOGGER.exception(\"Error serializing %s\", entity.entity_id)\n\n return devices\n\n\n@HANDLERS.register(\"action.devices.SYNC\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@HANDLERS.register(\"action.devices.SYNC\")", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 111, "n_words": 39, "vocab_size": 36, "complexity": 4, "nloc": 12, "token_counts": 75, "n_ast_nodes": 139, "n_identifiers": 20, "random_cut": "async def async_devices_sync_response(hass, config, agent_user_id):\n \n entities = async_get_entities(hass, config)\n instance_uuid = await" }, { "id": 46976, "commit_id": "6933022e94acf139b2dea9a589bb8b25c62a5d20", "repo": "airflow", "path": "tests/providers/google/cloud/hooks/test_automl.py", "file_name": "test_automl.py", "fun_name": "test_delete_dataset", "commit_message": "Fix new MyPy errors in main (#22884)\n\nThose MyPe errors are side effect of some new dependencies.", "code": "def test_delete_dataset(self, mock_delete_dataset):\n self.hook.delete_dataset(dataset_id=DATASET_ID, location=GCP_LOCATION, project_id=GCP_PROJECT_ID)\n\n mock_delete_dataset.assert_called_once_with(\n request=dict(name=DATASET_PATH), retry=DEFAULT, timeout=None, metadata=()\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 43, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 51, "n_ast_nodes": 75, "n_identifiers": 20, "random_cut": "def test_delete_dataset(self, mock_delete_dataset):\n self.hook.delete_dataset(dataset_id=DATAS" }, { "id": 180432, "commit_id": "51c8c34486bfddca5948e46e498de44e21ab6496", "repo": "gradio", "path": "test/test_utils.py", "file_name": "test_utils.py", "fun_name": "test_get", "commit_message": "Async Request Class (#1595)\n\n* Implement Request class and its tests.\r\n\r\n* Add new requirements\r\n\r\n* Reformat codebase.\r\n\r\n* Fix formatting.\r\n\r\n* Add library level requirements.\r\n\r\n* Convert validated_data property to get_validated_data function.\r\n\r\n* Fix the client fixture.\r\n\r\n* Update test/test_utils.py\r\n\r\n* Update test/test_utils.py\r\n\r\n* Fix the client fixture.\r\n\r\n* Add missing initialization for Request._validated_data\r\n\r\n* Fix async test problem with test_tunneling.py\r\n\r\n* Update gradio/utils.py\r\n\r\n* Update gradio/utils.py\r\n\r\n* Fix formatting.\r\n\r\nCo-authored-by: Ömer Faruk Özdemir ", "code": "async def test_get(self):\n client_response: Request = await Request(\n method=Request.Method.GET,\n url=\"http://headers.jsontest.com/\",\n )\n validated_data = client_response.get_validated_data()\n assert client_response.is_valid() is 
True\n assert validated_data[\"Host\"] == \"headers.jsontest.com\"\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 78, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 8, "token_counts": 47, "n_ast_nodes": 81, "n_identifiers": 11, "random_cut": "async def test_get(self):\n client_response: Request = await Request(\n method=Request.Method.GET,\n url=\"http://headers.jsontest.com/\",\n )\n " }, { "id": 161490, "commit_id": "151276b2cff08bf5f7a3adea55d2683c083b13a0", "repo": "rich", "path": "tests/test_win32_console.py", "file_name": "test_win32_console.py", "fun_name": "test_move_cursor_backward", "commit_message": "Run legacy Windows tests on Windows only", "code": "def test_move_cursor_backward(_, SetConsoleCursorPosition, win32_handle):\n term = LegacyWindowsTerm()\n term.move_cursor_backward()\n SetConsoleCursorPosition.assert_called_once_with(\n win32_handle, coords=WindowsCoordinates(row=CURSOR_Y, col=CURSOR_X - 1)\n )\n\n", "url": "https://github.com/Textualize/rich.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 53, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 6, "token_counts": 40, "n_ast_nodes": 62, "n_identifiers": 14, "random_cut": "def test_move_cursor_backward(_, SetConsoleCursorPosition, win32_handle):\n term " }, { "id": 197996, "commit_id": "6022fe81485d1f32309e52eb06365570f9aa2c42", "repo": "sympy", "path": "sympy/geometry/tests/test_util.py", "file_name": "test_util.py", "fun_name": "test_idiff", "commit_message": "do not use solve in idiff", "code": "def test_idiff():\n x = Symbol('x', real=True)\n y = Symbol('y', real=True)\n t = Symbol('t', real=True)\n f = Function('f')\n g = Function('g')\n # the use of idiff in ellipse also provides coverage\n circ = x**2 + y**2 - 4\n ans = -3*x*(x**2 + y**2)/y**5\n assert ans == idiff(circ, y, x, 3)\n assert ans == idiff(circ, [y], x, 3)\n assert idiff(circ, y, x, 3) == ans\n explicit = 12*x/sqrt(-x**2 + 4)**5\n assert ans.subs(y, solve(circ, y)[0]).equals(explicit)\n assert True in [sol.diff(x, 3).equals(explicit) for sol in solve(circ, y)]\n assert idiff(x + t + y, [y, t], x) == -Derivative(t, x) - 1\n assert idiff(f(x) * exp(f(x)) - x * exp(x), f(x), x) == (x + 1) * exp(x - f(x))/(f(x) + 1)\n assert idiff(f(x) - y * exp(x), [f(x), y], x) == (y + Derivative(y, x)) * exp(x)\n assert idiff(f(x) - y * exp(x), [y, f(x)], x) == -y + exp(-x) * Derivative(f(x), x)\n assert idiff(f(x) - g(x), [f(x), g(x)], x) == Derivative(g(x), x)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 215, "n_words": 158, "vocab_size": 80, "complexity": 2, "nloc": 19, "token_counts": 390, "n_ast_nodes": 592, "n_identifiers": 21, "random_cut": "def test_idiff():\n x = Symbol('x', real=True)\n y = Symbol('y', real=True)\n t = Symbol('t', real=True)\n f = Function('f')\n g = Function('g')\n # the use of idiff in ellipse also provides coverage\n circ = x**2 + y**2 - 4\n ans = -3*x*(x**2 + y**2)/y**5\n assert ans == idiff(circ, y, x, 3)\n assert ans == idiff(circ, [y], x, 3)\n assert idiff(circ, y, x, 3) == ans\n explicit = 12*x/sqrt(-x**2 + 4)**5\n assert ans.subs(y, solve(circ, y)[0]).equals(explicit)\n assert True in [sol.diff(x, 3).equals(explicit) for sol in solve(circ, y)]\n assert idiff(x + t + y, [y, t], x) == -Derivative(t, x) - 1\n assert idiff(f(x) * exp(f(x)) - x * exp(x), f(x), x) == (x 
+ 1) * exp(x - f(x))/(f(x) + 1)\n assert idiff(f(x) - y * exp(x), [f(x), y], x) == (y + Derivative(y, x)) * exp(x)\n assert idiff(f(x) - y * exp(x), [y, f(x)], x) == -" }, { "id": 163255, "commit_id": "ccb25ab1d24c4fb9691270706a59c8d319750870", "repo": "pandas", "path": "pandas/tests/indexing/test_coercion.py", "file_name": "test_coercion.py", "fun_name": "test_fillna_float64", "commit_message": "Revert \"ENH: Index[complex] (#45256)\" (#45279)\n\nThis reverts commit b77797cde57969b3ee14d961f4dacd5cab25cb61.", "code": "def test_fillna_float64(self, index_or_series, fill_val, fill_dtype):\n klass = index_or_series\n obj = klass([1.1, np.nan, 3.3, 4.4])\n assert obj.dtype == np.float64\n\n exp = klass([1.1, fill_val, 3.3, 4.4])\n # float + complex -> we don't support a complex Index\n # complex for Series,\n # object for Index\n if fill_dtype == np.complex128 and klass == pd.Index:\n fill_dtype = object\n self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 131, "n_words": 58, "vocab_size": 39, "complexity": 3, "nloc": 8, "token_counts": 92, "n_ast_nodes": 117, "n_identifiers": 17, "random_cut": "def test_fillna_float64(self, index_or_series, fill_val, fill_dtype):\n klass = index_or_series\n obj = klass([1.1, np.nan, 3.3, 4.4])\n assert obj.dtype == np.float64\n\n exp = klass([1.1, fill_val, 3.3, 4.4])\n # float + complex -> we don't support a complex Index\n # complex for Series,\n # object for Index\n if fill_dtype == np.complex128 and kla" }, { "id": 155180, "commit_id": "193505fdf0c984743397ba3df56262f30aee13a8", "repo": "modin", "path": "modin/core/execution/unidist/implementations/pandas_on_unidist/partitioning/partition.py", "file_name": "partition.py", "fun_name": "mask", "commit_message": "FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059)\n\nSigned-off-by: Igoshev, Iaroslav ", "code": "def mask(self, row_labels, col_labels):\n \n logger = get_logger()\n logger.debug(f\"ENTER::Partition.mask::{self._identity}\")\n new_obj = super().mask(row_labels, col_labels)\n if isinstance(row_labels, slice) and unidist.is_object_ref(self._length_cache):\n if row_labels == slice(None):\n # fast path - full axis take\n new_obj._length_cache = self._length_cache\n else:\n new_obj._length_cache = compute_sliced_len.remote(\n row_labels, self._length_cache\n )\n if isinstance(col_labels, slice) and unidist.is_object_ref(self._width_cache):\n if col_labels == slice(None):\n # fast path - full axis take\n new_obj._width_cache = self._width_cache\n else:\n new_obj._width_cache = compute_sliced_len.remote(\n col_labels, self._width_cache\n )\n logger.debug(f\"EXIT::Partition.mask::{self._identity}\")\n return new_obj\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 325, "n_words": 67, "vocab_size": 39, "complexity": 7, "nloc": 20, "token_counts": 139, "n_ast_nodes": 238, "n_identifiers": 18, "random_cut": "def mask(self, row_labels, col_labels):\n \n logger = get_logger()\n logger.debug(f\"ENTER::Partition.mask::{self._identity}\")\n new_obj = super().mask(row_labels, col_labels)\n if isinstance(row_labels, slice) and unidist.is_object_ref(self._length_cache):\n if row_labels == slice(None):\n # fast path - full axis take\n new_obj._length_cache = self._length_cache\n else:\n new_obj._length_cache = compute_sliced_len.remote(\n 
row_labels, self._length_cache\n )\n " }, { "id": 4923, "commit_id": "1642630461c25e447ede67bb42ba6ea6ec700e52", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-intercom/unit_tests/unit_test.py", "file_name": "unit_test.py", "fun_name": "conversation_parts_responses", "commit_message": "🐛 Source Intercom: Fixed filtering of conversation_parts (#12374)", "code": "def conversation_parts_responses():\n return [\n (\n \"https://api.intercom.io/conversations\", \n build_conversations_response_body(\n conversations=[\n {\"id\":\"151272900026677\",\"updated_at\":1650988600},\n {\"id\":\"151272900026666\",\"updated_at\":1650988500}\n ],\n next_url=\"https://api.intercom.io/conversations?per_page=2&page=2\"\n )\n ),\n (\n \"https://api.intercom.io/conversations?per_page=2&page=2\",\n build_conversations_response_body(\n conversations=[\n {\"id\":\"151272900026466\",\"updated_at\":1650988450},\n {\"id\":\"151272900026680\",\"updated_at\":1650988100}, # Older than state, won't be processed\n ]\n )\n ),\n (\n \"https://api.intercom.io/conversations/151272900026677\",\n build_conversation_response_body(\n conversation_id=\"151272900026677\",\n conversation_parts=[\n {\"id\": \"13740311961\",\"updated_at\":1650988300},\n {\"id\": \"13740311962\",\"updated_at\":1650988450}\n ]\n )\n ),\n (\n \"https://api.intercom.io/conversations/151272900026666\",\n build_conversation_response_body(\n conversation_id=\"151272900026666\",\n conversation_parts=[\n {\"id\": \"13740311955\",\"updated_at\":1650988150},\n {\"id\": \"13740312056\",\"updated_at\":1650988500}\n ]\n )\n ),\n (\n \"https://api.intercom.io/conversations/151272900026466\",\n build_conversation_response_body(\n conversation_id=\"151272900026466\",\n conversation_parts=[{\"id\": \"13740311970\",\"updated_at\":1650988600}]\n )\n )\n ]\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 651, "n_words": 63, "vocab_size": 40, "complexity": 1, "nloc": 49, "token_counts": 168, "n_ast_nodes": 303, "n_identifiers": 7, "random_cut": "def conversation_parts_responses():\n return [\n (\n \"https://api.intercom.io/conversations\", \n build_conversations_response_body(\n conversations=[\n {\"id\":\"151272900026677\",\"updated_at\":1650988600},\n {\"id\":\"151272900026666\",\"updated_at\":1650988500}\n ],\n next_url=\"https://api.intercom.io/conversations?per_page=2&page=2\"\n )\n ),\n (\n " }, { "id": 32751, "commit_id": "672b66262aa4e65863f8aa94743fdd3c2a27a10b", "repo": "transformers", "path": "src/transformers/utils/fx.py", "file_name": "fx.py", "fun_name": "torch_tensor_baddbmm", "commit_message": "Add FX support for torch.baddbmm andd torch.Tensor.baddbmm (#18363)", "code": "def torch_tensor_baddbmm(self, batch1, batch2, *, beta=1, alpha=1, out=None):\n return torch_baddbmm(self, batch1, batch2, beta=beta, alpha=alpha, out=out)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 17, "n_words": 15, "vocab_size": 13, "complexity": 1, "nloc": 2, "token_counts": 44, "n_ast_nodes": 58, "n_identifiers": 8, "random_cut": "def torch_tensor_baddbmm(self, batch1, batch2, *, beta=1, alpha=1, out=None):\n return " }, { "id": 321360, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "tests/unit/keyinput/test_basekeyparser.py", "file_name": "test_basekeyparser.py", "fun_name": "test_count_42_invalid", 
"commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def test_count_42_invalid(self, handle_text, prompt_keyparser):\n # Invalid call with ccx gets ignored\n handle_text(prompt_keyparser,\n Qt.Key.Key_4, Qt.Key.Key_2, Qt.Key.Key_C, Qt.Key.Key_C, Qt.Key.Key_X)\n assert not prompt_keyparser.execute.called\n assert not prompt_keyparser._sequence\n # Valid call with ccc gets the correct count\n handle_text(prompt_keyparser,\n Qt.Key.Key_2, Qt.Key.Key_3, Qt.Key.Key_C, Qt.Key.Key_C, Qt.Key.Key_C)\n prompt_keyparser.execute.assert_called_once_with(\n 'message-info ccc', 23)\n assert not prompt_keyparser._sequence\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 149, "n_words": 45, "vocab_size": 31, "complexity": 1, "nloc": 10, "token_counts": 104, "n_ast_nodes": 153, "n_identifiers": 15, "random_cut": "def test_count_42_invalid(self, handle_text, prompt_keyparser):\n # Invalid call with ccx gets ignored\n handle_text(prompt_keyparser,\n Qt.Key.Key_4, Qt.Key.Key_2, Qt.K" }, { "id": 12282, "commit_id": "c1e03950670e338c40754b80ce2b6f2128858867", "repo": "jina", "path": "tests/integration/monitoring/test_monitoring.py", "file_name": "test_monitoring.py", "fun_name": "test_enable_monitoring_deployment", "commit_message": "feat: add a decoratator for monitoring method (#4737)", "code": "def test_enable_monitoring_deployment(port_generator, executor):\n port1 = port_generator()\n port2 = port_generator()\n\n with Flow().add(uses=executor, port_monitoring=port1, monitoring=True).add(\n uses=executor, port_monitoring=port2, monitoring=True\n ) as f:\n for port in [port1, port2]:\n resp = req.get(f'http://localhost:{port}/')\n assert resp.status_code == 200\n\n for meth in ['bar', 'foo']:\n f.post(f'/{meth}', inputs=DocumentArray())\n resp = req.get(f'http://localhost:{port2}/')\n assert (\n f'process_request_seconds_created{{endpoint=\"/{meth}\",executor=\"DummyExecutor\"}}'\n in str(resp.content)\n )\n\n\n@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 178, "n_words": 51, "vocab_size": 41, "complexity": 3, "nloc": 16, "token_counts": 121, "n_ast_nodes": 243, "n_identifiers": 25, "random_cut": "def test_enable_monitoring_deployment(port_generator, executor):\n port1 = port_generator()\n port2 = port_generator()\n\n with Flow().add(uses=executor, port_monitoring=port1, monitoring=True).add(\n uses=executor, port_monitoring=port2, monitoring=True\n ) as f:\n for port in [port1, port2]:\n resp = req.get(f'http://localhost:{port}/')\n assert resp.status_code == 200\n\n for meth in ['bar', 'foo']:\n f.post(f'/{meth}', inputs=DocumentArray())\n " }, { "id": 249114, "commit_id": "c97042f7eef3748e17c90e48a4122389a89c4735", "repo": "synapse", "path": "tests/rest/admin/test_media.py", "file_name": "test_media.py", "fun_name": "test_delete_media_never_accessed", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13469)", "code": "def test_delete_media_never_accessed(self) -> None:\n \n\n # upload and do not access\n server_and_media_id = self._create_media()\n self.pump(1.0)\n\n # test that the file exists\n media_id = server_and_media_id.split(\"/\")[1]\n local_path = self.filepaths.local_media_filepath(media_id)\n 
self.assertTrue(os.path.exists(local_path))\n\n # timestamp after upload/create\n now_ms = self.clock.time_msec()\n channel = self.make_request(\n \"POST\",\n self.url + \"?before_ts=\" + str(now_ms),\n access_token=self.admin_user_tok,\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(1, channel.json_body[\"total\"])\n self.assertEqual(\n media_id,\n channel.json_body[\"deleted_media\"][0],\n )\n\n self._access_media(server_and_media_id, False)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 230, "n_words": 56, "vocab_size": 48, "complexity": 1, "nloc": 23, "token_counts": 146, "n_ast_nodes": 236, "n_identifiers": 28, "random_cut": "def test_delete_media_never_accessed(self) -> None:\n \n\n # upload and do not access\n server_and_media_id = self._create_media()\n self.pump(1.0)\n\n # test that the file exists\n media_id = server_and_media_id.split(\"/\")[1]\n local_path = self.filepaths.local_media_filepath(media_id)\n self.assertTrue(os.path.exists(local_path))\n\n # timestamp after upload/create\n now_ms = self.clock.time_msec()\n channel = self.make_request(\n \"POST\",\n self.url + \"?before_ts=\" + str(now_ms),\n access_token=self.admin_user_tok,\n )" }, { "id": 199956, "commit_id": "f68e8de4252200cfc74b9433d00f77c4510ac68d", "repo": "sympy", "path": "sympy/core/facts.py", "file_name": "facts.py", "fun_name": "_beta_rules_lines", "commit_message": "refactor", "code": "def _beta_rules_lines(self):\n reverse_implications = defaultdict(list)\n for n, (pre, implied) in enumerate(self.beta_rules):\n reverse_implications[implied].append((pre, n))\n\n yield '# Note: the order of the beta rules is used in the beta_triggers'\n yield 'beta_rules = ['\n yield ''\n m = 0\n indices = {}\n for implied in sorted(reverse_implications):\n fact, value = implied\n yield f' # Rules implying {fact} = {value}'\n for pre, n in reverse_implications[implied]:\n indices[n] = m\n m += 1 \n setstr = \", \".join(map(str, sorted(pre)))\n yield f' ({{{setstr}}},'\n yield f' {implied!r}),'\n yield ''\n yield '] # beta_rules'\n \n yield 'beta_triggers = {'\n for query in sorted(self.beta_triggers):\n fact, value = query\n triggers = [indices[n] for n in self.beta_triggers[query]]\n yield f' {query!r}: {triggers!r},'\n yield '} # beta_triggers'\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 392, "n_words": 110, "vocab_size": 65, "complexity": 6, "nloc": 26, "token_counts": 153, "n_ast_nodes": 290, "n_identifiers": 23, "random_cut": "def _beta_rules_lines(self):\n reverse_implications = defaultdict(list)\n for n, (pre, implied) in enumerate(self.beta_rules):\n reverse_implications[implied].append((pre, n))\n\n yield '# Note: the order of the beta rules is used in the beta_t" }, { "id": 6708, "commit_id": "19a757d7812784e9fd1863278c7bc4fdd397dcdb", "repo": "ludwig", "path": "ludwig/features/audio_feature.py", "file_name": "audio_feature.py", "fun_name": "get_feature_meta", "commit_message": "Torchaudio Refactor (#1950)\n\n* Switched soundfile read for torch audio\r\n\r\n* Removed unused import\r\n\r\n* Working on test_experiment_audio_inputs\r\n\r\n* Fixed shape issue by indexing into tensor before preprocessing to padded matrix\r\n\r\n* Switched type setting to torch.float32", "code": "def get_feature_meta(column, preprocessing_parameters, backend):\n audio_feature_dict = 
preprocessing_parameters[\"audio_feature\"]\n first_audio_file_path = column.head(1)[0]\n _, sampling_rate_in_hz = torchaudio.load(first_audio_file_path)\n\n feature_dim = AudioFeatureMixin._get_feature_dim(audio_feature_dict, sampling_rate_in_hz)\n audio_file_length_limit_in_s = preprocessing_parameters[\"audio_file_length_limit_in_s\"]\n max_length = AudioFeatureMixin._get_max_length_feature(\n audio_feature_dict, sampling_rate_in_hz, audio_file_length_limit_in_s\n )\n return {\n \"feature_dim\": feature_dim,\n \"sampling_rate_in_hz\": sampling_rate_in_hz,\n \"max_length\": max_length,\n \"reshape\": (max_length, feature_dim),\n }\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 157, "n_words": 40, "vocab_size": 33, "complexity": 1, "nloc": 15, "token_counts": 87, "n_ast_nodes": 138, "n_identifiers": 17, "random_cut": "def get_feature_meta(column, preprocessing_parameters, backend):\n audio_feature_dict = preprocessing_parameters[\"audio_feature\"]\n first_audio_file_path = column.head(1)[0]\n _, sampling_rate_in_hz = torchaudio.load(first_audio_file_path)\n\n feature_dim = AudioFeatureMixin._get_feature_dim(audio_feature_dict, sampling_rate_in_hz)\n audio_file_length_limit_in_s = preprocessing_parameters[\"audio_file_length_limit_in_s\"]\n max_length = AudioFeatureMixin._get_max_length_feature(\n audio_feature_dict, sampling_rate_in_hz, audio_file_length_limit_in_s\n )\n return " }, { "id": 29637, "commit_id": "74d1c8d8504dbdd339865ff97ca4ac9bd30a8faf", "repo": "saleor", "path": "saleor/graphql/product/mutations/product/product_media_update.py", "file_name": "product_media_update.py", "fun_name": "perform_mutation", "commit_message": "Split product types and mutations (#11259)\n\n* Split product types file\r\n\r\n* Split product/mutations/products.py file", "code": "def perform_mutation(cls, _root, info, **data):\n media = cls.get_node_or_error(info, data.get(\"id\"), only_type=ProductMedia)\n product = models.Product.objects.prefetched_for_webhook().get(\n pk=media.product_id\n )\n alt = data.get(\"input\").get(\"alt\")\n if alt is not None:\n media.alt = alt\n media.save(update_fields=[\"alt\"])\n manager = load_plugin_manager(info.context)\n cls.call_event(manager.product_updated, product)\n product = ChannelContext(node=product, channel_slug=None)\n return ProductMediaUpdate(product=product, media=media)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 134, "n_words": 39, "vocab_size": 31, "complexity": 2, "nloc": 13, "token_counts": 126, "n_ast_nodes": 201, "n_identifiers": 29, "random_cut": "def perform_mutation(cls, _root, info, **data):\n media = cls.get_node_or_error(info, data.get(\"id\"), only_type=ProductMedia)\n product = models.Product.objects.prefetched_for_webhook(" }, { "id": 196660, "commit_id": "ba64dcaa3a3f0ac75424163682738660c08b098c", "repo": "sympy", "path": "sympy/tensor/array/expressions/conv_array_to_indexed.py", "file_name": "conv_array_to_indexed.py", "fun_name": "do_convert", "commit_message": "Convert Reshape to indexed: multiindex issue", "code": "def do_convert(self, expr, indices):\n if isinstance(expr, ArrayTensorProduct):\n cumul = list(accumulate([0] + [get_rank(arg) for arg in expr.args]))\n indices_grp = [indices[cumul[i]:cumul[i+1]] for i in range(len(expr.args))]\n return Mul.fromiter(self.do_convert(arg, ind) for arg, ind in zip(expr.args, indices_grp))\n 
if isinstance(expr, ArrayContraction):\n new_indices = [None for i in range(get_rank(expr.expr))]\n limits = []\n bottom_shape = get_shape(expr.expr)\n for contraction_index_grp in expr.contraction_indices:\n d = Dummy(f\"d{self.count_dummies}\")\n self.count_dummies += 1\n dim = bottom_shape[contraction_index_grp[0]]\n limits.append((d, 0, dim-1))\n for i in contraction_index_grp:\n new_indices[i] = d\n j = 0\n for i in range(len(new_indices)):\n if new_indices[i] is None:\n new_indices[i] = indices[j]\n j += 1\n newexpr = self.do_convert(expr.expr, new_indices)\n return Sum(newexpr, *limits)\n if isinstance(expr, ArrayDiagonal):\n new_indices = [None for i in range(get_rank(expr.expr))]\n ind_pos = expr._push_indices_down(expr.diagonal_indices, list(range(len(indices))), get_rank(expr))\n for i, index in zip(ind_pos, indices):\n if isinstance(i, collections.abc.Iterable):\n for j in i:\n new_indices[j] = index\n else:\n new_indices[i] = index\n newexpr = self.do_convert(expr.expr, new_indices)\n return newexpr\n if isinstance(expr, PermuteDims):\n permuted_indices = _apply_permutation_to_list(expr.permutation, indices)\n return self.do_convert(expr.expr, permuted_indices)\n if isinstance(expr, ArrayAdd):\n return Add.fromiter(self.do_convert(arg, indices) for arg in expr.args)\n if isinstance(expr, _ArrayExpr):\n return expr.__getitem__(tuple(indices))\n if isinstance(expr, ArrayElementwiseApplyFunc):\n return expr.function(self.do_convert(expr.expr, indices))\n if isinstance(expr, Reshape):\n shape_up = expr.shape\n shape_down = get_shape(expr.expr)\n cumul = list(accumulate([1] + list(reversed(shape_up)), operator.mul))\n one_index = Add.fromiter(i*s for i, s in zip(reversed(indices), cumul))\n dest_indices = [None for _ in shape_down]\n c = 1\n for i, e in enumerate(reversed(shape_down)):\n if c == 1:\n if i == len(shape_down) - 1:\n dest_indices[i] = one_index\n else:\n dest_indices[i] = one_index % e\n elif i == len(shape_down) - 1:\n dest_indices[i] = one_index // c\n else:\n dest_indices[i] = one_index // c % e\n c *= e\n dest_indices.reverse()\n return self.do_convert(expr.expr, dest_indices)\n return _get_array_element_or_slice(expr, indices)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1066, "n_words": 254, "vocab_size": 116, "complexity": 28, "nloc": 64, "token_counts": 626, "n_ast_nodes": 961, "n_identifiers": 70, "random_cut": "def do_convert(self, expr, indices):\n if isinstance(expr, ArrayTensorProduct):\n cumul = list(accumulate([0] + [get_rank(arg) for arg in expr.args]))\n indices_grp = [indices[cumul[i]:cumul[i+1]] for i in range(len(expr.args))]\n return Mul.fromiter(self.do_convert(arg, ind) for arg, ind in zip(expr.args, indices_grp))\n if isinstance(expr, ArrayContraction):\n new_indices = [None for i in range(get_rank(expr.expr))]\n limits = []\n bottom_shape = get_shape(expr.expr)\n for contraction_index_grp in expr.contraction_indices" }, { "id": 215568, "commit_id": "34ac3ae2d4e4a0aa2db574c8584091ce702841ed", "repo": "salt", "path": "tests/pytests/unit/test_minion.py", "file_name": "test_minion.py", "fun_name": "test_send_req_tries", "commit_message": "fix unit tests", "code": "def test_send_req_tries(req_channel):\n channel_enter = MagicMock()\n channel_enter.send.side_effect = req_channel[1]\n channel = MagicMock()\n channel.__enter__.return_value = channel_enter\n\n with patch(req_channel[0], return_value=channel):\n opts = 
salt.config.DEFAULT_MINION_OPTS.copy()\n opts[\"random_startup_delay\"] = 0\n opts[\"return_retry_tries\"] = 30\n opts[\"grains\"] = {}\n with patch(\"salt.loader.grains\"):\n minion = salt.minion.Minion(opts)\n\n load = {\"load\": \"value\"}\n timeout = 60\n\n if \"Async\" in req_channel[0]:\n rtn = minion._send_req_async(load, timeout).result()\n else:\n rtn = minion._send_req_sync(load, timeout)\n\n assert rtn == 30\n\n\n@patch(\"salt.channel.client.ReqChannel.factory\")", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "@patch(\"salt.channel.client.ReqChannel.factory\")", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 203, "n_words": 59, "vocab_size": 41, "complexity": 2, "nloc": 19, "token_counts": 139, "n_ast_nodes": 249, "n_identifiers": 23, "random_cut": "def test_send_req_tries(req_channel):\n channel_enter = MagicMock()\n channel_enter.send.side_effect = req_channel[1]\n channel = MagicMock()\n channel.__enter__.return_value = channel_enter\n\n with patch(req_channel[0], return_value=channel):\n opts = salt.config.DEFAULT_MINION_OPTS.copy()\n opts[\"random_startup_delay\"] = 0\n opts[\"return_retry_tries\"] = 30\n opts[\"grains\"] = {}\n with patch(\"salt.loader.grains\"):\n minion = salt.minion.Minion(opts)\n\n load = {\"load\": \"value\"}\n timeout = 60\n\n " }, { "id": 188529, "commit_id": "31d219524bfc059815b906dad750b388cdcae78c", "repo": "jumpserver", "path": "apps/users/serializers/user.py", "file_name": "user.py", "fun_name": "save_and_set_custom_m2m_fields", "commit_message": "fix: 修复创建用户roles必填问题 (#7745)\n\nCo-authored-by: Jiangjie.Bai ", "code": "def save_and_set_custom_m2m_fields(self, validated_data, save_handler):\n m2m_values = {\n f: validated_data.pop(f, None) for f in self.custom_m2m_fields\n }\n instance = save_handler(validated_data)\n for field_name, value in m2m_values.items():\n if value is None:\n continue\n field = getattr(instance, field_name)\n field.set(value)\n return instance\n", "url": "https://github.com/jumpserver/jumpserver.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 128, "n_words": 35, "vocab_size": 29, "complexity": 4, "nloc": 11, "token_counts": 68, "n_ast_nodes": 104, "n_identifiers": 15, "random_cut": "def save_and_set_custom_m2m_fields(self, validated_data, save_handler):\n m2m_values = {\n f: validated_data.pop(f, None) for f in self.custom_m2m_fields\n }\n instance = save_handler(validated_data)\n for field_name, value in m2m_values.items():\n if value is None:\n continue\n field = getattr(instance, field_name)\n field.set(value" }, { "id": 31044, "commit_id": "119e3c0fc83db5803d20d0749eef1220f27cfdc8", "repo": "transformers", "path": "tests/models/mctct/test_feature_extraction_mctct.py", "file_name": "test_feature_extraction_mctct.py", "fun_name": "test_call", "commit_message": "M-CTC-T Model (#16402)\n\n* added cbs to notebooks, made copy-paste error fix in generation_utils\r\n\r\n* initial push for mctc model\r\n\r\n* mctc feature extractor done\r\n\r\n* added processor, tokenizer and their tests for MCTC. Have added an MCTC modeling test, adjusting model code accordingly.\r\n\r\n* added processor, tokenizer and their tests for MCTC. Have added an MCTC modeling test, adjusting model code accordingly.\r\n\r\n* passing attention, now struggling to figure out how attention masks make sense here\r\n\r\n* works when excluding attention masks. 
ask later how one would integrate attention maskshere\r\n\r\n* bizarre configuration error (model prefix comes first in config dict json and messes up the order)\r\n\r\n* all passing but bizzarre config dict ordering issue when to_dict\r\n\r\n* passing all major tests\r\n\r\n* feature extraction, processor, tokenizer added & tests passing\r\n\r\n* style & consistency & other logistical fixes\r\n\r\n* copy paste fix\r\n\r\n* model after feature extraction working\r\n\r\n* commiting final feature extraction results; need to fix normalization\r\n\r\n* feature extraction passing tests; probably should add tests on the specific flashlight-copied functions?\r\n\r\n* delete print ; format code a bit\r\n\r\n* fixing tests\r\n\r\n* passing major tests\r\n\r\n* fixing styles\r\n\r\n* completed tokenization test with real example; not sure if these values are entirely correct.\r\n\r\n* last test fixes from local\r\n\r\n* reverting accidentally included custom setup configs\r\n\r\n* remove load tf weights; fix config error\r\n\r\n* testing couldnt import featureextractor\r\n\r\n* fix docs\r\n\r\n* fix docs\r\n\r\n* resolving comments\r\n\r\n* style fixes\r\n\r\n* style fixes\r\n\r\n* Update to MCTCConv1dSubSampler\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* relposemb fixes\r\n\r\n* conv1d name issue; expecting config fail with paraentheses\r\n\r\n* fix config issue\r\n\r\n* fix config issue\r\n\r\n* fix config issue\r\n\r\n* change everything to MCTCT\r\n\r\n* fixing naming change errors\r\n\r\n* archive list\r\n\r\n* copyrights and docs\r\n\r\n* copyrights and docs\r\n\r\n* copyrights and docs\r\n\r\n* merge resolution\r\n\r\n* move tests, fix to changed optionaldependency structure\r\n\r\n* test directories changed\r\n\r\n* fixing tests\r\n\r\n* how to avoid tf tests?\r\n\r\n* how to avoid tf tests?\r\n\r\n* tests passing locally\r\n\r\n* allow mctctprocessor imported any env\r\n\r\n* allow mctctprocessor imported any env\r\n\r\n* fixed second round of feedback, need to fix docs\r\n\r\n* doc changes not being applied\r\n\r\n* all fixed\r\n\r\n* style fix\r\n\r\n* feedback fixes\r\n\r\n* fix copies and feature extraction style fix\r\n\r\n* Update tests/models/visual_bert/test_modeling_visual_bert.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* copy paste huggingface:main visual bert\r\n\r\n* added eof newline to visual bert; all tests are passing otherwise\r\n\r\n* fix slow tests by adding attention mask\r\n\r\n* change model id to speechbrain\r\n\r\n* make fix-copies\r\n\r\n* fix readme unwanted deletes\r\n\r\n* fixing readmes, make fix-copies\r\n\r\n* consistent M-CTC-T naming\r\n\r\n* Update src/transformers/models/mctct/__init__.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* all fixed but variable naming\r\n\r\n* adjust double quotes\r\n\r\n* fixed variable names\r\n\r\n* copyright and mr quilter\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* correct slow tests\r\n\r\n* make fix-copies\r\n\r\n* Update src/transformers/models/mctct/configuration_mctct.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/mctct/configuration_mctct.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* m-ctc-t not mctct\r\n\r\nCo-authored-by: Patrick von Platen \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def 
test_call(self):\n # Tests that all call wrap to encode_plus and batch_encode_plus\n feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())\n # create three inputs of length 800, 1000, and 12000\n speech_inputs = [floats_list((1, x))[0] for x in range(8000, 14000, 2000)]\n np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]\n\n # Test feature size\n input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors=\"np\").input_features\n self.assertTrue(input_features.ndim == 3)\n self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)\n\n # Test not batched input\n encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors=\"np\").input_features\n encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors=\"np\").input_features\n self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))\n\n # Test batched\n encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors=\"np\").input_features\n encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors=\"np\").input_features\n for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):\n self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 222, "n_words": 93, "vocab_size": 65, "complexity": 4, "nloc": 14, "token_counts": 206, "n_ast_nodes": 317, "n_identifiers": 28, "random_cut": "def test_call(self):\n # Tests that all call wrap to encode_plus and batch_encode_plus\n feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())\n # create three inputs of length 800, 1000, and 12000\n speech_inputs = [floats_list((1, x))[0] for x in range(8000, 14000, 2000)]\n np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]\n\n # Test feature size\n input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors=\"np\").input_features\n self.assertTrue(input_features.ndim == 3)\n self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)\n\n # Test not batched input\n encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors=\"np\").input_features\n enc" }, { "id": 136752, "commit_id": "acff8b6fa615bfe6463fd76660b14ef2bbafda42", "repo": "ray", "path": "python/ray/tests/test_state_api_log.py", "file_name": "test_state_api_log.py", "fun_name": "test_logs_manager_resolve_file", "commit_message": "[core][observability] Refactor ray log API (#30422)\n\nThis PR changes a few ray log semantics based on this: https://docs.google.com/document/d/1mwLz589IZ4LlPh218dDTMskec9hp3r40hw7ROYq3eVo/edit\r\n\r\nChange ray logs with various ids to ray logs \r\ni.e. ray logs worker --pid=x and ray logs actor --id=x and ray logs cluster \r\nAdded suffix options for querying logs through pid/actor id to differentiate .out and .err files.\r\nAlias ray logs ... to ray logs cluster ... 
so that\r\nray logs : print help\r\nray logs cluster: show all logs on head node\r\nray logs same as ray logs cluster : list/get files by filename.", "code": "async def test_logs_manager_resolve_file(logs_manager):\n node_id = NodeID(b\"1\" * 28)\n \n logs_client = logs_manager.data_source_client\n logs_client.get_all_registered_agent_ids = MagicMock()\n logs_client.get_all_registered_agent_ids.return_value = [node_id.hex()]\n expected_filename = \"filename\"\n log_file_name, n = await logs_manager.resolve_filename(\n node_id=node_id,\n log_filename=expected_filename,\n actor_id=None,\n task_id=None,\n pid=None,\n get_actor_fn=lambda _: True,\n timeout=10,\n )\n assert log_file_name == expected_filename\n assert n == node_id\n \n # Actor doesn't exist.\n with pytest.raises(ValueError):\n actor_id = ActorID(b\"2\" * 16)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 150, "n_words": 54, "vocab_size": 42, "complexity": 1, "nloc": 172, "token_counts": 874, "n_ast_nodes": 181, "n_identifiers": 25, "random_cut": "async def test_logs_manager_resolve_file(logs_manager):\n node_id = NodeID(b\"1\" * 28)\n \n logs_client = logs_manager.data_source_client\n logs_client.get_all_registered_agent_ids = MagicMock()\n logs_client.get_all_registered_agent_ids.return_value = [node_id.hex()]\n expected_filename = \"filename\"\n log_file_name, n = await logs_manager.resolve_filename(\n node_id=node_id,\n log_filename=expected_filename,\n actor_id=None,\n task_id=None,\n pid=None,\n get_actor_fn=lambda _: True,\n timeout=10,\n )\n assert log_file_name == expected_filename\n assert n == node_id\n \n # Actor doesn't exist.\n wit" }, { "id": 171920, "commit_id": "8d1be809bdaf84e25ff4b66524197a20e54006bd", "repo": "pandas", "path": "pandas/tests/series/methods/test_infer_objects.py", "file_name": "test_infer_objects.py", "fun_name": "test_infer_objects_interval", "commit_message": "BUG: infer_objects with Intervals (#50090)\n\n* BUG: infer_objects with Intervals\r\n\r\n* GH ref\r\n\r\n* fix ArrayManager case", "code": "def test_infer_objects_interval(self, index_or_series):\n # GH#50090\n ii = interval_range(1, 10)\n obj = index_or_series(ii)\n\n result = obj.astype(object).infer_objects()\n tm.assert_equal(result, obj)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 51, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 41, "n_ast_nodes": 66, "n_identifiers": 12, "random_cut": "def test_infer_objects_interval(self, index_or_series):\n # GH#50090\n ii = interval_range(1, 10)\n obj = index_or_series(ii)\n\n result = obj.astype(object).infer_objects()\n tm.assert_equal(result, obj)\n" }, { "id": 154556, "commit_id": "e5b1888cd932909e49194d58035da34b210b91c4", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/hdk_on_native/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "_join_by_index", "commit_message": "FEAT-#4946: Replace OmniSci with HDK (#4947)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Andrey Pavlenko ", "code": "def _join_by_index(self, other_modin_frames, how, sort, ignore_index):\n \n if how == \"outer\":\n raise NotImplementedError(\"outer join is not supported in HDK engine\")\n\n lhs = self._maybe_materialize_rowid()\n reset_index_names = False\n for rhs in other_modin_frames:\n rhs = rhs._maybe_materialize_rowid()\n if 
len(lhs._index_cols) != len(rhs._index_cols):\n raise NotImplementedError(\n \"join by indexes with different sizes is not supported\"\n )\n\n reset_index_names = reset_index_names or lhs._index_cols != rhs._index_cols\n\n condition = lhs._build_equi_join_condition(\n rhs, lhs._index_cols, rhs._index_cols\n )\n\n exprs = lhs._index_exprs()\n new_columns = lhs.columns.to_list()\n for col in lhs.columns:\n exprs[col] = lhs.ref(col)\n for col in rhs.columns:\n # Handle duplicating column names here. When user specifies\n # suffixes to make a join, actual renaming is done in front-end.\n new_col_name = col\n rename_idx = 0\n while new_col_name in exprs:\n new_col_name = f\"{col}{rename_idx}\"\n rename_idx += 1\n exprs[new_col_name] = rhs.ref(col)\n new_columns.append(new_col_name)\n\n op = JoinNode(\n lhs,\n rhs,\n how=how,\n exprs=exprs,\n condition=condition,\n )\n\n new_columns = Index.__new__(\n Index, data=new_columns, dtype=self.columns.dtype\n )\n lhs = lhs.__constructor__(\n dtypes=lhs._dtypes_for_exprs(exprs),\n columns=new_columns,\n index_cols=lhs._index_cols,\n op=op,\n force_execution_mode=self._force_execution_mode,\n )\n\n if sort:\n lhs = lhs.sort_rows(\n lhs._index_cols,\n ascending=True,\n ignore_index=False,\n na_position=\"last\",\n )\n\n if reset_index_names:\n lhs = lhs._reset_index_names()\n\n if ignore_index:\n new_columns = Index.__new__(RangeIndex, data=range(len(lhs.columns)))\n lhs = lhs._set_columns(new_columns)\n\n return lhs\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 912, "n_words": 171, "vocab_size": 113, "complexity": 11, "nloc": 57, "token_counts": 315, "n_ast_nodes": 498, "n_identifiers": 44, "random_cut": "def _join_by_index(self, other_modin_frames, how, sort, ignore_index):\n \n if how == \"outer\":\n raise NotImplementedError(\"outer join is not supported in HDK engine\")\n\n lhs = self._maybe_materialize_rowid()\n reset_index_names = False\n for rhs in other_modin_frames:\n rhs = rhs._maybe_materialize_rowid()\n if len(lhs._index_cols) != len(rhs._index_cols):\n raise NotImplementedError(\n \"join by indexes with different sizes is not supported\"\n )\n\n reset_index_names = reset_index_names or lhs._index_cols != rhs._index_cols\n\n condition = lhs._build_equi_join_condition(\n rhs, lhs._index_cols, rhs._index_cols\n )\n\n exprs = lhs._index_exprs()\n new_columns = lhs.columns.to_list()\n for col in lhs.columns:\n exprs[col] = lhs.ref(col)\n for col in rhs.columns:\n # Handle duplicating column names here. 
When user specifies\n # suffixes to make a join, actual renaming is done in front-end.\n new_col_name = col\n rename_idx = 0\n while new_col_name in exprs:\n new_col_name = f\"{col}{rename_idx}\"\n rename_idx += 1\n exprs[new_col_name] = rhs.ref(col)\n new_columns.append(new_col_name)\n\n op = JoinNode(\n lhs,\n rhs,\n how=how,\n exprs=exprs,\n condition=condition,\n )\n\n new_columns = Index.__new__(\n Index, data=new_columns, dtype=self.columns.dtype\n )\n lhs = lhs.__constructor__(\n dtypes=lhs._dtypes_for_exprs(exprs),\n columns=new_columns,\n index_cols=lhs._index_cols,\n op=op,\n force_execution_mode=self._force_execution_mode,\n )\n\n if sort:\n lhs = lhs.sort_rows(\n lhs._index_cols,\n ascending=True,\n ignore_index=False,\n na_position=\"last\",\n )\n\n if reset_index_names:\n lhs = lhs._reset_index_names()\n\n if ignore_index:\n new_columns = Index.__new__(RangeIndex, data=range(len(lhs.colu" }, { "id": 103410, "commit_id": "117d1b02bec493f63725228434bb42499de15c72", "repo": "kitty", "path": "kitty/launch.py", "file_name": "launch.py", "fun_name": "__del__", "commit_message": "Start work on edit-in-kitty", "code": "def __del__(self) -> None:\n if self.tdir:\n with suppress(OSError):\n shutil.rmtree(self.tdir)\n self.tdir = ''\n", "url": "https://github.com/kovidgoyal/kitty.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 55, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 31, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def __del__(self) -> None:\n if self.tdir:\n with suppress(OSError):\n shutil.rmtree(self" }, { "id": 90318, "commit_id": "167194cf60000cd07d5d084b25b6a585f35ac22a", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events_v2.py", "file_name": "test_organization_events_v2.py", "fun_name": "test_mobile_measurements", "commit_message": "feat(discover): Introduce a new standardized meta (#34835)\n\n- The idea here is that instead of having all the fields as top level\r\n attributes under the meta key, move them to a new `fields` key, so\r\n that we can put other meta in like `isMetricsData` or other future\r\n things", "code": "def test_mobile_measurements(self):\n data = load_data(\"transaction\", timestamp=before_now(minutes=1))\n data[\"measurements\"][\"frames_total\"] = {\"value\": 100}\n data[\"measurements\"][\"frames_slow\"] = {\"value\": 10}\n data[\"measurements\"][\"frames_frozen\"] = {\"value\": 5}\n data[\"measurements\"][\"stall_count\"] = {\"value\": 2}\n data[\"measurements\"][\"stall_total_time\"] = {\"value\": 12}\n data[\"measurements\"][\"stall_longest_time\"] = {\"value\": 7}\n self.store_event(data, project_id=self.project.id)\n\n query = {\n \"field\": [\n \"measurements.frames_total\",\n \"measurements.frames_slow\",\n \"measurements.frames_frozen\",\n \"measurements.frames_slow_rate\",\n \"measurements.frames_frozen_rate\",\n \"measurements.stall_count\",\n \"measurements.stall_total_time\",\n \"measurements.stall_longest_time\",\n \"measurements.stall_percentage\",\n ],\n \"query\": \"\",\n \"project\": [self.project.id],\n }\n response = self.do_request(query)\n assert response.status_code == 200\n data = response.data[\"data\"]\n assert len(data) == 1\n assert data[0][\"measurements.frames_total\"] == 100\n assert data[0][\"measurements.frames_slow\"] == 10\n assert data[0][\"measurements.frames_frozen\"] == 5\n assert data[0][\"measurements.frames_slow_rate\"] == 0.1\n assert data[0][\"measurements.frames_frozen_rate\"] == 0.05\n 
assert data[0][\"measurements.stall_count\"] == 2\n assert data[0][\"measurements.stall_total_time\"] == 12\n assert data[0][\"measurements.stall_longest_time\"] == 7\n assert data[0][\"measurements.stall_percentage\"] == 0.004\n meta = response.data[\"meta\"][\"fields\"]\n assert meta[\"measurements.frames_total\"] == \"number\"\n assert meta[\"measurements.frames_slow\"] == \"number\"\n assert meta[\"measurements.frames_frozen\"] == \"number\"\n assert meta[\"measurements.frames_slow_rate\"] == \"percentage\"\n assert meta[\"measurements.frames_frozen_rate\"] == \"percentage\"\n assert meta[\"measurements.stall_count\"] == \"number\"\n assert meta[\"measurements.stall_total_time\"] == \"number\"\n assert meta[\"measurements.stall_longest_time\"] == \"number\"\n assert meta[\"measurements.stall_percentage\"] == \"percentage\"\n\n query = {\n \"field\": [\n \"p75(measurements.frames_slow_rate)\",\n \"p75(measurements.frames_frozen_rate)\",\n \"percentile(measurements.frames_slow_rate,0.5)\",\n \"percentile(measurements.frames_frozen_rate,0.5)\",\n \"p75(measurements.stall_percentage)\",\n \"percentile(measurements.stall_percentage,0.5)\",\n ],\n \"query\": \"\",\n \"project\": [self.project.id],\n }\n response = self.do_request(query)\n assert response.status_code == 200\n data = response.data[\"data\"]\n assert len(data) == 1\n assert data[0][\"p75(measurements.frames_slow_rate)\"] == 0.1\n assert data[0][\"p75(measurements.frames_frozen_rate)\"] == 0.05\n assert data[0][\"p75(measurements.stall_percentage)\"] == 0.004\n assert data[0][\"percentile(measurements.frames_slow_rate,0.5)\"] == 0.1\n assert data[0][\"percentile(measurements.frames_frozen_rate,0.5)\"] == 0.05\n assert data[0][\"percentile(measurements.stall_percentage,0.5)\"] == 0.004\n meta = response.data[\"meta\"][\"fields\"]\n assert meta[\"p75(measurements.frames_slow_rate)\"] == \"percentage\"\n assert meta[\"p75(measurements.frames_frozen_rate)\"] == \"percentage\"\n assert meta[\"p75(measurements.stall_percentage)\"] == \"percentage\"\n assert meta[\"percentile(measurements.frames_slow_rate,0.5)\"] == \"percentage\"\n assert meta[\"percentile(measurements.stall_percentage,0.5)\"] == \"percentage\"\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 888, "n_words": 219, "vocab_size": 97, "complexity": 1, "nloc": 75, "token_counts": 534, "n_ast_nodes": 917, "n_identifiers": 17, "random_cut": "def test_mobile_measurements(self):\n data = load_data(\"transaction\", timestamp=before_now(minutes=1))\n data[\"measurements\"][\"frames_total\"] = {\"value\": 100}\n data[\"measurements\"][\"frames_slow\"] = {\"value\": 10}\n data[\"measurements\"][\"frames_frozen\"] = {\"value\": 5}\n data[\"measurements\"][\"stall_count\"] = {\"value\": 2}\n data[\"measurements\"][\"stall_total_time\"] = {\"value\": 12}\n data[\"measurements\"][\"stall_longest_time\"] = {\"value\": 7}\n self.store_event(data, project_id=self.project.id)\n\n query = {\n \"field\": [\n \"measurements.frames_total\",\n \"measurements.frames_slow\",\n \"measurements.frames_frozen\",\n \"measurements.frames_slow_rate\",\n \"measurements.frames_frozen_rate\",\n \"measurements.stall_count\",\n \"measurements.stall_total_time\",\n \"measurements.stall_longest_time\",\n \"measurements.stall_percentage\",\n ],\n \"query\": \"\",\n \"project\": [self.project.id],\n }\n response = self.do_request(query)\n assert response.status_code == 200\n data = response.data[\"data\"]\n assert 
len(data) == 1\n assert data[0][\"measurements.frames_total\"] == 100\n assert data[0][\"measurements.frames_slow\"] == 10\n assert data[0][\"measurements.frames_frozen\"] == 5\n assert data[0][\"measurements.frames_slow_rate\"] == 0.1\n assert data[0][\"measurements.frames_frozen_rate\"] == 0.05\n assert data[0][\"measurements.stall_count\"] == 2\n assert data[0][\"measurements.stall_total_time\"] == 12\n assert data[0][\"measurements.stall_longest_time\"] == 7\n assert " }, { "id": 105094, "commit_id": "1c1eaf96d5ef4623e36c9124d49e88ab476dd655", "repo": "datasets", "path": "datasets/quickdraw/quickdraw.py", "file_name": "quickdraw.py", "fun_name": "_split_generators", "commit_message": "Add QuickDraw dataset (#3592)\n\n* Add QuickDraw dataset\r\n\r\n* Style\r\n\r\n* Add infos file, dummy data, improve script\r\n\r\n* Add info and dummy data\r\n\r\n* Test readme\r\n\r\n* Finish readme\r\n\r\n* Delete generate_dummy.py\r\n\r\n* Remove whitespace", "code": "def _split_generators(self, dl_manager):\n base_url = _CONFIG_NAME_TO_BASE_URL[self.config.name]\n if not self.config.name.startswith(\"sketch_rnn\"):\n files = dl_manager.download(\n {name: url for name, url in zip(_NAMES, [base_url.format(name) for name in _NAMES])}\n )\n return [\n datasets.SplitGenerator(\n name=datasets.Split.TRAIN,\n gen_kwargs={\n \"files\": files,\n \"split\": \"train\",\n },\n ),\n ]\n else:\n files = dl_manager.download_and_extract(\n {name: url for name, url in zip(_NAMES, [base_url.format(name) for name in _NAMES])}\n )\n return [\n datasets.SplitGenerator(\n name=datasets.Split.TRAIN,\n gen_kwargs={\n \"files\": files,\n \"split\": \"train\",\n },\n ),\n datasets.SplitGenerator(\n name=datasets.Split.VALIDATION,\n gen_kwargs={\n \"files\": files,\n \"split\": \"valid\",\n },\n ),\n datasets.SplitGenerator(\n name=datasets.Split.TEST,\n gen_kwargs={\n \"files\": files,\n \"split\": \"test\",\n },\n ),\n ]\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 754, "n_words": 84, "vocab_size": 39, "complexity": 6, "nloc": 42, "token_counts": 214, "n_ast_nodes": 335, "n_identifiers": 22, "random_cut": "def _split_generators(self, dl_manager):\n base_url = _CONFIG_NAME_TO_BASE_URL[self.config.name]\n if not self.config.name.startswith(\"sketch_rnn\"):\n files = dl_manager.download(\n {name: url for name, url in zip(_NAMES, [base_url.format(name) for name in _NAMES])}\n " }, { "id": 338137, "commit_id": "9e4fe78b95cafc0e4f79dda004aabc7e4953568c", "repo": "accelerate", "path": "tests/test_scheduler.py", "file_name": "test_scheduler.py", "fun_name": "one_cycle_test", "commit_message": "Fix issue with one-cycle logic (#728)\n\n* Fixed!\r\n\r\n* Fix and write tests", "code": "def one_cycle_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):\n accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches)\n model = torch.nn.Linear(2, 4)\n optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)\n scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)\n model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)\n\n # Optimizer has stepped\n scheduler.step()\n if step_scheduler_with_optimizer or (num_processes == 1):\n assert (\n scheduler.scheduler.last_epoch == num_processes\n ), f\"Last Epoch ({scheduler.scheduler.last_epoch}) != Num Processes ({num_processes})\"\n else:\n 
assert (\n scheduler.scheduler.last_epoch != num_processes\n ), f\"Last Epoch ({scheduler.scheduler.last_epoch}) == Num Processes ({num_processes})\"\n\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 143, "n_words": 67, "vocab_size": 47, "complexity": 3, "nloc": 15, "token_counts": 143, "n_ast_nodes": 236, "n_identifiers": 24, "random_cut": "def one_cycle_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):\n accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches)\n model = torch.nn.Linear(2, 4)\n optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)\n scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_" }, { "id": 305643, "commit_id": "6355e682fa4aeb526570597d919ad1fb76755b9a", "repo": "core", "path": "homeassistant/components/motion_blinds/sensor.py", "file_name": "sensor.py", "fun_name": "async_added_to_hass", "commit_message": "Improve entity type hints [m] (#77816)", "code": "async def async_added_to_hass(self) -> None:\n \n self._blind.Register_callback(self.unique_id, self.schedule_update_ha_state)\n await super().async_added_to_hass()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 53, "n_identifiers": 7, "random_cut": "async def async_added_to_hass(self) -> None:\n \n self._blind.Register_callback(self.unique_id, self.schedule_update_ha_state)\n await super().async_added_to_hass()\n" }, { "id": 211260, "commit_id": "22973cb9be4b39759cea0db6aee60b1cdef87b4f", "repo": "PaddleDetection", "path": "deploy/python/preprocess.py", "file_name": "preprocess.py", "fun_name": "generate_scale", "commit_message": "update vehicle plate trt params (#6658)\n\n* fix trt params\r\n\r\n* update config to make safe of trt input shape", "code": "def generate_scale(self, img):\n \n limit_side_len = self.limit_side_len\n h, w, c = img.shape\n\n # limit the max side\n if self.limit_type == 'max':\n if h > w:\n ratio = float(limit_side_len) / h\n else:\n ratio = float(limit_side_len) / w\n elif self.limit_type == 'min':\n if h < w:\n ratio = float(limit_side_len) / h\n else:\n ratio = float(limit_side_len) / w\n elif self.limit_type == 'resize_long':\n ratio = float(limit_side_len) / max(h, w)\n else:\n raise Exception('not support limit type, image ')\n resize_h = int(h * ratio)\n resize_w = int(w * ratio)\n\n resize_h = max(int(round(resize_h / 32) * 32), 32)\n resize_w = max(int(round(resize_w / 32) * 32), 32)\n\n im_scale_y = resize_h / float(h)\n im_scale_x = resize_w / float(w)\n return im_scale_y, im_scale_x\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 342, "n_words": 111, "vocab_size": 55, "complexity": 6, "nloc": 24, "token_counts": 181, "n_ast_nodes": 300, "n_identifiers": 19, "random_cut": "def generate_scale(self, img):\n \n limit_side_len = self.limit_side_len\n h, w, c = img.shape\n\n # limit the max side\n if self.limit_type == 'max':\n if h > w:\n ratio = float(limit_side_len) / h\n else:\n ratio = float(limit_side_len) / w\n elif self.limit_type == 'min':\n if h < w:\n ratio = float(limit_side_len) / h\n else:\n ratio = float(limit_side_len) / w\n " }, { "id": 463, 
"commit_id": "206df4381dd57b96f166117f2e0860ce828a2e93", "repo": "PySyft", "path": "packages/syft/tests/syft/core/tensor/autodp/private_method_test.py", "file_name": "private_method_test.py", "fun_name": "test_incompatible_input_tensor_type", "commit_message": "fix flake8 issues acrross code", "code": "def test_incompatible_input_tensor_type() -> None:\n\n try:\n x = sy.Tensor(np.float32([1, 2, 3, 4.0]))\n x.private(min_val=0, max_val=5, entities=\"bob\")\n raise AssertionError()\n except TypeError:\n assert True\n\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 53, "n_words": 20, "vocab_size": 20, "complexity": 2, "nloc": 7, "token_counts": 56, "n_ast_nodes": 85, "n_identifiers": 12, "random_cut": "def test_incompatible_input_tensor_type() -> None:\n\n try:\n x = sy.Tensor(np.float32([1, 2, 3, 4.0]))\n x.pri" }, { "id": 108758, "commit_id": "50aaa7c50d1d790d4f6a955d4352d178276848e2", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_backend_pdf.py", "file_name": "test_backend_pdf.py", "fun_name": "test_text_urls", "commit_message": "Correct URL area with rotated texts in PDFs (#23288)\n\n\r\nCo-authored-by: Oscar Gustafsson \r\nCo-authored-by: eindH ", "code": "def test_text_urls():\n pikepdf = pytest.importorskip('pikepdf')\n\n test_url = 'https://test_text_urls.matplotlib.org/'\n\n fig = plt.figure(figsize=(2, 1))\n fig.text(0.1, 0.1, 'test plain 123', url=f'{test_url}plain')\n fig.text(0.1, 0.4, 'test mathtext $123$', url=f'{test_url}mathtext')\n\n with io.BytesIO() as fd:\n fig.savefig(fd, format='pdf')\n\n with pikepdf.Pdf.open(fd) as pdf:\n annots = pdf.pages[0].Annots\n\n # Iteration over Annots must occur within the context manager,\n # otherwise it may fail depending on the pdf structure.\n for y, fragment in [('0.1', 'plain'), ('0.4', 'mathtext')]:\n annot = next(\n (a for a in annots if a.A.URI == f'{test_url}{fragment}'),\n None)\n assert annot is not None\n assert getattr(annot, 'QuadPoints', None) is None\n # Positions in points (72 per inch.)\n assert annot.Rect[1] == decimal.Decimal(y) * 72\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 290, "n_words": 102, "vocab_size": 80, "complexity": 4, "nloc": 17, "token_counts": 182, "n_ast_nodes": 303, "n_identifiers": 33, "random_cut": "def test_text_urls():\n pikepdf = pytest.i" }, { "id": 257909, "commit_id": "9ca3ccae987e62743b3cf7fb66cb4671a7390fff", "repo": "haystack", "path": "test/pipelines/test_standard_pipelines.py", "file_name": "test_standard_pipelines.py", "fun_name": "test_most_similar_documents_pipeline_save", "commit_message": "fix:MostSimilarDocumentsPipeline doesn't have pipeline property (#3265)\n\n* Add comments and a unit test\r\n\r\n* More unit tests for MostSimilarDocumentsPipeline", "code": "def test_most_similar_documents_pipeline_save(tmpdir, document_store_with_docs):\n pipeline = MostSimilarDocumentsPipeline(document_store=document_store_with_docs)\n path = Path(tmpdir, \"most_similar_document_pipeline.yml\")\n pipeline.save_to_yaml(path)\n os.path.exists(path)\n\n\n@pytest.mark.elasticsearch\n@pytest.mark.parametrize(\"document_store_dot_product_with_docs\", [\"elasticsearch\"], indirect=True)", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "@pytest.mark.elasticsearch\n@pytest.mark.parametrize(\"document_store_dot_product_with_docs\", 
[\"elasticsearch\"], indirect=True)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 25, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 37, "n_ast_nodes": 99, "n_identifiers": 16, "random_cut": "def test_most_similar_documents_pipeline_save(tmpdir, document_store_with_docs):\n pipeline = MostSimilarDocumentsPipeline(document_store=document_store_with_docs)\n path = Path(tmpdir, \"most_similar_document_pipeline.yml\")\n pipeline.save_to_yaml(path)\n os.path.exists(path)\n\n\n@pytest.mark.elasticsearch\n@pytest.mark.parametrize(\"document_store_dot_product_with_doc" }, { "id": 72474, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/views/pages/create.py", "file_name": "create.py", "fun_name": "add_subpage", "commit_message": "Reformat with black", "code": "def add_subpage(request, parent_page_id):\n parent_page = get_object_or_404(Page, id=parent_page_id).specific\n if not parent_page.permissions_for_user(request.user).can_add_subpage():\n raise PermissionDenied\n\n page_types = [\n (model.get_verbose_name(), model._meta.app_label, model._meta.model_name)\n for model in type(parent_page).creatable_subpage_models()\n if model.can_create_at(parent_page)\n ]\n # sort by lower-cased version of verbose name\n page_types.sort(key=lambda page_type: page_type[0].lower())\n\n if len(page_types) == 1:\n # Only one page type is available - redirect straight to the create form rather than\n # making the user choose\n verbose_name, app_label, model_name = page_types[0]\n return redirect(\"wagtailadmin_pages:add\", app_label, model_name, parent_page.id)\n\n return TemplateResponse(\n request,\n \"wagtailadmin/pages/add_subpage.html\",\n {\n \"parent_page\": parent_page,\n \"page_types\": page_types,\n \"next\": get_valid_next_url_from_request(request),\n },\n )\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 227, "n_words": 84, "vocab_size": 75, "complexity": 5, "nloc": 22, "token_counts": 152, "n_ast_nodes": 240, "n_identifiers": 30, "random_cut": "def add_subpage(request, parent_page_id):\n parent_page = get_object_or_404(Page, id=parent_page_id).specific\n if not parent_page.permissions_for_user(request.user).can_add_subpage():\n raise PermissionDenied\n\n page_types = [\n (model.get_verbose_name(), model._meta.app_label, model._meta.model_name)\n for model in type(parent_page).creatable_subpage_models()\n if model.can_create_at(parent_page)\n ]\n # sort by lower-cased version of verbose name\n page_types.sort(key=lambda page_type: page_type[0].lower())\n\n if len(page_types) == 1:\n # Only one page type is available - redirect straight to the create form rather than\n # making the user choose\n verbose_name, app_label, model_name = page_types[0]\n return redirect(\"wagtailadmin_pages:add\", app_label, model_name, parent_page.id)\n\n return TemplateResponse" }, { "id": 164816, "commit_id": "65ecb90c863e40518bdb05803c79b24fd2899c77", "repo": "pandas", "path": "pandas/io/formats/css.py", "file_name": "css.py", "fun_name": "atomize", "commit_message": "BUG/ENH: Translate CSS border properties for `Styler.to_excel` (#45312)", "code": "def atomize(self, declarations) -> Generator[tuple[str, str], None, None]:\n for prop, value in declarations:\n attr = \"expand_\" + prop.replace(\"-\", \"_\")\n try:\n expand = getattr(self, attr)\n except AttributeError:\n yield prop, value\n else:\n for prop, value in expand(prop, 
value):\n yield prop, value\n\n expand_border = _border_expander()\n expand_border_top = _border_expander(\"top\")\n expand_border_right = _border_expander(\"right\")\n expand_border_bottom = _border_expander(\"bottom\")\n expand_border_left = _border_expander(\"left\")\n\n expand_border_color = _side_expander(\"border-{:s}-color\")\n expand_border_style = _side_expander(\"border-{:s}-style\")\n expand_border_width = _side_expander(\"border-{:s}-width\")\n expand_margin = _side_expander(\"margin-{:s}\")\n expand_padding = _side_expander(\"padding-{:s}\")\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 213, "n_words": 69, "vocab_size": 49, "complexity": 4, "nloc": 10, "token_counts": 75, "n_ast_nodes": 235, "n_identifiers": 25, "random_cut": "def atomize(self, declarations) -> Generator[tuple[str, str], None, None]:\n for prop, value in declarations:\n attr = \"expand_\" + prop.replace(\"-\", \"_\")\n " }, { "id": 31450, "commit_id": "f47afefb210a120545a5e9745292aaf6c316d246", "repo": "transformers", "path": "tests/test_modeling_common.py", "file_name": "test_modeling_common.py", "fun_name": "test_equivalence_pt_to_flax", "commit_message": "Use 5e-5 For BigBird PT/Flax equivalence tests (#17780)\n\n* rename to check_pt_flax_outputs\r\n\r\n* update check_pt_flax_outputs\r\n\r\n* use 5e-5 for BigBird PT/Flax test\r\n\r\nCo-authored-by: ydshieh ", "code": "def test_equivalence_pt_to_flax(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n with self.subTest(model_class.__name__):\n fx_model_class_name = \"Flax\" + model_class.__name__\n\n if not hasattr(transformers, fx_model_class_name):\n # no flax model exists for this class\n return\n\n # Output all for aggressive testing\n config.output_hidden_states = True\n config.output_attentions = self.has_attentions\n\n fx_model_class = getattr(transformers, fx_model_class_name)\n\n # load PyTorch class\n pt_model = model_class(config).eval()\n # Flax models don't use the `use_cache` option and cache is not returned as a default.\n # So we disable `use_cache` here for PyTorch model.\n pt_model.config.use_cache = False\n\n # load Flax class\n fx_model = fx_model_class(config, dtype=jnp.float32)\n\n # make sure only flax inputs are forward that actually exist in function args\n fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys()\n\n # prepare inputs\n pt_inputs = self._prepare_for_class(inputs_dict, model_class)\n\n # remove function args that don't exist in Flax\n pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys}\n\n # send pytorch inputs to the correct device\n pt_inputs = {\n k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items()\n }\n\n # convert inputs to Flax\n fx_inputs = {k: np.array(v) for k, v in pt_inputs.items() if torch.is_tensor(v)}\n\n fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)\n fx_model.params = fx_state\n\n # send pytorch model to the correct device\n pt_model.to(torch_device)\n\n with torch.no_grad():\n pt_outputs = pt_model(**pt_inputs)\n fx_outputs = fx_model(**fx_inputs)\n\n fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None])\n pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None])\n\n self.assertEqual(fx_keys, pt_keys)\n self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class)\n\n with 
tempfile.TemporaryDirectory() as tmpdirname:\n pt_model.save_pretrained(tmpdirname)\n fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, from_pt=True)\n\n fx_outputs_loaded = fx_model_loaded(**fx_inputs)\n\n fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None])\n pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None])\n\n self.assertEqual(fx_keys, pt_keys)\n self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1011, "n_words": 273, "vocab_size": 137, "complexity": 17, "nloc": 38, "token_counts": 416, "n_ast_nodes": 668, "n_identifiers": 66, "random_cut": "def test_equivalence_pt_to_flax(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n with se" }, { "id": 73286, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/views.py", "file_name": "views.py", "fun_name": "get_prepopulated_fields", "commit_message": "Reformat with black", "code": "def get_prepopulated_fields(self, form):\n fields = []\n for field_name, dependencies in self.model_admin.get_prepopulated_fields(\n self.request\n ).items():\n missing_dependencies = [\n f\"'{f}'\" for f in dependencies if f not in form.fields\n ]\n if len(missing_dependencies) != 0:\n missing_deps_string = \", \".join(missing_dependencies)\n dependency_string = (\n \"dependencies\" if len(missing_dependencies) > 1 else \"dependency\"\n )\n warnings.warn(\n f\"Missing {dependency_string} {missing_deps_string} for prepopulated_field '{field_name}''.\",\n category=RuntimeWarning,\n )\n elif field_name in form.fields:\n fields.append(\n {\n \"field\": form[field_name],\n \"dependencies\": [form[f] for f in dependencies],\n }\n )\n return fields\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 416, "n_words": 73, "vocab_size": 54, "complexity": 8, "nloc": 25, "token_counts": 126, "n_ast_nodes": 217, "n_identifiers": 20, "random_cut": "def get_prepopulated_fields(self, form):\n fields = []\n for field_name, dependencies in self.model_admin" }, { "id": 247791, "commit_id": "9d21ecf7ceab55bc19c4457b8b07401b0b1623a7", "repo": "synapse", "path": "tests/storage/test_id_generators.py", "file_name": "test_id_generators.py", "fun_name": "test_get_next_txn", "commit_message": "Add type hints to tests files. 
(#12256)", "code": "def test_get_next_txn(self) -> None:\n \n\n # Prefill table with 7 rows written by 'master'\n self._insert_rows(\"master\", 7)\n\n id_gen = self._create_id_generator()\n\n self.assertEqual(id_gen.get_positions(), {\"master\": 7})\n self.assertEqual(id_gen.get_current_token_for_writer(\"master\"), 7)\n\n # Try allocating a new ID gen and check that we only see position\n # advanced after we leave the context manager.\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 101, "n_words": 45, "vocab_size": 41, "complexity": 1, "nloc": 10, "token_counts": 98, "n_ast_nodes": 94, "n_identifiers": 8, "random_cut": "def test_get_next_txn(self) -> None:\n \n\n # Prefill table with 7 rows written by 'master'\n self._insert_rows(\"master\", 7)\n\n id_gen = self._create_id_generator()\n\n self.assertEqual(id_gen.get_positions(), {\"master\": 7})\n self.assertEqual(id_gen.get_current_token_for_writer(\"master\"), 7)\n\n # Try allocating a new ID gen and check that we only see position\n # advanced after we " }, { "id": 309709, "commit_id": "32d4f104ff4fb70c91853fd485f8382f9e927aa5", "repo": "core", "path": "homeassistant/components/webostv/media_player.py", "file_name": "media_player.py", "fun_name": "async_media_play_pause", "commit_message": "Enable strict typing in webostv (#64193)\n\n* Enable strict typing in webostv\r\n\r\nEnable strict typing in webostv\r\n\r\n* Apply review comments", "code": "async def async_media_play_pause(self) -> None:\n \n if self._paused:\n await self.async_media_play()\n else:\n await self.async_media_pause()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 55, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 6, "token_counts": 27, "n_ast_nodes": 51, "n_identifiers": 5, "random_cut": "async def async_media_play_pause(self) -> None:\n \n if self._paused:\n await self.async_media_play()\n else:\n await self.async_media_pause()\n" }, { "id": 316464, "commit_id": "ac85a3ce64ea815fd3530085c085e384cf8269fb", "repo": "core", "path": "homeassistant/components/deconz/button.py", "file_name": "button.py", "fun_name": "async_press", "commit_message": "Use pydeconz interface controls for button platform (#74654)", "code": "async def async_press(self) -> None:\n \n async_button_fn = getattr(\n self.gateway.api.scenes,\n self.entity_description.button_fn,\n )\n await async_button_fn(self._device.group_id, self._device.id)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 64, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 7, "token_counts": 42, "n_ast_nodes": 68, "n_identifiers": 12, "random_cut": "async def async_press(self) -> None:\n \n async_button_fn = getattr(\n self.gateway.api.scenes,\n self.entity_description.button_fn,\n )\n " }, { "id": 93368, "commit_id": "e1482001662b446c7c2be7c9daa19cba562c615c", "repo": "sentry", "path": "tests/sentry/snuba/test_entity_subscriptions.py", "file_name": "test_entity_subscriptions.py", "fun_name": "test_get_entity_subscription_for_metrics_dataset_for_users", "commit_message": "refs(metric_alerts): Consolidate `QueryDatasets` and `Dataset` (#36894)\n\nThis refactor pr removes `QueryDatasets` and just uses `Dataset` everywhere. 
`QueryDatasets` existed\r\nbefore `Dataset`, but `Dataset` is now more widely used and is more up to date. The values here are\r\nthe same, `Dataset` just supports a few more datasets.\r\n\r\nWe already make sure that only datasets that are valid for alerts can be passed to the alert rules\r\napi, so this won't allow people to attempt to create alerts on datasets that don't support them.", "code": "def test_get_entity_subscription_for_metrics_dataset_for_users(self) -> None:\n org_id = self.organization.id\n use_case_id = UseCaseKey.RELEASE_HEALTH\n\n aggregate = \"percentage(users_crashed, users) AS _crash_rate_alert_aggregate\"\n entity_subscription = get_entity_subscription(\n query_type=SnubaQuery.Type.CRASH_RATE,\n dataset=Dataset.Metrics,\n aggregate=aggregate,\n time_window=3600,\n extra_fields={\"org_id\": self.organization.id},\n )\n assert isinstance(entity_subscription, MetricsSetsEntitySubscription)\n assert entity_subscription.aggregate == aggregate\n assert entity_subscription.get_entity_extra_params() == {\n \"organization\": self.organization.id,\n \"granularity\": 10,\n }\n assert entity_subscription.dataset == Dataset.Metrics\n session_status = resolve_tag_key(use_case_id, org_id, \"session.status\")\n session_status_crashed = resolve_tag_value(use_case_id, org_id, \"crashed\")\n snql_query = entity_subscription.build_query_builder(\n \"\", [self.project.id], None, {\"organization_id\": self.organization.id}\n ).get_snql_query()\n key = lambda func: func.alias\n assert sorted(snql_query.query.select, key=key) == sorted(\n [\n Function(\"uniq\", parameters=[Column(\"value\")], alias=\"count\"),\n Function(\n \"uniqIf\",\n parameters=[\n Column(name=\"value\"),\n Function(\n function=\"equals\",\n parameters=[\n Column(session_status),\n session_status_crashed,\n ],\n ),\n ],\n alias=\"crashed\",\n ),\n ],\n key=key,\n )\n assert snql_query.query.where == [\n Condition(Column(\"project_id\"), Op.IN, [self.project.id]),\n Condition(Column(\"org_id\"), Op.EQ, self.organization.id),\n Condition(\n Column(\"metric_id\"),\n Op.EQ,\n resolve(\n UseCaseKey.RELEASE_HEALTH,\n self.organization.id,\n entity_subscription.metric_key.value,\n ),\n ),\n ]\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 863, "n_words": 116, "vocab_size": 87, "complexity": 1, "nloc": 57, "token_counts": 327, "n_ast_nodes": 506, "n_identifiers": 50, "random_cut": "def test_get_entity_subscription_for_metrics_dataset_for_users(self) -> None:\n org_id = self.organization.id\n use_case_id = UseCaseKey.RELEASE_HEALTH\n\n aggregate = \"percentage(users_crashed" }, { "id": 77024, "commit_id": "ec70921e52b21c0ab463d1f243a2fc9c64c4e573", "repo": "wagtail", "path": "wagtail/admin/views/generic/models.py", "file_name": "models.py", "fun_name": "form_invalid", "commit_message": "Refactor create snippets view into class-based view (#8332)", "code": "def form_invalid(self, form):\n self.form = form\n error_message = self.get_error_message()\n if error_message is not None:\n messages.validation_error(self.request, error_message, form)\n return super().form_invalid(form)\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 57, "n_words": 19, "vocab_size": 17, "complexity": 2, "nloc": 6, "token_counts": 46, "n_ast_nodes": 72, "n_identifiers": 9, "random_cut": "def form_invalid(self, form):\n self.form = form\n error_message = 
self.get_error_message()\n if error_message is not None:\n messages.validation_error(self.request, error_message, form)\n return super().form_invalid(form)\n\n" }, { "id": 25810, "commit_id": "f5bdda6f8a9dd21805629f97d3b921f110ec8cdf", "repo": "saleor", "path": "saleor/graphql/warehouse/dataloaders.py", "file_name": "dataloaders.py", "fun_name": "batch_load", "commit_message": "Implement read replicas in all dataloaders (#8751)\n\n* Add read replicas to account dataloaders\n\n* Add read replicas to app dataloaders\n\n* Add read replicas to channel dataloaders\n\n* Add read replicas to checkout dataloaders\n\n* Add read replicas to discount dataloaders\n\n* Add read replicas to gift card dataloaders\n\n* Add read replicas to menu dataloaders\n\n* Add read replicas to order dataloaders\n\n* Add read replicas to page dataloaders\n\n* Add read replicas to product dataloaders\n\n* Add read replicas to shipping dataloaders\n\n* Add read replicas to warehouse dataloaders\n\n* Changelog", "code": "def batch_load(self, keys):\n reservations_by_checkout_line = defaultdict(list)\n queryset = (\n Reservation.objects.using(self.database_connection_name)\n .filter(checkout_line_id__in=keys)\n .not_expired()\n ) # type: ignore\n for reservation in queryset:\n reservations_by_checkout_line[reservation.checkout_line_id].append(\n reservation\n )\n queryset = (\n PreorderReservation.objects.using(self.database_connection_name)\n .filter(checkout_line_id__in=keys)\n .not_expired()\n ) # type: ignore\n for reservation in queryset:\n reservations_by_checkout_line[reservation.checkout_line_id].append(\n reservation\n )\n return [reservations_by_checkout_line[key] for key in keys]\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 243, "n_words": 46, "vocab_size": 25, "complexity": 4, "nloc": 21, "token_counts": 106, "n_ast_nodes": 166, "n_identifiers": 19, "random_cut": "def batch_load(self, keys):\n reservations_by_checkout_line = defaultdict(list)\n queryset = (\n Reservation.objects.using(se" }, { "id": 26261, "commit_id": "ced13da77f1a778941e2a98a57e919980964415b", "repo": "saleor", "path": "saleor/csv/tests/export/products_data/test_handle_relations_data.py", "file_name": "test_handle_relations_data.py", "fun_name": "test_add_date_attribute_info_to_data", "commit_message": "Reference is not removed when user will remove object mvp (#9162)\n\n* Change reference storage place from slug to relation\r\n\r\n* Add attributes delete to page and page_type bulk delete mutation (#9093)\r\n\r\n* Add associated attributes delete function to page and page_type bulk delete mutations\r\n\r\n* After review changes, add bad path test, add trace_atomic_transaction\r\n\r\n* Remove test placeholder\r\n\r\n* Changes after review, add batching to migrations\r\n\r\n* Post MVP and after review changes to migration\r\n\r\n* Revert changes\r\n\r\n* Change validation and deleted unnecessary check in serialization\r\n\r\n* Review changes", "code": "def test_add_date_attribute_info_to_data(product, date_attribute):\n # given\n pk = product.pk\n date = datetime(2021, 8, 10, 5, 3)\n attribute_data = AttributeData(\n slug=date_attribute.slug,\n value_slug=None,\n value_name=None,\n value=None,\n file_url=None,\n input_type=\"date\",\n entity_type=None,\n unit=None,\n rich_text=None,\n boolean=None,\n date_time=date,\n reference_page=None,\n reference_product=product.id,\n )\n input_data = {pk: {}}\n\n # when\n result = 
add_attribute_info_to_data(\n product.pk, attribute_data, \"product attribute\", input_data\n )\n\n # then\n expected_header = f\"{date_attribute.slug} (product attribute)\"\n assert result[pk][expected_header] == {f\"{date.date()}\"}\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 191, "n_words": 58, "vocab_size": 49, "complexity": 1, "nloc": 24, "token_counts": 126, "n_ast_nodes": 200, "n_identifiers": 26, "random_cut": "def test_add_date_attribute_info_to_data(product, date_attribute):\n # given\n pk = product.pk\n date = datetime(2021, 8, 10, 5, 3)\n attribute_data = AttributeData(\n slug=date_attribute.slug,\n value_slug=None,\n value_name=None,\n value=None,\n file_url=None,\n input_type=\"date\",\n entity_type=No" }, { "id": 15176, "commit_id": "30e0c75067f760aad4a38354486f2815315a78d7", "repo": "ccxt", "path": "python/ccxt/ascendex.py", "file_name": "ascendex.py", "fun_name": "parse_transaction", "commit_message": "1.66.54\n\n[ci skip]", "code": "def parse_transaction(self, transaction, currency=None):\n #\n # {\n # requestId: \"wuzd1Ojsqtz4bCA3UXwtUnnJDmU8PiyB\",\n # time: 1591606166000,\n # asset: \"USDT\",\n # transactionType: \"deposit\",\n # amount: \"25\",\n # commission: \"0\",\n # networkTransactionId: \"0xbc4eabdce92f14dbcc01d799a5f8ca1f02f4a3a804b6350ea202be4d3c738fce\",\n # status: \"pending\",\n # numConfirmed: 8,\n # numConfirmations: 20,\n # destAddress: {\n # address: \"0xe7c70b4e73b6b450ee46c3b5c0f5fb127ca55722\",\n # destTag: \"...\" # for currencies that have it\n # }\n # }\n #\n id = self.safe_string(transaction, 'requestId')\n amount = self.safe_number(transaction, 'amount')\n destAddress = self.safe_value(transaction, 'destAddress', {})\n address = self.safe_string(destAddress, 'address')\n tag = self.safe_string(destAddress, 'destTag')\n txid = self.safe_string(transaction, 'networkTransactionId')\n type = self.safe_string(transaction, 'transactionType')\n timestamp = self.safe_integer(transaction, 'time')\n currencyId = self.safe_string(transaction, 'asset')\n code = self.safe_currency_code(currencyId, currency)\n status = self.parse_transaction_status(self.safe_string(transaction, 'status'))\n feeCost = self.safe_number(transaction, 'commission')\n return {\n 'info': transaction,\n 'id': id,\n 'currency': code,\n 'amount': amount,\n 'network': None,\n 'address': address,\n 'addressTo': address,\n 'addressFrom': None,\n 'tag': tag,\n 'tagTo': tag,\n 'tagFrom': None,\n 'status': status,\n 'type': type,\n 'updated': None,\n 'txid': txid,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'fee': {\n 'currency': code,\n 'cost': feeCost,\n },\n }\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 741, "n_words": 150, "vocab_size": 103, "complexity": 1, "nloc": 36, "token_counts": 228, "n_ast_nodes": 403, "n_identifiers": 23, "random_cut": "def parse_transaction(self, transaction, currency=None):\n #\n # {\n # requestId: \"wuzd1Ojsqtz4bCA3UXwtUnnJDmU8PiyB\",\n # time: 1591606166000,\n # asset: \"USDT\",\n # transactionType: \"deposit\",\n # amount: \"25\",\n # commission: \"0\",\n # networkTransactionId: \"0xbc4eabdce92f14dbcc01d799a5f8ca1f02f4a3a804b6350ea202be4d3c738fce\",\n # status: \"pending\",\n # numConfirmed: 8,\n # numConfirmations: 20,\n # destAddress: {\n # address: \"0xe7c70b4e73b6b450ee46c3b5c0f5fb127ca55722\",\n # destTag: \"...\" # for currencies 
that have it\n # }\n " }, { "id": 147519, "commit_id": "cb1919b8d011c877a9690e3d09dd5de79b87cdd8", "repo": "ray", "path": "python/ray/serve/tests/test_http_adapters.py", "file_name": "test_http_adapters.py", "fun_name": "test_image_to_ndarray", "commit_message": "[Doc][Serve] Add minimal docs for model wrappers and http adapters (#23536)", "code": "def test_image_to_ndarray():\n buffer = io.BytesIO()\n arr = (np.random.rand(100, 100, 3) * 255).astype(\"uint8\")\n image = Image.fromarray(arr).convert(\"RGB\")\n image.save(buffer, format=\"png\")\n np.testing.assert_almost_equal(image_to_ndarray(buffer.getvalue()), arr)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 33, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 6, "token_counts": 74, "n_ast_nodes": 123, "n_identifiers": 19, "random_cut": "def test_image_to_ndarray():\n buffer = io.BytesIO()\n arr = (np.random.rand(100, 100, 3) * 255).astype(\"uint8\")\n image = Image.fromarray(arr).convert(\"RGB\")\n image.save(buffer, format=\"png\")\n np.testing.assert_almost_equal(image_to_" }, { "id": 127190, "commit_id": "440ae620eb27a910a0d1b9ee5cf47d58bbdebc37", "repo": "ray", "path": "python/ray/data/_internal/stats.py", "file_name": "stats.py", "fun_name": "record_task", "commit_message": "Cap the number of stats kept in StatsActor and purge in FIFO order if the limit exceeded (#27964)\n\nThere is a risk of using too much of memory in StatsActor, because its lifetime is the same as cluster lifetime.\r\nThis puts a cap on how many stats to keep, and purge the stats in FIFO order if this cap is exceeded.", "code": "def record_task(self, stats_uuid, task_idx, metadata):\n # Null out the schema to keep the stats size small.\n metadata.schema = None\n if stats_uuid in self.start_time:\n self.metadata[stats_uuid][task_idx] = metadata\n self.last_time[stats_uuid] = time.perf_counter()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 71, "n_words": 29, "vocab_size": 26, "complexity": 2, "nloc": 5, "token_counts": 46, "n_ast_nodes": 70, "n_identifiers": 10, "random_cut": "def record_task(self, stats_uuid, task_idx, metadata):\n # Null out the schema to keep the stats size small.\n metadata.schema = None\n if stats_uuid in self.start_time:\n self.metadata[stats_uuid][task_idx] = metadata\n sel" }, { "id": 164616, "commit_id": "a1ce6a0eb07e5f969ab192b792083cb1c1f702d5", "repo": "pandas", "path": "pandas/tests/computation/test_eval.py", "file_name": "test_eval.py", "fun_name": "test_disallow_scalar_bool_ops", "commit_message": "TST: Don't use autouse fixture in test_eval (#45832)", "code": "def test_disallow_scalar_bool_ops(self, ex, engine, parser):\n x, a, b = np.random.randn(3), 1, 2 # noqa:F841\n df = DataFrame(np.random.randn(3, 2)) # noqa:F841\n\n msg = \"cannot evaluate scalar only bool ops|'BoolOp' nodes are not\"\n with pytest.raises(NotImplementedError, match=msg):\n pd.eval(ex, engine=engine, parser=parser)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 77, "n_words": 37, "vocab_size": 33, "complexity": 1, "nloc": 6, "token_counts": 73, "n_ast_nodes": 113, "n_identifiers": 20, "random_cut": "def test_disallow_scalar_bool_ops(self, ex, engine, parser):\n x, a, b = np.random.randn(3), 1, 2 # noqa:F841\n df = DataFrame(np." 
}, { "id": 90402, "commit_id": "0e5a8e05c209392e5c0d2fef8169ba765c2479de", "repo": "sentry", "path": "src/sentry/rules/actions/integrations/base.py", "file_name": "base.py", "fun_name": "get_integration_name", "commit_message": "ref(actions): Move SentryApp and Ticket Rules actions to a module (#34834)", "code": "def get_integration_name(self) -> str:\n \n try:\n integration = self.get_integration()\n except Integration.DoesNotExist:\n return \"[removed]\"\n\n _name: str = integration.name\n return _name\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 75, "n_words": 18, "vocab_size": 16, "complexity": 2, "nloc": 8, "token_counts": 33, "n_ast_nodes": 59, "n_identifiers": 9, "random_cut": "def get_integration_name(self) -> str:\n \n try:\n integration = self.get_integration()\n except Integration.DoesNotExist:\n return \"[removed]\"\n\n _name: str = integration.name\n return _name\n" }, { "id": 125080, "commit_id": "759966781f3f83fb6c1fdd7586296fb06b670f2b", "repo": "ray", "path": "python/ray/train/base_trainer.py", "file_name": "base_trainer.py", "fun_name": "_validate_attributes", "commit_message": "[air] Allow users to use instances of `ScalingConfig` (#25712)\n\nCo-authored-by: Xiaowei Jiang \r\nCo-authored-by: matthewdeng \r\nCo-authored-by: Kai Fricke ", "code": "def _validate_attributes(self):\n \n # Run config\n if not isinstance(self.run_config, RunConfig):\n raise ValueError(\n f\"`run_config` should be an instance of `ray.air.RunConfig`, \"\n f\"found {type(self.run_config)} with value `{self.run_config}`.\"\n )\n # Scaling config\n if not isinstance(self.scaling_config, ScalingConfig):\n raise ValueError(\n \"`scaling_config` should be an instance of `ScalingConfig`, \"\n f\"found {type(self.scaling_config)} with value `{self.scaling_config}`.\"\n )\n # Datasets\n if not isinstance(self.datasets, dict):\n raise ValueError(\n f\"`datasets` should be a dict mapping from a string to \"\n f\"`ray.data.Dataset` objects, \"\n f\"found {type(self.datasets)} with value `{self.datasets}`.\"\n )\n elif any(\n not isinstance(ds, ray.data.Dataset) and not callable(ds)\n for ds in self.datasets.values()\n ):\n raise ValueError(\n f\"At least one value in the `datasets` dict is not a \"\n f\"`ray.data.Dataset`: {self.datasets}\"\n )\n # Preprocessor\n if self.preprocessor is not None and not isinstance(\n self.preprocessor, ray.data.Preprocessor\n ):\n raise ValueError(\n f\"`preprocessor` should be an instance of `ray.data.Preprocessor`, \"\n f\"found {type(self.preprocessor)} with value `{self.preprocessor}`.\"\n )\n\n if self.resume_from_checkpoint is not None and not isinstance(\n self.resume_from_checkpoint, ray.air.Checkpoint\n ):\n raise ValueError(\n f\"`resume_from_checkpoint` should be an instance of \"\n f\"`ray.air.Checkpoint`, found {type(self.resume_from_checkpoint)} \"\n f\"with value `{self.resume_from_checkpoint}`.\"\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 647, "n_words": 163, "vocab_size": 84, "complexity": 11, "nloc": 40, "token_counts": 166, "n_ast_nodes": 376, "n_identifiers": 23, "random_cut": "def _validate_attributes(self):\n \n # Run config\n if not isinstance(self.run_config, RunConfig):\n raise ValueError(\n f\"`run_config` should be an instance of `ray.air.RunConfig`, \"\n f\"found {type(self.run_config)} with value `{self.run_config}`.\"\n )\n # Scaling 
config\n if not isinstance(self.scaling_config, ScalingConfig):\n raise ValueError(\n \"`scaling_config` should be an instance of `ScalingConfig`, \"\n f\"found {type(self.scaling_config)} with value `{self.scaling_config}`.\"\n )\n # Datasets\n if not isinstance(self.datasets, dict):\n raise ValueError(\n f\"`datasets` should be a dict mapping from a string to \"\n f\"`ray.data.Dataset` objects, \"\n f\"found {type(self.datasets)} with value `{self.datasets}`.\"\n )\n elif any(\n not isinstance(ds, ray.data.Dataset) and not callable(ds)\n for ds in self.datasets.values()\n ):\n raise ValueError(\n f\"At least one value in the `datasets` dict is not a \"\n f\"`ray.data.Dataset`: {self.datasets}\"\n )\n # Preprocessor\n if self.preprocessor is not None and not isinstance(\n self.preprocessor, ray.data.Preprocessor\n ):\n raise ValueError(\n f\"`preprocessor` should be an instance of `ray.data.Preprocessor`, \"\n f\"found {type(self.preprocessor)} with value `{self.preprocessor}`.\"\n )\n\n if sel" }, { "id": 27390, "commit_id": "cec30e00e4fcc9fc4eb12e2777051d7966f00132", "repo": "saleor", "path": "saleor/graphql/webhook/tests/benchmark/fixtures.py", "file_name": "fixtures.py", "fun_name": "apps_without_webhooks", "commit_message": "Add benchmark tests for Webhooks events resolvers (#9775)", "code": "def apps_without_webhooks(db):\n return App.objects.bulk_create(\n [\n App(name=\"App1\", is_active=True),\n App(name=\"App2\", is_active=False),\n App(name=\"App3\", is_active=True),\n App(name=\"App4\", is_active=False),\n ]\n )\n\n\n@pytest.fixture", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 78, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 9, "token_counts": 59, "n_ast_nodes": 100, "n_identifiers": 9, "random_cut": "def apps_without_webhooks(db):\n return Ap" }, { "id": 142461, "commit_id": "43aa2299e6623c8f8c7c4a1b80133459d0aa68b0", "repo": "ray", "path": "python/ray/runtime_context.py", "file_name": "runtime_context.py", "fun_name": "get", "commit_message": "[api] Annotate as public / move ray-core APIs to _private and add enforcement rule (#25695)\n\nEnable checking of the ray core module, excluding serve, workflows, and tune, in ./ci/lint/check_api_annotations.py. 
This required moving many files to ray._private and associated fixes.", "code": "def get(self):\n \n context = {\n \"job_id\": self.job_id,\n \"node_id\": self.node_id,\n \"namespace\": self.namespace,\n }\n if self.worker.mode == ray._private.worker.WORKER_MODE:\n if self.task_id is not None:\n context[\"task_id\"] = self.task_id\n if self.actor_id is not None:\n context[\"actor_id\"] = self.actor_id\n\n return context\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 154, "n_words": 34, "vocab_size": 24, "complexity": 4, "nloc": 12, "token_counts": 77, "n_ast_nodes": 128, "n_identifiers": 13, "random_cut": "def get(self):\n \n context = {\n \"job_id\": self.job_id,\n \"node_id\": self.node_id,\n \"namespace\": self.namespace,\n " }, { "id": 5393, "commit_id": "e3cb566f793c42a70b7bfffca756448f055942ed", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-faker/unit_tests/unit_test.py", "file_name": "unit_test.py", "fun_name": "test_source_streams", "commit_message": "Bump faker version to test M1 Connector builds (#13235)\n\n* Bump faker version to test M1 Connector builds\r\n\r\n* add PR link to changelog\r\n\r\n* Remove birhtday, it doesn't obey seed\r\n\r\n* bump readme\r\n\r\n* more birthday to remove\r\n\r\n* remove image specificity in publish-command\r\n\r\n* Set AMI for testing.\r\n\r\n* test with including buildx emulators\r\n\r\n* auto-bump connector version\r\n\r\n* add TODO comment\r\n\r\nCo-authored-by: Davin Chia \r\nCo-authored-by: Octavia Squidington III ", "code": "def test_source_streams():\n source = SourceFaker()\n config = {\"count\": 1}\n catalog = source.discover(None, config)\n catalog = AirbyteMessage(type=Type.CATALOG, catalog=catalog).dict(exclude_unset=True)\n schemas = [stream[\"json_schema\"] for stream in catalog[\"catalog\"][\"streams\"]]\n\n assert len(schemas) == 1\n assert schemas[0][\"properties\"] == {\n \"id\": {\"type\": \"number\"},\n \"created_at\": {\"type\": \"string\", \"format\": \"date-time\", \"airbyte_type\": \"timestamp_without_timezone\"},\n \"updated_at\": {\"type\": \"string\", \"format\": \"date-time\", \"airbyte_type\": \"timestamp_without_timezone\"},\n \"job\": {\"type\": \"string\"},\n \"company\": {\"type\": \"string\"},\n \"ssn\": {\"type\": \"string\"},\n \"residence\": {\"type\": \"string\"},\n \"current_location\": {\"type\": \"array\"},\n \"blood_group\": {\"type\": \"string\"},\n \"website\": {\"type\": \"array\"},\n \"username\": {\"type\": \"string\"},\n \"name\": {\"type\": \"string\"},\n \"sex\": {\"type\": \"string\"},\n \"address\": {\"type\": \"string\"},\n \"mail\": {\"type\": \"string\"},\n }\n\n for schema in schemas:\n jsonschema.Draft7Validator.check_schema(schema)\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 229, "n_words": 91, "vocab_size": 53, "complexity": 3, "nloc": 27, "token_counts": 244, "n_ast_nodes": 448, "n_identifiers": 19, "random_cut": "def test_source_streams():\n source = SourceFaker()\n config = {\"count\": 1}\n catalog = source.discover(None, config)\n catalog = AirbyteMessage(type=Type.CATALOG, catalog=catalog).dict(exclude_" }, { "id": 167923, "commit_id": "950408e56f2ff358d28eba1459f24b808583de7b", "repo": "pandas", "path": "pandas/tests/arrays/test_datetimes.py", "file_name": "test_datetimes.py", "fun_name": "test_compare_mismatched_resolutions", "commit_message": "ENH: 
dt64/td64 comparison support non-nano (#47691)\n\n* ENH: dt64/td64 comparison support non-nano\r\n\r\n* mypy fixup", "code": "def test_compare_mismatched_resolutions(self, comparison_op):\n # comparison that numpy gets wrong bc of silent overflows\n op = comparison_op\n\n iinfo = np.iinfo(np.int64)\n vals = np.array([iinfo.min, iinfo.min + 1, iinfo.max], dtype=np.int64)\n\n # Construct so that arr2[1] < arr[1] < arr[2] < arr2[2]\n arr = np.array(vals).view(\"M8[ns]\")\n arr2 = arr.view(\"M8[s]\")\n\n left = DatetimeArray._simple_new(arr, dtype=arr.dtype)\n right = DatetimeArray._simple_new(arr2, dtype=arr2.dtype)\n\n if comparison_op is operator.eq:\n expected = np.array([False, False, False])\n elif comparison_op is operator.ne:\n expected = np.array([True, True, True])\n elif comparison_op in [operator.lt, operator.le]:\n expected = np.array([False, False, True])\n else:\n expected = np.array([False, True, False])\n\n result = op(left, right)\n tm.assert_numpy_array_equal(result, expected)\n\n result = op(left[1], right)\n tm.assert_numpy_array_equal(result, expected)\n\n if op not in [operator.eq, operator.ne]:\n # check that numpy still gets this wrong; if it is fixed we may be\n # able to remove compare_mismatched_resolutions\n np_res = op(left._ndarray, right._ndarray)\n tm.assert_numpy_array_equal(np_res[1:], ~expected[1:])\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 344, "n_words": 130, "vocab_size": 85, "complexity": 5, "nloc": 23, "token_counts": 260, "n_ast_nodes": 397, "n_identifiers": 30, "random_cut": "def test_compare_mismatched_resolutions(self, comparison_op):\n # comparison that numpy gets wrong bc of silent overflows\n op = comparison_op\n\n iinfo = np.iinfo(np.int64)\n vals = np.array([iinfo.min, iinfo.min + 1, iinfo.max], dtype=np.int64)\n\n # Construct so that arr2[1] < arr[1] < arr[2]" }, { "id": 118967, "commit_id": "3326118e83ffe87623ea440d0ead245f21106a27", "repo": "streamlit", "path": "lib/streamlit/app_session.py", "file_name": "app_session.py", "fun_name": "_create_session_state_changed_message", "commit_message": "AppSession: handle script events on the main thread (#4467)\n\nRefactors AppSession to handle all ScriptRunner events on the main thread.\r\n\r\n- ScriptRunner no longer takes an `enqueue` callback param. Instead, it signals that it wants to enqueue a new ForwardMsg via the `ScriptRunnerEvent.ENQUEUE_FORWARD_MSG` event. (This means that there's now only one channel that ScriptRunner uses to communicate back to AppSession - the `on_event` signal.)\r\n- `AppSession.enqueue` is now a private function called `AppSession._enqueue_forward_msg`, for clarity\r\n- `AppSession._on_scriptrunner_event` performs all its logic in an ioloop callback, which means that all ScriptRunner events are handled on the main thread _only_. (Previously, _some_ scriptrunner events were handled in a callback, and others were handled in line.)\r\n- `app_session_test.py` now has a test for the very convoluted `handle_backmsg_exception` function. (This function should be targeted for refactor down the line!)\r\n- There's also a small amount of unrelated cleanups in `app_session_test.py`\r\n\r\nThis is one of the last big pieces that needs to be in place for the \"faster-reruns\" branch. With this change, AppSession will be able to safely identify and ignore ScriptRunner events that come from a \"zombie\" ScriptRunner that's still executing. 
\r\n\r\nIt also (hopefully) makes AppSession easier to reason about: with the exception of the two functions that explicitly enqueue ioloop callbacks to be performed on the main thread, AppSession now executes entirely on the main thread!", "code": "def _create_session_state_changed_message(self) -> ForwardMsg:\n \n msg = ForwardMsg()\n msg.session_state_changed.run_on_save = self._run_on_save\n msg.session_state_changed.script_is_running = (\n self._state == AppSessionState.APP_IS_RUNNING\n )\n return msg\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 72, "n_words": 19, "vocab_size": 16, "complexity": 1, "nloc": 8, "token_counts": 39, "n_ast_nodes": 65, "n_identifiers": 11, "random_cut": "def _create_session_state_changed_message(self) -> ForwardMsg:\n " }, { "id": 89465, "commit_id": "5f5d69722ff6c28ce46ebc958eb9d44d36cbf75b", "repo": "sentry", "path": "tests/sentry/ingest/test_transaction_rule_validator.py", "file_name": "test_transaction_rule_validator.py", "fun_name": "test_non_all_star_rules_valid", "commit_message": "feat(txcluster): Discard rules with all `*`s (#42076)\n\nRules consisting of all `*`s aren't helpful and provide a worse user\r\nexperience, so we want to get rid of them. All `*` rules are produced\r\nwhen the merge threshold is not high enough for the existing data, and\r\nthey look like the following:\r\n\r\n```json\r\n \"rules\": [\r\n \"/*/*/*/*/*/*/**\",\r\n \"/*/*/*/*/*/**\",\r\n \"/*/*/*/*/**\",\r\n \"/*/*/*/**\",\r\n \"/*/*/**\",\r\n \"/*/**\"\r\n ]\r\n```\r\n\r\nThis PR introduces a `RuleValidator` running when computing rules, so\r\nthat invalid rules are never produced. There's also a small refactor to\r\nseparate the concerns of each method. I've tried to build this in an\r\neasy-to-extend way since it's likely we will need to add new validation\r\nrequirements.", "code": "def test_non_all_star_rules_valid():\n assert RuleValidator(\"a/*/**\").is_valid()\n assert RuleValidator(\"/a/*/**\").is_valid()\n\n assert RuleValidator(\"*/a/**\").is_valid()\n assert RuleValidator(\"/*/a/**\").is_valid()\n\n assert RuleValidator(\"a/*/b/**\").is_valid()\n assert RuleValidator(\"/a/*/b/**\").is_valid()\n\n assert RuleValidator(\"a/*/b/*/c/**\").is_valid()\n assert RuleValidator(\"/a/*/b/*/c/**\").is_valid()\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 41, "n_words": 18, "vocab_size": 11, "complexity": 1, "nloc": 9, "token_counts": 76, "n_ast_nodes": 144, "n_identifiers": 3, "random_cut": "def test_non_all_star_rules_valid():\n assert Rule" }, { "id": 152831, "commit_id": "1f92336be768d235c18a82acb2195b7135101ae7", "repo": "stable-diffusion-webui", "path": "modules/deepbooru.py", "file_name": "deepbooru.py", "fun_name": "deepbooru_process", "commit_message": "refactored the deepbooru module to improve speed on running multiple interogations in a row. 
Added the option to generate deepbooru tags for textual inversion preproccessing.", "code": "def deepbooru_process(queue, deepbooru_process_return, threshold):\n model, tags = get_deepbooru_tags_model()\n while True: # while process is running, keep monitoring queue for new image\n pil_image = queue.get()\n if pil_image == \"QUIT\":\n break\n else:\n deepbooru_process_return[\"value\"] = get_deepbooru_tags_from_model(model, tags, pil_image, threshold)\n\n", "url": "https://github.com/AUTOMATIC1111/stable-diffusion-webui.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 84, "n_words": 36, "vocab_size": 32, "complexity": 3, "nloc": 8, "token_counts": 49, "n_ast_nodes": 81, "n_identifiers": 10, "random_cut": "def deepbooru_process(queue, deepbooru_process_return, threshold):\n model, tags = get_deepbooru_tags_model()\n while True: # while process is running, keep monitoring queue for new image\n pil_image = queue.get()\n if pil_image == \"QUIT\":\n break\n else:\n deepbooru_process_return[\"value\"] = get_deepbooru_tags_from_model(model," }, { "id": 182981, "commit_id": "79fbabee0d477a3d9ce3cb55e92c69dbf8be4f4b", "repo": "textual", "path": "tests/devtools/test_redirect_output.py", "file_name": "test_redirect_output.py", "fun_name": "test_print_redirect_to_devtools_only", "commit_message": "Test redirecting to logfile", "code": "async def test_print_redirect_to_devtools_only(devtools):\n await devtools._stop_log_queue_processing()\n\n with redirect_stdout(StdoutRedirector(devtools, None)): # type: ignore\n print(\"Hello, world!\")\n\n assert devtools.log_queue.qsize() == 1\n\n queued_log = await devtools.log_queue.get()\n queued_log_json = json.loads(queued_log)\n payload = queued_log_json[\"payload\"]\n\n assert queued_log_json[\"type\"] == \"client_log\"\n assert payload[\"timestamp\"] == TIMESTAMP\n assert (\n payload[\"encoded_segments\"]\n == \"gANdcQAoY3JpY2guc2VnbWVudApTZWdtZW50CnEBWA0AAABIZWxsbywgd29ybGQhcQJOTodxA4FxBGgBWAEAAAAKcQVOTodxBoFxB2Uu\"\n )\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 100, "n_words": 41, "vocab_size": 32, "complexity": 1, "nloc": 14, "token_counts": 83, "n_ast_nodes": 150, "n_identifiers": 15, "random_cut": "async def test_print_redirect_to_devtools_only(devtools):\n await devtools._stop_log_queue_processing()\n\n with redirect_stdout(StdoutRedirector(devtools, None)): # type: ignore\n print(\"Hello, world!\")\n\n assert devtools.log_queue.qsize() == 1\n\n queued_log = await devtools.log_queue.get()\n queued_log_json = json.loads(queued_l" }, { "id": 169692, "commit_id": "bfdf223133541da7e0002543e36bf71ba59af481", "repo": "pandas", "path": "pandas/tests/reshape/test_get_dummies.py", "file_name": "test_get_dummies.py", "fun_name": "test_dataframe_dummies_drop_first_with_categorical", "commit_message": "ENH: change get_dummies default dtype to bool (#48022)\n\n* ENH: Warn when dtype is not passed to get_dummies\r\n\r\n* Edit get_dummies' dtype warning\r\n\r\n* Add whatsnew entry for issue #45848\r\n\r\n* Fix dtype warning test\r\n\r\n* Suppress warnings in docs\r\n\r\n* Edit whatsnew entry\r\n\r\nCo-authored-by: Marco Edward Gorelli \r\n\r\n* Fix find_stack_level in get_dummies dtype warning\r\n\r\n* Change the default dtype of get_dummies to bool\r\n\r\n* Revert dtype(bool) change\r\n\r\n* Move the changelog entry to v1.6.0.rst\r\n\r\n* Move whatsnew entry to 'Other API changes'\r\n\r\nCo-authored-by: 
Marco Edward Gorelli \r\nCo-authored-by: Marco Edward Gorelli <33491632+MarcoGorelli@users.noreply.github.com>", "code": "def test_dataframe_dummies_drop_first_with_categorical(self, df, sparse, dtype):\n df[\"cat\"] = Categorical([\"x\", \"y\", \"y\"])\n result = get_dummies(df, drop_first=True, sparse=sparse)\n expected = DataFrame(\n {\"C\": [1, 2, 3], \"A_b\": [0, 1, 0], \"B_c\": [0, 0, 1], \"cat_y\": [0, 1, 1]}\n )\n cols = [\"A_b\", \"B_c\", \"cat_y\"]\n expected[cols] = expected[cols].astype(bool)\n expected = expected[[\"C\", \"A_b\", \"B_c\", \"cat_y\"]]\n if sparse:\n for col in cols:\n expected[col] = SparseArray(expected[col])\n tm.assert_frame_equal(result, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 159, "n_words": 60, "vocab_size": 49, "complexity": 3, "nloc": 13, "token_counts": 151, "n_ast_nodes": 238, "n_identifiers": 18, "random_cut": "def test_dataframe_dummies_drop_first_with_categorical(self, df, sparse, dtype):\n df[\"cat\"] = Categorical([\"x\", \"y\", \"y\"])\n result = get_dummies(df, drop_first=True, sparse=sparse)\n expected = DataFrame(\n {\"C\": [1, 2, 3], \"A_b\": [0, 1, 0], \"B_c\": [0, 0, 1], \"cat_y\": [0, 1, 1]}\n )\n cols = [\"A_b\", \"B_c\", \"cat_y\"]\n expected[cols] = expected[cols].astype(bool)\n expected = expected[[\"C\", \"A_b\", \"B_c\", \"cat_y\"]]\n if sparse:\n for col in cols:\n expected[col] = SparseArray(expected[col])\n tm.assert_frame_equal(result, expected)\n" }, { "id": 70350, "commit_id": "4a848bfb4e3ec1a84a3d36fda577c1ed784de498", "repo": "wagtail", "path": "wagtail/core/tests/test_blocks.py", "file_name": "test_blocks.py", "fun_name": "test_bulk_to_python", "commit_message": "Implement a ListValue type for ListBlocks", "code": "def test_bulk_to_python(self):\n block = blocks.ListBlock(blocks.PageChooserBlock())\n\n with self.assertNumQueries(1):\n result = block.bulk_to_python([[4, 5], [], [2]])\n # result will be a list of ListValues - convert to lists for equality check\n clean_result = [list(val) for val in result]\n\n self.assertEqual(clean_result, [\n [Page.objects.get(id=4), Page.objects.get(id=5)],\n [],\n [Page.objects.get(id=2)],\n ])\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 135, "n_words": 42, "vocab_size": 37, "complexity": 2, "nloc": 10, "token_counts": 107, "n_ast_nodes": 165, "n_identifiers": 17, "random_cut": "def test_bulk_to_python(self):\n block = blocks.ListBlock(blocks.PageChooserBlock())\n\n with self.assertNumQueries(1):" }, { "id": 321127, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "qutebrowser/browser/webengine/webenginedownloads.py", "file_name": "webenginedownloads.py", "fun_name": "_do_die", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def _do_die(self):\n progress_signal = self._qt_item.downloadProgress\n progress_signal.disconnect()\n if self._qt_item.state() != QWebEngineDownloadItem.DownloadState.DownloadInterrupted:\n self._qt_item.cancel()\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 42, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 39, "n_ast_nodes": 65, "n_identifiers": 11, "random_cut": "def _do_die(self):\n progress_signal = self._qt_item.downloadProgress\n 
progress_signal.disconnect()\n if self._qt_item.state() != QWebEngineDownloadItem.DownloadState.DownloadInterrupted:\n self._qt_item.cancel()\n" }, { "id": 97793, "commit_id": "3fe3103b93012366f6348a8150cbcd5b4fbb7d8c", "repo": "sentry", "path": "tests/sentry/snuba/metrics/test_snql.py", "file_name": "test_snql.py", "fun_name": "test_dist_count_aggregation_on_tx_status", "commit_message": "feat(metrics): Add derived metric to get total count of transactions (#33111)", "code": "def test_dist_count_aggregation_on_tx_status(self):\n org_id = 1985\n alias = \"thefuture\"\n assert all_transactions(org_id, self.metric_ids, alias) == Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"in\",\n [\n Column(name=\"metric_id\"),\n list(self.metric_ids),\n ],\n alias=None,\n ),\n ],\n alias=alias,\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 266, "n_words": 28, "vocab_size": 24, "complexity": 1, "nloc": 18, "token_counts": 67, "n_ast_nodes": 106, "n_identifiers": 10, "random_cut": "def test_dist_count_aggregation_on_tx_status(self):\n org_id = 1985\n alias = \"thefuture\"\n assert all" }, { "id": 157748, "commit_id": "1c2e25a557db446b5691c18e595e5664cc254730", "repo": "d2l-zh", "path": "d2l/torch.py", "file_name": "torch.py", "fun_name": "read_data_bananas", "commit_message": "sync lib", "code": "def read_data_bananas(is_train=True):\n \n data_dir = d2l.download_extract('banana-detection')\n csv_fname = os.path.join(data_dir, 'bananas_train' if is_train\n else 'bananas_val', 'label.csv')\n csv_data = pd.read_csv(csv_fname)\n csv_data = csv_data.set_index('img_name')\n images, targets = [], []\n for img_name, target in csv_data.iterrows():\n images.append(torchvision.io.read_image(\n os.path.join(data_dir, 'bananas_train' if is_train else\n 'bananas_val', 'images', f'{img_name}')))\n # 这里的target包含(类别,左上角x,左上角y,右下角x,右下角y),\n # 其中所有图像都具有相同的香蕉类(索引为0)\n targets.append(list(target))\n return images, torch.tensor(targets).unsqueeze(1) / 256\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 164, "n_words": 49, "vocab_size": 36, "complexity": 4, "nloc": 13, "token_counts": 126, "n_ast_nodes": 219, "n_identifiers": 26, "random_cut": "def read_data_bananas(is_train=True):\n \n data_dir = d2l.download_extract('banana-detection')\n csv_fname = os.path.join(data_dir, 'bananas_train' if is_train\n else 'bananas_val', 'label.csv')\n csv_data = pd.read_csv(csv_fname)\n csv_data = csv_data.set_index('img_name')\n images, targets = [], []\n fo" }, { "id": 47412, "commit_id": "165945250b62b86efe4f88c6ae8005883a29f2fd", "repo": "airflow", "path": "airflow/models/taskinstance.py", "file_name": "taskinstance.py", "fun_name": "_execute_task", "commit_message": "Fail if task does not push XCom for downstream (#22954)\n\nThe task can already fail if an XCom is pushed but unmappable. This\r\nextends the check to cover cases where the task returns None, or does\r\nnot push at all (i.e. 
do_xcom_push=False).", "code": "def _execute_task(self, context, task_orig):\n \n task_to_execute = self.task\n # If the task has been deferred and is being executed due to a trigger,\n # then we need to pick the right method to come back to, otherwise\n # we go for the default execute\n if self.next_method:\n # __fail__ is a special signal value for next_method that indicates\n # this task was scheduled specifically to fail.\n if self.next_method == \"__fail__\":\n next_kwargs = self.next_kwargs or {}\n traceback = self.next_kwargs.get(\"traceback\")\n if traceback is not None:\n self.log.error(\"Trigger failed:\\n%s\", \"\\n\".join(traceback))\n raise TaskDeferralError(next_kwargs.get(\"error\", \"Unknown\"))\n # Grab the callable off the Operator/Task and add in any kwargs\n execute_callable = getattr(task_to_execute, self.next_method)\n if self.next_kwargs:\n execute_callable = partial(execute_callable, **self.next_kwargs)\n else:\n execute_callable = task_to_execute.execute\n # If a timeout is specified for the task, make it fail\n # if it goes beyond\n try:\n if task_to_execute.execution_timeout:\n # If we are coming in with a next_method (i.e. from a deferral),\n # calculate the timeout from our start_date.\n if self.next_method:\n timeout_seconds = (\n task_to_execute.execution_timeout - (timezone.utcnow() - self.start_date)\n ).total_seconds()\n else:\n timeout_seconds = task_to_execute.execution_timeout.total_seconds()\n try:\n # It's possible we're already timed out, so fast-fail if true\n if timeout_seconds <= 0:\n raise AirflowTaskTimeout()\n # Run task in timeout wrapper\n with timeout(timeout_seconds):\n result = execute_callable(context=context)\n except AirflowTaskTimeout:\n task_to_execute.on_kill()\n raise\n else:\n result = execute_callable(context=context)\n except: # noqa: E722\n _TASK_EXECUTION_FRAME_LOCAL_STORAGE.frame = currentframe()\n raise\n with create_session() as session:\n if task_to_execute.do_xcom_push:\n xcom_value = result\n else:\n xcom_value = None\n if xcom_value is not None: # If the task returns a result, push an XCom containing it.\n self.xcom_push(key=XCOM_RETURN_KEY, value=xcom_value, session=session)\n self._record_task_map_for_downstreams(task_orig, xcom_value, session=session)\n return result\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 993, "n_words": 251, "vocab_size": 154, "complexity": 13, "nloc": 44, "token_counts": 267, "n_ast_nodes": 463, "n_identifiers": 40, "random_cut": "def _execute_task(self, context, task_orig):\n \n task_to_execute = self.task\n # If the task has been deferred and is being executed due to a trigger,\n # then we need to pick the right method to come back to, otherwise\n # we go for the default execute\n if self.next_method:\n # __fail__ is a special signal value for next_method that indicates\n # this task was scheduled specifically to fail.\n if self.next_method == \"__fail__\":\n next_kwargs = self.next_kwargs or {}\n traceback = self.next_kwargs.get(\"traceback\")\n if traceback is not None:\n self.log.error(\"Trigger failed:\\n%s\", \"\\n\".join(traceback))\n raise TaskDeferralError(next_kwargs.get(\"error\", \"Unknown\"))\n # Grab the callable off the Operator/Task and add in any kwargs\n execute_callable = getattr(task_to_execute, self.next_method)\n if self.next_kwargs:\n execute_callable = partial(execute_callable, **self.next_kwargs)\n else:\n execute_callable = task_to_execute.execute\n # If a timeout is 
specified for the task, make it fail\n # if it goes beyond\n try:\n if task_to_execute.execution_timeout:\n # If we are coming in with a next_method (i.e. from a deferral),\n # calculate the timeout from our start_date.\n if self.next_method:\n timeout_seconds = (\n task_to_execute.execution_timeout - (timezone.utcnow() - self.start_date)\n ).total_seconds()\n else:\n timeout_seconds = task_to_execute.execution_timeout.total_seconds()\n try:\n # It's possible we're already timed out, so fast-fail if true\n if timeout_seconds <= 0:\n raise AirflowTaskTimeout()\n # Run task in timeout wrapper\n with timeout(timeout_seconds):\n result = execute_callable(context=context)\n except AirflowTaskTimeout:\n task_to_execute.on_kill()\n raise\n else:\n result = execute_callable(context=context)\n except: # noqa: E722\n _TASK_EXECUTION_FRAME_LOCAL_STORAGE.frame = currentframe()\n raise\n with create_session() as session:\n if task_to_execute.do_xcom_push:\n xcom_value = result\n else:\n xcom_value = None\n if xcom_value is not None: # If the task returns a result, push an XCom containing it.\n self.xcom_push(key=XCOM_RETURN_KEY, value=xcom_value, session=session)\n self._record_task_map_for_downstreams(task_orig, xcom_value, sessio" }, { "id": 151291, "commit_id": "98ba57ffaa99cde45d24106354edaeddf4d72525", "repo": "freqtrade", "path": "tests/exchange/test_exchange.py", "file_name": "test_exchange.py", "fun_name": "test_amount_to_contract_precision2", "commit_message": "Better test for contract calculation change\n\ncloses #7449", "code": "def test_amount_to_contract_precision2(amount, precision, precision_mode, contract_size, expected):\n res = amount_to_contract_precision(amount, precision, precision_mode, contract_size)\n assert pytest.approx(res) == expected\n\n\n@pytest.mark.parametrize('exchange_name,open_rate,is_short,trading_mode,margin_mode', [\n # Bittrex\n ('bittrex', 2.0, False, 'spot', None),\n ('bittrex', 2.0, False, 'spot', 'cross'),\n ('bittrex', 2.0, True, 'spot', 'isolated'),\n # Binance\n ('binance', 2.0, False, 'spot', None),\n ('binance', 2.0, False, 'spot', 'cross'),\n ('binance', 2.0, True, 'spot', 'isolated'),\n])", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize('exchange_name,open_rate,is_short,trading_mode,margin_mode', [\n # Bittrex\n ('bittrex', 2.0, False, 'spot', None),\n ('bittrex', 2.0, False, 'spot', 'cross'),\n ('bittrex', 2.0, True, 'spot', 'isolated'),\n # Binance\n ('binance', 2.0, False, 'spot', None),\n ('binance', 2.0, False, 'spot', 'cross'),\n ('binance', 2.0, True, 'spot', 'isolated'),\n])", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 80, "n_words": 53, "vocab_size": 29, "complexity": 1, "nloc": 3, "token_counts": 34, "n_ast_nodes": 180, "n_identifiers": 12, "random_cut": "def test_amount_to_contract_precision2(amount, precision, precision_mode, contract_size, expected):\n res = amount_to_contract_precision(amount, precision, prec" }, { "id": 215892, "commit_id": "a8d2d1e1397cdc79b2c5f1ad7f6e3b729dcf8857", "repo": "salt", "path": "salt/modules/win_certutil.py", "file_name": "win_certutil.py", "fun_name": "get_cert_serial", "commit_message": "Add tests, fix state module", "code": "def get_cert_serial(cert_file, saltenv=\"base\"):\n \n cert_file = __salt__[\"cp.cache_file\"](cert_file, saltenv)\n\n # Since we're allowing a path, let's make sure it exists\n if not os.path.exists(cert_file):\n msg = \"cert_file not found: {}\".format(cert_file)\n raise CommandExecutionError(msg)\n\n cmd = 
'certutil.exe -silent -verify \"{}\"'.format(cert_file)\n out = __salt__[\"cmd.run\"](cmd)\n # match serial number by paragraph to work with multiple languages\n matches = re.search(r\":\\s*(\\w*)\\r\\n\\r\\n\", out)\n if matches is not None:\n return matches.groups()[0].strip()\n else:\n return None\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 121, "n_words": 63, "vocab_size": 53, "complexity": 3, "nloc": 12, "token_counts": 96, "n_ast_nodes": 166, "n_identifiers": 17, "random_cut": "def get_cert_serial(cert_file, saltenv=\"base\"):\n \n cert_file = __salt__[\"cp.cache_file\"](cert_file, saltenv)\n\n # Since we're allowing a path, let's make sure it exists\n if not os.path.exists(cert_file):\n msg = \"cert_file not found: {}\".format(cert_file)\n raise CommandExecutionError(msg)\n\n cmd = 'certutil.exe -silent -verify \"{}\"'.format(cert_file)\n out" }, { "id": 4279, "commit_id": "ba4e86f146fd75394eb80377fe5780ef889403b0", "repo": "airbyte", "path": "octavia-cli/unit_tests/test_apply/test_resources.py", "file_name": "test_resources.py", "fun_name": "test_create_or_update", "commit_message": "🐙 octavia-cli: secret management (#10885)", "code": "def test_create_or_update(self, mocker, resource):\n expected_results = {resource.resource_id_field: \"resource_id\"}\n operation_fn = mocker.Mock(return_value=expected_results)\n mocker.patch.object(resources, \"ResourceState\")\n payload = \"foo\"\n result, state = resource._create_or_update(operation_fn, payload)\n assert result == expected_results\n assert state == resources.ResourceState.create.return_value\n resources.ResourceState.create.assert_called_with(resource.configuration_path, resource.local_configuration, \"resource_id\")\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 87, "n_words": 32, "vocab_size": 25, "complexity": 1, "nloc": 9, "token_counts": 85, "n_ast_nodes": 136, "n_identifiers": 21, "random_cut": "def test_create_or_update(self, mocker, resource):\n expected_results = {resource.resource_id_field: \"resource_id\"}\n operation_fn = mocker.Mock(return_value=expected_results)\n mocker.patch.object(resources, \"ResourceState\")\n payload = \"foo\"\n result, state = resource._create_or_update(operation_fn, payload)\n assert result == expected_results\n assert state == resources.ResourceState.create.return_valu" }, { "id": 331743, "commit_id": "58ba49c8ef4cd56b15e32b1eb17b268ed7289200", "repo": "pytorch-image-models", "path": "timm/models/mobilevit.py", "file_name": "mobilevit.py", "fun_name": "mobilevit_xxs", "commit_message": "Add MobileViT models (w/ ByobNet base). 
Close #1038.", "code": "def mobilevit_xxs(pretrained=False, **kwargs):\n return _create_mobilevit('mobilevit_xxs', pretrained=pretrained, **kwargs)\n\n\n@register_model", "url": "https://github.com/huggingface/pytorch-image-models.git", "language": "Python", "ast_errors": "@register_model", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 9, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 22, "n_ast_nodes": 39, "n_identifiers": 5, "random_cut": "def mobilevit_xxs(pretrained=False, **kwargs):\n return _create_mo" }, { "id": 80296, "commit_id": "3d45f31536dfd777169c3ff5e9352553b3e89ee3", "repo": "awx", "path": "awx/api/views/root.py", "file_name": "root.py", "fun_name": "get", "commit_message": "Update ping endpoint to use last_seen\n\nUpdate ping endpoint to use last_seen, instead of `modified` on\ninstances `heartbeat`.\n\nSee: https://github.com/ansible/awx/issues/11523", "code": "def get(self, request, format=None):\n \n response = {'ha': is_ha_environment(), 'version': get_awx_version(), 'active_node': settings.CLUSTER_HOST_ID, 'install_uuid': settings.INSTALL_UUID}\n\n response['instances'] = []\n for instance in Instance.objects.exclude(node_type='hop'):\n response['instances'].append(\n dict(\n node=instance.hostname,\n node_type=instance.node_type,\n uuid=instance.uuid,\n heartbeat=instance.last_seen,\n capacity=instance.capacity,\n version=instance.version,\n )\n )\n response['instances'] = sorted(response['instances'], key=operator.itemgetter('node'))\n response['instance_groups'] = []\n for instance_group in InstanceGroup.objects.prefetch_related('instances'):\n response['instance_groups'].append(\n dict(name=instance_group.name, capacity=instance_group.capacity, instances=[x.hostname for x in instance_group.instances.all()])\n )\n response['instance_groups'] = sorted(response['instance_groups'], key=lambda x: x['name'].lower())\n return Response(response)\n\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 333, "n_words": 59, "vocab_size": 46, "complexity": 4, "nloc": 22, "token_counts": 218, "n_ast_nodes": 356, "n_identifiers": 37, "random_cut": "def get(self, request, format=None):\n \n response = {'ha': is_ha_environment(), 'version': get_awx_version(), 'active_node': settings.CLUSTER_HOST_ID, 'install_uuid': settings.INSTALL_UUID}\n\n response['instances'] = []\n for instance in Instance.objects.exclude(node_type='hop'):\n response['instances'].append(\n dict(\n node=instance.hostname,\n node_type=instance.node_type,\n uuid=instance.uuid,\n heartbeat=instance.last_seen,\n capacity=instance.capacity,\n version=instance.version,\n )\n )\n response['instances'] = sorted(response['instances'], key=operator.itemgetter('node'))\n response['instance_groups'] = []\n for instance_group in InstanceGroup.obje" }, { "id": 262686, "commit_id": "3b8b105b0d6539ac12972de94e0b2a5077fa1ce2", "repo": "TTS", "path": "TTS/tts/layers/overflow/neural_hmm.py", "file_name": "neural_hmm.py", "fun_name": "get_mask_for_last_item", "commit_message": "Adding OverFlow (#2183)\n\n* Adding encoder\r\n\r\n* currently modifying hmm\r\n\r\n* Adding hmm\r\n\r\n* Adding overflow\r\n\r\n* Adding overflow setting up flat start\r\n\r\n* Removing runs\r\n\r\n* adding normalization parameters\r\n\r\n* Fixing models on same device\r\n\r\n* Training overflow and plotting evaluations\r\n\r\n* Adding inference\r\n\r\n* At the end of epoch the test sentences are coming on cpu instead of gpu\r\n\r\n* 
Adding figures from model during training to monitor\r\n\r\n* reverting tacotron2 training recipe\r\n\r\n* fixing inference on gpu for test sentences on config\r\n\r\n* moving helpers and texts within overflows source code\r\n\r\n* renaming to overflow\r\n\r\n* moving loss to the model file\r\n\r\n* Fixing the rename\r\n\r\n* Model training but not plotting the test config sentences's audios\r\n\r\n* Formatting logs\r\n\r\n* Changing model name to camelcase\r\n\r\n* Fixing test log\r\n\r\n* Fixing plotting bug\r\n\r\n* Adding some tests\r\n\r\n* Adding more tests to overflow\r\n\r\n* Adding all tests for overflow\r\n\r\n* making changes to camel case in config\r\n\r\n* Adding information about parameters and docstring\r\n\r\n* removing compute_mel_statistics moved statistic computation to the model instead\r\n\r\n* Added overflow in readme\r\n\r\n* Adding more test cases, now it doesn't saves transition_p like tensor and can be dumped as json", "code": "def get_mask_for_last_item(lengths, device, out_tensor=None):\n \n max_len = torch.max(lengths).item()\n ids = (\n torch.arange(0, max_len, device=device) if out_tensor is None else torch.arange(0, max_len, out=out_tensor)\n )\n mask = ids == lengths.unsqueeze(1) - 1\n return mask\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 84, "n_words": 31, "vocab_size": 25, "complexity": 2, "nloc": 7, "token_counts": 71, "n_ast_nodes": 108, "n_identifiers": 13, "random_cut": "def get_mask_for_last_item(lengths, device, out_tensor=None):\n \n max_len = torch.max(lengths).item()\n ids = (\n torch.arange(0, max_len, device=device) if out_tensor is None else torch.arange(0, max_len, out=out_tensor)\n )\n mask = ids == lengths." 
}, { "id": 11651, "commit_id": "51403a57d03f0b1ddfd7fc533ccee78e23f5faa1", "repo": "jina", "path": "tests/k8s/test_k8s.py", "file_name": "test_k8s.py", "fun_name": "k8s_flow_with_needs", "commit_message": "refactor: unify port args (#4382)", "code": "def k8s_flow_with_needs(docker_images):\n flow = (\n Flow(\n name='test-flow-with-needs',\n port=9090,\n protocol='http',\n )\n .add(\n name='segmenter',\n uses=f'docker://{docker_images[0]}',\n )\n .add(\n name='textencoder',\n uses=f'docker://{docker_images[0]}',\n needs='segmenter',\n )\n .add(\n name='imageencoder',\n uses=f'docker://{docker_images[0]}',\n needs='segmenter',\n )\n .add(\n name='merger',\n uses_before=f'docker://{docker_images[1]}',\n needs=['imageencoder', 'textencoder'],\n )\n )\n return flow\n\n\n@pytest.mark.asyncio\n@pytest.mark.timeout(3600)\n@pytest.mark.parametrize('k8s_connection_pool', [True, False])\n@pytest.mark.parametrize(\n 'docker_images',\n [['test-executor', 'executor-merger', 'jinaai/jina']],\n indirect=True,\n)", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "@pytest.mark.asyncio\n@pytest.mark.timeout(3600)\n@pytest.mark.parametrize('k8s_connection_pool', [True, False])\n@pytest.mark.parametrize(\n 'docker_images',\n [['test-executor', 'executor-merger', 'jinaai/jina']],\n indirect=True,\n)", "n_ast_errors": 1, "ast_levels": 19, "n_whitespaces": 281, "n_words": 45, "vocab_size": 32, "complexity": 1, "nloc": 28, "token_counts": 94, "n_ast_nodes": 274, "n_identifiers": 17, "random_cut": "def k8s_flow_with_needs(docker_images):\n flow = (\n Flow(\n name='test-flow-with-needs',\n port=9090,\n protocol='http',\n )\n .add(\n name='segmenter',\n uses=f'docker://{docker_images[0]}',\n )\n .add(\n name='textencoder',\n uses=f'docker://{docker_images[0]}',\n needs='segmenter',\n )\n .add(\n name='imageencoder',\n uses=f'docker://{docker_images[0]}',\n needs='segmenter',\n )\n .add(\n name='merger',\n uses_before=f'docker://{docker_images[1]}',\n needs=['imageencoder', 'textencoder'],\n )\n )\n return flow\n\n\n@pytest.mark.asyncio\n@pytest.mark.timeout(3600)\n@pytest.mark.parametrize('k8s_connection_pool', [True, False])\n@pytest.mark.parametrize(\n 'docker_images',\n [['test-executor', 'executor-merger', 'jinaai/jina']],\n indirect=True,\n)" }, { "id": 153604, "commit_id": "605efa618e7994681f57b11d04d417f353ef8d50", "repo": "modin", "path": "modin/pandas/base.py", "file_name": "base.py", "fun_name": "count", "commit_message": "DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333)\n\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Alexander Myskov ", "code": "def count(self, axis=0, level=None, numeric_only=False): # noqa: PR01, RT01, D200\n \n axis = self._get_axis_number(axis)\n frame = self.select_dtypes([np.number, np.bool]) if numeric_only else self\n\n if level is not None:\n if not frame._query_compiler.has_multiindex(axis=axis):\n raise TypeError(\"Can only count levels on hierarchical columns.\")\n return frame.groupby(level=level, axis=axis, sort=True).count()\n return frame._reduce_dimension(\n frame._query_compiler.count(\n axis=axis, level=level, numeric_only=numeric_only\n )\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 166, "n_words": 49, "vocab_size": 42, "complexity": 4, "nloc": 12, "token_counts": 115, "n_ast_nodes": 177, "n_identifiers": 17, "random_cut": "def count(self, axis=0, level=None, numeric_only=False): # noqa: PR01, RT01, D200\n 
\n axis = self._get_axis_number(axis)\n frame = self.select_dtypes([np.number, np.bool]) if numeric_only else self\n\n if level is not None:\n if not frame._query_compiler.has_multiindex(axis=axis):\n raise TypeError(\"Can only count levels on hierarchical columns.\")\n return frame.groupby(level=level, axis=axis, sort=True).count()\n return frame._reduce_dimension(\n frame._query_compiler.count(\n axis=axis, level=level, numeric_only=numeric_only\n )\n )\n" }, { "id": 77452, "commit_id": "52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c", "repo": "wagtail", "path": "wagtail/models/__init__.py", "file_name": "__init__.py", "fun_name": "reject_moderation", "commit_message": "Replace `PageRevision` with generic `Revision` model (#8441)", "code": "def reject_moderation(self, user=None):\n if self.submitted_for_moderation:\n logger.info(\n 'Page moderation rejected: \"%s\" id=%d revision_id=%d',\n self.content_object.title,\n self.content_object.id,\n self.id,\n )\n log(\n instance=self.as_object(),\n action=\"wagtail.moderation.reject\",\n user=user,\n revision=self,\n )\n self.submitted_for_moderation = False\n self.save(update_fields=[\"submitted_for_moderation\"])\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 218, "n_words": 26, "vocab_size": 25, "complexity": 2, "nloc": 16, "token_counts": 75, "n_ast_nodes": 117, "n_identifiers": 16, "random_cut": "def reject_moderation(self, user=None):\n if self.submitted_for_moderation:\n logger.info(\n 'Page moderation reject" }, { "id": 162874, "commit_id": "ca192b6637778323a25b4fb11341b902ab970fef", "repo": "AutoEq", "path": "webapp/main.py", "file_name": "main.py", "fun_name": "parametric_eq_config_name", "commit_message": "Fixed validators and response structure.", "code": "def parametric_eq_config_name(cls, v):\n if type(v) == str:\n assert v in PEQ_CONFIGS, f'Unknown parametric eq config name \"{v}\"'\n if type(v) == list:\n for config in v:\n if type(config) == str:\n assert config in PEQ_CONFIGS, f'Unknown parametric eq config name \"{config}\"'\n return v\n", "url": "https://github.com/jaakkopasanen/AutoEq.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 117, "n_words": 41, "vocab_size": 23, "complexity": 5, "nloc": 8, "token_counts": 52, "n_ast_nodes": 88, "n_identifiers": 8, "random_cut": "def parametric_eq_config_name(cls, v):\n if type(v) == str:\n assert v in PEQ_CONFIGS, f'Unknown parametric eq config name \"{v}\"'\n if type(v) == list:\n for config in v:\n if type(config) == str:\n assert config in PEQ_CONFIGS, f'Unknown param" }, { "id": 19507, "commit_id": "3387881a6d4fc2d8bdc0f05c484cb2f7222acfb8", "repo": "pipenv", "path": "pipenv/utils/internet.py", "file_name": "internet.py", "fun_name": "replace_pypi_sources", "commit_message": "Code reorg utils into utils module reduces complexity (#4990)\n\n* Split apart the massive utils.py into a utils module", "code": "def replace_pypi_sources(sources, pypi_replacement_source):\n return [pypi_replacement_source] + [\n source for source in sources if not is_pypi_url(source[\"url\"])\n ]\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 28, "n_words": 16, "vocab_size": 15, "complexity": 3, "nloc": 4, "token_counts": 28, "n_ast_nodes": 44, "n_identifiers": 5, "random_cut": "def replace_pypi_sources(sources, pypi_replacement_source):\n return 
[pypi_replacement_source] + [\n source for source in sources if not is_pypi_url(source[\"url\"])\n " }, { "id": 200869, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/aggregation/tests.py", "file_name": "tests.py", "fun_name": "test_count_distinct_expression", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_count_distinct_expression(self):\n aggs = Book.objects.aggregate(\n distinct_ratings=Count(\n Case(When(pages__gt=300, then=\"rating\")), distinct=True\n ),\n )\n self.assertEqual(aggs[\"distinct_ratings\"], 4)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 70, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 7, "token_counts": 48, "n_ast_nodes": 77, "n_identifiers": 14, "random_cut": "def test_count_distinct_expression(self):\n aggs = Book.objects.aggregate(\n distinct_ratings=Count(\n Case(Whe" }, { "id": 167152, "commit_id": "28d5b017b1eb229319acbba2ae042eaad94e14ba", "repo": "pandas", "path": "pandas/tests/scalar/timestamp/test_timestamp.py", "file_name": "test_timestamp.py", "fun_name": "test_normalize", "commit_message": "ENH: Timestamp.normalize support non-nano (#47316)", "code": "def test_normalize(self, dt64, ts):\n alt = Timestamp(dt64)\n result = ts.normalize()\n assert result._reso == ts._reso\n assert result == alt.normalize()\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 45, "n_words": 18, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 38, "n_ast_nodes": 60, "n_identifiers": 9, "random_cut": "def test_normalize(self, dt64, ts):\n alt = Timestamp(dt64)\n result = ts.normalize()\n " }, { "id": 79150, "commit_id": "ca7f23d176eb36d15ab13118d329675b2abbda89", "repo": "wagtail", "path": "wagtail/admin/forms/workflows.py", "file_name": "workflows.py", "fun_name": "get_workflow_edit_handler", "commit_message": "Update InlinePanel and StreamField styles for new designs (#8983)", "code": "def get_workflow_edit_handler():\n \n # Note. It's a bit of a hack that we use edit handlers here. Ideally, it should be\n # made easier to reuse the inline panel templates for any formset.\n # Since this form is internal, we're OK with this for now. We might want to revisit\n # this decision later if we decide to allow custom fields on Workflows.\n\n panels = [\n FieldPanel(\"name\", heading=_(\"Give your workflow a name\")),\n InlinePanel(\n \"workflow_tasks\",\n [\n FieldPanel(\"task\", widget=AdminTaskChooser(show_clear_link=False)),\n ],\n heading=_(\"Add tasks to your workflow\"),\n label=_(\"Task\"),\n icon=\"thumbtack\",\n ),\n ]\n edit_handler = ObjectList(panels, base_form_class=WagtailAdminModelForm)\n return edit_handler.bind_to_model(Workflow)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 221, "n_words": 92, "vocab_size": 77, "complexity": 1, "nloc": 15, "token_counts": 79, "n_ast_nodes": 137, "n_identifiers": 17, "random_cut": "def get_workflow_edit_handler():\n \n # Note. It's a bit of a hack that we use edit handlers here. Ideally, it should be\n # made easier to reuse the inline panel templates for any formset.\n # Since this form is internal, we're OK with this for now. 
We might want to revisit\n # this decision later if we decide " }, { "id": 313455, "commit_id": "d6e7a3e537564c6bfc52580e2271491f4f7dc698", "repo": "core", "path": "homeassistant/components/hunterdouglas_powerview/cover.py", "file_name": "cover.py", "fun_name": "_async_force_refresh_state", "commit_message": "Add powerview advanced features (#73061)\n\nCo-authored-by: J. Nick Koston ", "code": "async def _async_force_refresh_state(self) -> None:\n \n await self.async_update()\n self.async_write_ha_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 19, "n_ast_nodes": 37, "n_identifiers": 4, "random_cut": "async def _async_force_refresh_state(self) -> None:\n \n await self.async_update()\n self.async_write_ha_state()\n" }, { "id": 257394, "commit_id": "7caca41c5d3fc625af4df68e237db5df6b02b724", "repo": "haystack", "path": "test/pipelines/test_eval.py", "file_name": "test_eval.py", "fun_name": "test_extractive_qa_labels_with_filters", "commit_message": "Support context matching in `pipeline.eval()` (#2482)\n\n* calculate context pred metrics\r\n\r\n* Update Documentation & Code Style\r\n\r\n* extend doc_relevance_col values\r\n\r\n* fix import order\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix mypy\r\n\r\n* fix typings literal import\r\n\r\n* add option for custom document_id_field\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix tests and dataframe col-order\r\n\r\n* Update Documentation & Code Style\r\n\r\n* rename content to context in eval dataframe\r\n\r\n* add backward compatibility to EvaluationResult.load()\r\n\r\n* Update Documentation & Code Style\r\n\r\n* add docstrings\r\n\r\n* Update Documentation & Code Style\r\n\r\n* support sas\r\n\r\n* Update Documentation & Code Style\r\n\r\n* add answer_scope param\r\n\r\n* Update Documentation & Code Style\r\n\r\n* rework doc_relevance_col and keep document_id col in case of custom_document_id_field\r\n\r\n* Update Documentation & Code Style\r\n\r\n* improve docstrings\r\n\r\n* Update Documentation & Code Style\r\n\r\n* rename document_relevance_criterion into document_scope\r\n\r\n* Update Documentation & Code Style\r\n\r\n* add document_scope and answer_scope to print_eval_report\r\n\r\n* support all new features in execute_eval_run()\r\n\r\n* fix imports\r\n\r\n* fix mypy\r\n\r\n* Update Documentation & Code Style\r\n\r\n* rename pred_label_sas_grid into pred_label_matrix\r\n\r\n* update dataframe schema and sorting\r\n\r\n* Update Documentation & Code Style\r\n\r\n* pass through context_matching params and extend document_scope test\r\n\r\n* Update Documentation & Code Style\r\n\r\n* add answer_scope tests\r\n\r\n* fix context_matching_threshold for document metrics\r\n\r\n* shorten dataframe apply calls\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix queries getting lost if nothing was retrieved\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Update Documentation & Code Style\r\n\r\n* use document_id scopes\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix answer_scope literal\r\n\r\n* Update Documentation & Code Style\r\n\r\n* update the docs (lg changes)\r\n\r\n* Update Documentation & Code Style\r\n\r\n* update tutorial 5\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix tests\r\n\r\n* Add minor lg updates\r\n\r\n* final docstring changes\r\n\r\n* fix single quotes in docstrings\r\n\r\n* Update Documentation 
& Code Style\r\n\r\n* dataframe scopes added for each column\r\n\r\n* better docstrings for context_matching params\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix summarizer eval test\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix test\r\n\r\n* Update Documentation & Code Style\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>\r\nCo-authored-by: agnieszka-m ", "code": "def test_extractive_qa_labels_with_filters(reader, retriever_with_docs, tmp_path):\n labels = [\n # MultiLabel with filter that selects only the document about Carla\n MultiLabel(\n labels=[\n Label(\n query=\"What's her name?\",\n answer=Answer(answer=\"Carla\", offsets_in_context=[Span(11, 16)]),\n document=Document(\n id=\"a0747b83aea0b60c4b114b15476dd32d\",\n content_type=\"text\",\n content=\"My name is Carla and I live in Berlin\",\n ),\n is_correct_answer=True,\n is_correct_document=True,\n origin=\"gold-label\",\n filters={\"name\": [\"filename1\"]},\n )\n ]\n ),\n # MultiLabel with filter that selects only the document about Christelle\n MultiLabel(\n labels=[\n Label(\n query=\"What's her name?\",\n answer=Answer(answer=\"Christelle\", offsets_in_context=[Span(11, 20)]),\n document=Document(\n id=\"4fa3938bef1d83e4d927669666d0b705\",\n content_type=\"text\",\n content=\"My name is Christelle and I live in Paris\",\n ),\n is_correct_answer=True,\n is_correct_document=True,\n origin=\"gold-label\",\n filters={\"name\": [\"filename3\"]},\n )\n ]\n ),\n ]\n\n pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)\n eval_result = pipeline.eval(labels=labels, params={\"Retriever\": {\"top_k\": 5}})\n\n metrics = eval_result.calculate_metrics(document_scope=\"document_id\")\n\n reader_result = eval_result[\"Reader\"]\n retriever_result = eval_result[\"Retriever\"]\n\n # The same query but with two different filters and thus two different answers is answered correctly in both cases.\n assert (\n reader_result[reader_result[\"rank\"] == 1][\"answer\"].iloc[0]\n in reader_result[reader_result[\"rank\"] == 1][\"gold_answers\"].iloc[0]\n )\n assert (\n retriever_result[retriever_result[\"rank\"] == 1][\"document_id\"].iloc[0]\n in retriever_result[retriever_result[\"rank\"] == 1][\"gold_document_ids\"].iloc[0]\n )\n assert metrics[\"Reader\"][\"exact_match\"] == 1.0\n assert metrics[\"Reader\"][\"f1\"] == 1.0\n assert metrics[\"Retriever\"][\"mrr\"] == 1.0\n assert metrics[\"Retriever\"][\"recall_multi_hit\"] == 1.0\n assert metrics[\"Retriever\"][\"recall_single_hit\"] == 1.0\n assert metrics[\"Retriever\"][\"precision\"] == 1.0\n assert metrics[\"Retriever\"][\"map\"] == 1.0\n assert metrics[\"Retriever\"][\"ndcg\"] == 1.0\n\n\n@pytest.mark.parametrize(\"retriever_with_docs\", [\"tfidf\"], indirect=True)\n@pytest.mark.parametrize(\"document_store_with_docs\", [\"memory\"], indirect=True)\n@pytest.mark.parametrize(\"reader\", [\"farm\"], indirect=True)", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"retriever_with_docs\", [\"tfidf\"], indirect=True)\n@pytest.mark.parametrize(\"document_store_with_docs\", [\"memory\"], indirect=True)\n@pytest.mark.parametrize(\"reader\", [\"farm\"], indirect=True)", "n_ast_errors": 1, "ast_levels": 20, "n_whitespaces": 862, "n_words": 190, "vocab_size": 104, "complexity": 1, "nloc": 58, "token_counts": 391, "n_ast_nodes": 707, "n_identifiers": 37, "random_cut": "def test_extractive_qa_labels_with_filters(reader, retriever_with_docs, 
tmp_path):\n labels = [\n # MultiLabel with filter that selects only the document about Carla\n MultiLabel(\n labels=[\n Label(\n query=\"What's her name?\",\n answer=Answer(answer=\"Carla\", offsets_in_context=[Span(11, 16)]),\n document=Document(\n id=\"a0747b83aea0b60c4b114b15476dd32d\",\n content_type=\"text\",\n content=\"My name is Carla and I live in Berlin\",\n ),\n is_correct_answer=True,\n is_correct_document=True,\n origin=\"gold-label\",\n filters={\"name\": [\"filename1\"]},\n )\n ]\n ),\n # MultiLabel with filter that selects only the document about Christelle\n MultiLabel(\n labels=[\n Label(\n query=\"What's her name?\",\n answer=Answer(answer=\"Christelle\", offsets_in_context=[Span(11, 20)]),\n document=Document(\n id=\"4fa3938bef1d83e4d927669666d0b705\",\n content_type=\"text\",\n content=\"My name is Christelle and I live in Paris\",\n ),\n is_correct_answer=True,\n is_correct_document=True,\n origin=\"gold-label\",\n filters={\"name\": [\"filename3\"]},\n )\n ]\n ),\n ]\n\n pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)\n eval_result = pipeline.eval(labels=labels, params={\"Retriever\": {\"top_k\": 5}})\n\n metrics = eval_result.calculate_metrics(document_scope=\"document_id\")\n\n reader_result = eval_result[\"Reader\"]\n retriever_result = eval_result[\"Retriever\"]\n\n # The same query but with two different filters and thus two different answers is answered correctly in both cases.\n assert (\n reader_result[reader_result[\"rank\"] == 1][\"answer\"].iloc[0]\n in reader_result[reader_result[\"rank\"] == 1][\"gold_answers\"].iloc[0]\n )\n assert (\n retriever_result[retriever_result[\"rank\"] == 1][\"document_id\"].iloc[0]\n in retriever_result[retriever_result[\"rank\"] == 1][\"gold_document_ids\"].il" }, { "id": 116707, "commit_id": "27a418a188709d95f74e49e3a02bd5290b5efcc6", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/mysql_handler/tests/test_mysql_handler.py", "file_name": "test_mysql_handler.py", "fun_name": "test_2_get_tables", "commit_message": "Fully local docker env for mysql tests (#2934)\n\n* Fully local docker env for mysql tests", "code": "def test_2_get_tables(self):\n tables = self.get_table_names()\n assert \"rentals\" in tables, f\"expected to have 'rentals' table in the db but got: {tables}\"\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 33, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 37, "n_identifiers": 4, "random_cut": "def test_2_get_tables(self):\n tables = self.get_table_names(" }, { "id": 73512, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/settings/tests/test_model.py", "file_name": "test_model.py", "fun_name": "test_get_page_url_when_for_settings_fetched_via_for_site", "commit_message": "Reformat with black", "code": "def test_get_page_url_when_for_settings_fetched_via_for_site(self):\n \n self._create_importantpages_object()\n\n settings = ImportantPages.for_site(self.default_site)\n\n # Force site root paths query beforehand\n self.default_site.root_page._get_site_root_paths()\n\n for page_fk_field, expected_result in (\n (\"sign_up_page\", \"http://localhost/\"),\n (\"general_terms_page\", \"http://localhost/\"),\n (\"privacy_policy_page\", \"http://other/\"),\n ):\n with self.subTest(page_fk_field=page_fk_field):\n\n # only the first request for each URL will trigger queries.\n # 2 are 
triggered instead of 1 here, because tests use the\n # database cache backed, and the cache is queried each time\n # to fetch site root paths (because there's no 'request' to\n # store them on)\n\n with self.assertNumQueries(2):\n\n self.assertEqual(\n settings.get_page_url(page_fk_field), expected_result\n )\n\n # when called directly\n self.assertEqual(\n settings.get_page_url(page_fk_field), expected_result\n )\n\n # when called indirectly via shortcut\n self.assertEqual(\n getattr(settings.page_url, page_fk_field), expected_result\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 506, "n_words": 102, "vocab_size": 74, "complexity": 2, "nloc": 20, "token_counts": 115, "n_ast_nodes": 201, "n_identifiers": 17, "random_cut": "def test_get_page_url_when_for_settings_fetched_via_for_site(self):\n \n self._create_importantpages_object()\n\n settings = ImportantPages.for_site(self.default_site)\n\n # Force site root paths query beforehand\n self.default_site.root_page._get_site_root_paths()\n\n for page_fk_field, expected_result in (\n (\"sign_up_page\", \"http://localhost/\"),\n (\"general_terms_page\", \"http://localhost/\"),\n (\"privacy_policy_page\", \"http://other/\"),\n ):\n with self.subTest(page_fk_field=page_fk_field):\n\n # only the first request for each URL will trigger queries.\n # 2 are triggered instead of 1 here, because tests use the\n # database cache backed, and the cache is queried each time\n # to fetch site root paths (because there's no 'request' to\n # store them on)\n\n with self.assertNumQueries(2):\n\n self" }, { "id": 52037, "commit_id": "ceda319accdeb81df85431b4ebc439f0398e80ec", "repo": "PaddleHub", "path": "modules/image/text_recognition/ch_pp-ocrv3/module.py", "file_name": "module.py", "fun_name": "add_module_config_arg", "commit_message": "update ch_pp-ocrv3 (#2033)\n\n* update ch_pp-ocrv3\r\n\r\n* update README\r\n\r\n* update defalut valuex\r\n\r\n* add a param", "code": "def add_module_config_arg(self):\n \n self.arg_config_group.add_argument('--use_gpu',\n type=ast.literal_eval,\n default=False,\n help=\"whether use GPU or not\")\n self.arg_config_group.add_argument('--output_dir',\n type=str,\n default='ocr_result',\n help=\"The directory to save output images.\")\n self.arg_config_group.add_argument('--visualization',\n type=ast.literal_eval,\n default=False,\n help=\"whether to save output as images.\")\n self.arg_config_group.add_argument('--det_db_unclip_ratio',\n type=float,\n default=1.5,\n help=\"unclip ratio for post processing in DB detection.\")\n self.arg_config_group.add_argument(\n '--det_db_score_mode',\n type=str,\n default=\"fast\",\n help=\"method to calc the final det score, one of fast(using box) and slow(using poly).\")\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 647, "n_words": 57, "vocab_size": 48, "complexity": 1, "nloc": 22, "token_counts": 112, "n_ast_nodes": 183, "n_identifiers": 11, "random_cut": "def add_module_config_arg(self):\n \n self.arg_config_group.add_argument('--use_gpu',\n type=ast.literal_eval,\n default=False,\n help=\"whether use GPU or not\")\n self.arg_config_group.add_argument('--output_dir',\n type=str,\n default='ocr_result',\n help=\"The directory to save output images.\")\n self.arg_config_group.add_argument('--visualization',\n type=ast.literal_eval,\n default=False,\n help=\"whether to save output as 
images.\")\n self.arg_config_group.add_argument('--det_db_unclip_ratio',\n " }, { "id": 264811, "commit_id": "304282bd4f20aa80b4826b47777b87972ac11832", "repo": "netbox", "path": "netbox/dcim/tests/test_cablepaths.py", "file_name": "test_cablepaths.py", "fun_name": "test_208_circuittermination", "commit_message": "Update tests", "code": "def test_208_circuittermination(self):\n \n interface1 = Interface.objects.create(device=self.device, name='Interface 1')\n circuittermination1 = CircuitTermination.objects.create(circuit=self.circuit, site=self.site, term_side='A')\n\n # Create cable 1\n cable1 = Cable(terminations=[\n CableTermination(cable_end='A', termination=interface1),\n CableTermination(cable_end='B', termination=circuittermination1),\n ])\n cable1.save()\n\n # Check for incomplete path\n self.assertPathExists(\n (interface1, cable1, circuittermination1),\n is_complete=False\n )\n self.assertEqual(CablePath.objects.count(), 1)\n\n # Delete cable 1\n cable1.delete()\n self.assertEqual(CablePath.objects.count(), 0)\n interface1.refresh_from_db()\n self.assertPathIsNotSet(interface1)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 203, "n_words": 47, "vocab_size": 40, "complexity": 1, "nloc": 17, "token_counts": 144, "n_ast_nodes": 234, "n_identifiers": 28, "random_cut": "def test_208_circuittermination(self):\n \n interface1 = Interface.objects.create(device=self.device, name='Interface 1')\n circuittermination1 = CircuitTermination.objects.create(circuit=self.circuit, site=self.site, term_side='A')\n\n # Create cable 1\n cable1 = Cable(terminations=[\n CableTermination(cable_end='A', termination=interface1),\n CableTermination(cable_end='B', termination=circuittermination1),\n ])\n cable1.save()\n\n # Check for incomplete path\n self.assertPathExists(\n (interface1, cable1, circuittermi" }, { "id": 99057, "commit_id": "eab9ecf246184e5746eabc2f22f84b84d35173f3", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_project_rules.py", "file_name": "test_project_rules.py", "fun_name": "test_create_sentry_app_action_success", "commit_message": "ref(issue-alerts): Move AlertRuleAction logic away from endpoint (#34027)\n\nThis PR moves a function that was in project_rules.py out of there and into an appropriate separate location. 
It also adds the SentryAppEventAction class to outline the required methods on creating new members of SENTRY_APP_ACTIONS (this is following a similar pattern for TicketEventAction and TICKET_ACTIONS).\r\n\r\nThis refactor was also done and for Metric Alerts in #33948.\r\n\r\nI also added a bunch of tests for the error surfacing that was added in #33571", "code": "def test_create_sentry_app_action_success(self):\n responses.add(\n method=responses.POST,\n url=\"https://example.com/sentry/alert-rule\",\n status=202,\n )\n actions = [\n {\n \"id\": \"sentry.rules.actions.notify_event_sentry_app.NotifyEventSentryAppAction\",\n \"settings\": self.sentry_app_settings_payload,\n \"sentryAppInstallationUuid\": self.sentry_app_installation.uuid,\n \"hasSchemaFormConfig\": True,\n },\n ]\n payload = {\n \"name\": \"my super cool rule\",\n \"owner\": f\"user:{self.user.id}\",\n \"conditions\": [],\n \"filters\": [],\n \"actions\": actions,\n \"filterMatch\": \"any\",\n \"actionMatch\": \"any\",\n \"frequency\": 30,\n }\n\n response = self.get_success_response(\n self.organization.slug, self.project.slug, status_code=200, **payload\n )\n new_rule_id = response.data[\"id\"]\n assert new_rule_id is not None\n rule = Rule.objects.get(id=new_rule_id)\n assert rule.data[\"actions\"] == actions\n assert len(responses.calls) == 1\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 375, "n_words": 71, "vocab_size": 58, "complexity": 1, "nloc": 32, "token_counts": 160, "n_ast_nodes": 278, "n_identifiers": 29, "random_cut": "def test_create_sentry_app_action_success(self):\n responses.add(\n method=responses.POST,\n url=\"https://example.com/sentry/alert-rule\",\n status=202,\n )\n actions = [\n {\n \"id\": \"sentry.rules.actions.notify_event_sentry_app.NotifyEventSentryAppAction\",\n \"settings\": self.sentry_app_settings" }, { "id": 212384, "commit_id": "700b5be79008ec576dc9994ee48850da43277d3e", "repo": "bokeh", "path": "bokeh/models/callbacks.py", "file_name": "callbacks.py", "fun_name": "_check_if_provided_a_valid_value", "commit_message": "Generalize tooltips and add description to widgets (#12154)\n\n* Generalize tooltips and add description to widgets\r\n\r\n* Fail in old compiler if TS had errors\r\n\r\n* Update font-awesome icon's implementation\r\n\r\n* Add models representing element queries\r\n\r\n* Introduce a ViewManager for a Document\r\n\r\n* Make LayoutDOM a UIElement\r\n\r\n* Expose Tooltip's target property\r\n\r\n* Make embed_items (etc.) 
return ViewManager\r\n\r\n* Ignore Undefined/Intrinsic initializer values\r\n\r\n* Update visual baselines\r\n\r\n* Use explicit __all__ exports in models.ui\r\n\r\n* Use the correct DOM element in mousedown\r\n\r\n* Allow attach tooltips to UIElements\r\n\r\n* Redesign and generalize menus\r\n\r\n* Add HelpButton and BuiltinIcon models\r\n\r\n* Fix eager eval in Instance's sphinx_link\r\n\r\n* Add LayoutDOM.context_menu property\r\n\r\n* Let TooltipView handle initial rendering\r\n\r\n* Add position to a Tooltip to display it\r\n\r\n* Move icons to the ui module\r\n\r\n* Make menu items UI elements\r\n\r\n* Allow to reference property values in DOM\r\n\r\n* Redesign stylesheet management, avoid style\r\n\r\n* Add experimental support for third-party icons\r\n\r\n* Add a stub Dialog UI component implementation\r\n\r\n* Add SetValue(obj, attr, value) callback model\r\n\r\n* Allow Callback models in js_{event,property}_callbacks\r\n\r\n* Allow to display Inspector in a dialog box\r\n\r\n* Rename Separator->Divider for consistent naming\r\n\r\n* Preliminary support for icon sizing\r\n\r\n* Split up models/dom/index.ts into sub-modules\r\n\r\n* Update font-awesome example\r\n\r\n* Add more unit tests for core/util/iterator\r\n\r\n* Add SVGIcon and update docstrings\r\n\r\n* Add/Improve docstrings in models/dom.py\r\n\r\n* More docstrings\r\n\r\n* Apply review suggestions\r\n\r\nCo-authored-by: Bryan Van de Ven \r\n\r\nCo-authored-by: Bryan Van de Ven ", "code": "def _check_if_provided_a_valid_value(self):\n descriptor = self.obj.lookup(self.attr)\n\n if descriptor.property.is_valid(self.value):\n return None\n else:\n return f\"{self.value!r} is not a valid value for {self.obj}.{self.attr}\"\n\n# TODO: class Show(Callback): target = Required(Either(Instance(DOMNode), Instance(UIElement)))\n# TODO: class Hide(Callback): ...\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", "url": "https://github.com/bokeh/bokeh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 77, "n_words": 46, "vocab_size": 32, "complexity": 2, "nloc": 6, "token_counts": 36, "n_ast_nodes": 92, "n_identifiers": 9, "random_cut": "def _check_if_provided_a_valid_value(self):\n descriptor = self.obj.lookup(self.attr)\n\n if descriptor.property.is_valid(self.value):\n return None\n else:\n return f\"{self.value!r} is not a valid value for {self.obj}.{self.attr}\"\n\n# TODO: class Show(Callback): target = Required(Either(Instance(DOMNode), Instance(UIElement)))\n# TODO: class Hide(Callback): ...\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#---" }, { "id": 290849, "commit_id": "8570d3aabeb0bde1345659087849acd9de946ce5", "repo": 
"core", "path": "homeassistant/components/lock/__init__.py", "file_name": "__init__.py", "fun_name": "supported_features", "commit_message": "Adjust type hints for LockEntityFeature (#82256)", "code": "def supported_features(self) -> LockEntityFeature | int:\n \n return self._attr_supported_features\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 25, "n_identifiers": 5, "random_cut": "def supported_features(self) -> LockEntityFeature | int:\n \n return self._attr_supported_features\n" }, { "id": 292958, "commit_id": "1556868d562b0426effd0d556b9665d2865a8018", "repo": "core", "path": "tests/components/samsungtv/conftest.py", "file_name": "conftest.py", "fun_name": "rest_api_fixture", "commit_message": "Use async rest api in SamsungTV (#67369)\n\nCo-authored-by: epenet ", "code": "def rest_api_fixture() -> Mock:\n \n with patch(\n \"homeassistant.components.samsungtv.bridge.SamsungTVAsyncRest\",\n autospec=True,\n ) as rest_api_class:\n rest_api_class.return_value.rest_device_info.return_value = {\n \"id\": \"uuid:be9554b9-c9fb-41f4-8920-22da015376a4\",\n \"device\": {\n \"modelName\": \"82GXARRS\",\n \"wifiMac\": \"aa:bb:cc:dd:ee:ff\",\n \"name\": \"[TV] Living Room\",\n \"type\": \"Samsung SmartTV\",\n \"networkType\": \"wireless\",\n },\n }\n yield rest_api_class.return_value\n\n\n@pytest.fixture(name=\"remotews\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture(name=\"remotews\")", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 187, "n_words": 36, "vocab_size": 35, "complexity": 1, "nloc": 17, "token_counts": 63, "n_ast_nodes": 142, "n_identifiers": 10, "random_cut": "def rest_api_fixture() -> Mock:\n \n with patch(\n \"homeassistant.components.samsungtv.bridge.SamsungTVAsyncRest\",\n " }, { "id": 175829, "commit_id": "54610bb448a9cf5be77d53b66169fca4c11be6cb", "repo": "cpython", "path": "Lib/test/test_posix.py", "file_name": "test_posix.py", "fun_name": "prepare", "commit_message": "bpo-46426: Improve tests for the dir_fd argument (GH-30668)\n\nEnsure that directory file descriptors refer to directories different\r\nfrom the current directory, and that src_dir_fd and dst_dir_fd refer\r\nto different directories.\r\n\r\nAdd context manager open_dir_fd() in test.support.os_helper.", "code": "def prepare(self):\n TestPosixDirFd.count += 1\n name = f'{os_helper.TESTFN}_{self.count}'\n base_dir = f'{os_helper.TESTFN}_{self.count}base'\n posix.mkdir(base_dir)\n self.addCleanup(posix.rmdir, base_dir)\n fullname = os.path.join(base_dir, name)\n assert not os.path.exists(fullname)\n with os_helper.open_dir_fd(base_dir) as dir_fd:\n yield (dir_fd, name, fullname)\n", "url": "https://github.com/python/cpython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 95, "n_words": 29, "vocab_size": 27, "complexity": 1, "nloc": 10, "token_counts": 74, "n_ast_nodes": 148, "n_identifiers": 19, "random_cut": "def prepare(self):\n TestPosixDirFd.count" }, { "id": 253721, "commit_id": "098975b3dce69956e5ebc5f95a04589d2bfc8c22", "repo": "d2l-en", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "transpose_qkv", "commit_message": "Refactoring Attention scoring functions and Bahdanau Attention (#2095)\n\n* Attention scoring funcs\r\n\r\n* attn\r\n\r\n* sequence mask\r\n\r\n* remove accents", 
"code": "def transpose_qkv(X, num_heads):\n \n # Shape of input X: (batch_size, no. of queries or key-value pairs,\n # num_hiddens). Shape of output X: (batch_size, no. of queries or\n # key-value pairs, num_heads, num_hiddens / num_heads)\n X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)\n # Shape of output X: (batch_size, num_heads, no. of queries or key-value\n # pairs, num_hiddens / num_heads)\n X = X.transpose(0, 2, 1, 3)\n # Shape of output: (batch_size * num_heads, no. of queries or key-value\n # pairs, num_hiddens / num_heads)\n return X.reshape(-1, X.shape[2], X.shape[3])\n", "url": "https://github.com/d2l-ai/d2l-en.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 116, "n_words": 83, "vocab_size": 36, "complexity": 1, "nloc": 4, "token_counts": 69, "n_ast_nodes": 107, "n_identifiers": 6, "random_cut": "def transpose_qkv(X, num_heads):\n \n # Shape of input X: (batch_size, no. of queries or key-value pairs," }, { "id": 110601, "commit_id": "3fee7584e2f03049f4dec0ccdf4055cb0e38e05b", "repo": "matplotlib", "path": "lib/mpl_toolkits/axisartist/tests/test_axislines.py", "file_name": "test_axislines.py", "fun_name": "test_axisline_style_size_color", "commit_message": "Add facecolor to axisline style", "code": "def test_axisline_style_size_color():\n fig = plt.figure(figsize=(2, 2))\n ax = fig.add_subplot(axes_class=AxesZero)\n ax.axis[\"xzero\"].set_axisline_style(\"-|>\", size=2.0, facecolor='r')\n ax.axis[\"xzero\"].set_visible(True)\n ax.axis[\"yzero\"].set_axisline_style(\"->, size=1.5\")\n ax.axis[\"yzero\"].set_visible(True)\n\n for direction in (\"left\", \"right\", \"bottom\", \"top\"):\n ax.axis[direction].set_visible(False)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 51, "n_words": 24, "vocab_size": 23, "complexity": 2, "nloc": 9, "token_counts": 106, "n_ast_nodes": 178, "n_identifiers": 15, "random_cut": "def test_axisline_style_size_color():\n f" }, { "id": 303343, "commit_id": "842cc060f80a632032dacbe1e2eaa8ca6421eda0", "repo": "core", "path": "tests/components/zwave_js/conftest.py", "file_name": "conftest.py", "fun_name": "mock_addon_running", "commit_message": "Fix zwave_js addon info (#76044)\n\n* Add add-on store info command\r\n\r\n* Use add-on store info command in zwave_js\r\n\r\n* Fix init tests\r\n\r\n* Update tests\r\n\r\n* Fix method for addon store info\r\n\r\n* Fix response parsing\r\n\r\n* Fix store addon installed response parsing\r\n\r\n* Remove addon info log that can contain network keys\r\n\r\n* Add supervisor store addon info test\r\n\r\n* Default to version None if add-on not installed\r\n\r\nCo-authored-by: Mike Degatano \r\n\r\nCo-authored-by: Mike Degatano ", "code": "def mock_addon_running(addon_store_info, addon_info):\n \n addon_store_info.return_value = {\n \"installed\": \"1.0.0\",\n \"state\": \"started\",\n \"version\": \"1.0.0\",\n }\n addon_info.return_value[\"state\"] = \"started\"\n addon_info.return_value[\"version\"] = \"1.0.0\"\n return addon_info\n\n\n@pytest.fixture(name=\"addon_installed\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture(name=\"addon_installed\")", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 60, "n_words": 22, "vocab_size": 19, "complexity": 1, "nloc": 9, "token_counts": 44, "n_ast_nodes": 104, "n_identifiers": 7, "random_cut": "def mock_addon_running(addon_store_info, addon_info):\n \n 
addon_store_info.return_value = {\n \"installed\": \"1.0.0\",\n \"state\": \"started\",\n \"version\": \"1.0.0\",\n }\n addon_info.return_value[\"state\"] = \"started\"\n addon_info.return_value[\"version\"] = \"1.0.0\"\n retur" }, { "id": 133618, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "release/tune_tests/cloud_tests/workloads/_tune_script.py", "file_name": "_tune_script.py", "fun_name": "fn_trainable", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def fn_trainable(config, checkpoint_dir=None):\n if checkpoint_dir:\n with open(os.path.join(checkpoint_dir, \"checkpoint.json\"), \"rt\") as fp:\n state = json.load(fp)\n else:\n state = {\"internal_iter\": 0}\n\n for i in range(state[\"internal_iter\"], config[\"max_iterations\"]):\n state[\"internal_iter\"] = i\n time.sleep(config[\"sleep_time\"])\n\n if i % config[\"checkpoint_freq\"] == 0:\n with tune.checkpoint_dir(step=i) as cd:\n with open(os.path.join(cd, \"checkpoint.json\"), \"wt\") as fp:\n json.dump(state, fp)\n\n tune.report(\n score=i * 10 * config[\"score_multiplied\"],\n internal_iter=state[\"internal_iter\"],\n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 189, "n_words": 54, "vocab_size": 41, "complexity": 4, "nloc": 17, "token_counts": 151, "n_ast_nodes": 260, "n_identifiers": 22, "random_cut": "def fn_trainable(config, checkpoint_dir=None):\n if checkpoint_dir:\n with open(os.path.join(checkpoint_dir, \"checkpoint.json\"), \"rt\") as fp:\n state = json.load(fp)\n else:\n state = {\"internal_iter\": 0}\n\n for i in range(state[\"internal_iter\"], config[\"max_iterations\"]):\n state[\"internal_iter\"] = i\n time.sleep(config[\"sleep_time\"])\n\n if i % config[\"checkpoint_freq\"] == 0:\n with tune.checkpoint_dir(step=i) as cd:\n with open(os.path.join(cd, \"checkpoint.json\"), \"wt\") as fp:\n " }, { "id": 75569, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/search/backends/elasticsearch5.py", "file_name": "elasticsearch5.py", "fun_name": "get_content_type", "commit_message": "Reformat with black", "code": "def get_content_type(self):\n \n return self.model._meta.app_label + \".\" + self.model.__name__\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 22, "n_words": 8, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 22, "n_ast_nodes": 39, "n_identifiers": 6, "random_cut": "def get_content_type(self):\n \n return self.model." 
}, { "id": 171848, "commit_id": "7c208c8907f5ab18f807366c0c5e26ae1dbca299", "repo": "pandas", "path": "pandas/tests/arrays/test_datetimes.py", "file_name": "test_datetimes.py", "fun_name": "test_astype_to_sparse_dt64", "commit_message": "BUG: DatetimeArray.astype(Sparse) (#50082)\n\n* BUG: DatetimeArray.astype(Sparse)\r\n\r\n* GH ref", "code": "def test_astype_to_sparse_dt64(self):\n # GH#50082\n dti = pd.date_range(\"2016-01-01\", periods=4)\n dta = dti._data\n result = dta.astype(\"Sparse[datetime64[ns]]\")\n\n assert result.dtype == \"Sparse[datetime64[ns]]\"\n assert (result == dta).all()\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 63, "n_words": 22, "vocab_size": 18, "complexity": 1, "nloc": 6, "token_counts": 46, "n_ast_nodes": 80, "n_identifiers": 12, "random_cut": "def test_astype_to_sparse_dt64(self):\n # GH#50082\n dti = pd.date_range(\"2016-01-01\", periods=4)\n dta = dti._data\n result = " }, { "id": 175059, "commit_id": "8cda1c34eb865c1be5b72e8295f7ca7adfdeb113", "repo": "pip", "path": "src/pip/_vendor/colorama/tests/ansitowin32_test.py", "file_name": "ansitowin32_test.py", "fun_name": "testCallWin32UsesLookup", "commit_message": "Upgrade colorama to 0.4.6", "code": "def testCallWin32UsesLookup(self):\n listener = Mock()\n stream = AnsiToWin32(listener)\n stream.win32_calls = {\n 1: (lambda *_, **__: listener(11),),\n 2: (lambda *_, **__: listener(22),),\n 3: (lambda *_, **__: listener(33),),\n }\n stream.call_win32('m', (3, 1, 99, 2))\n self.assertEqual(\n [a[0][0] for a in listener.call_args_list],\n [33, 11, 22] )\n", "url": "https://github.com/pypa/pip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 138, "n_words": 42, "vocab_size": 34, "complexity": 2, "nloc": 12, "token_counts": 117, "n_ast_nodes": 174, "n_identifiers": 13, "random_cut": "def testCallWin32UsesLookup(self):\n listener = Mock()\n stream = AnsiToWin32(listener)\n stream.win32_calls = {\n 1: (lambda *_, **__: listener(11),),\n 2: (lambda *_, **__: listener(22),),\n 3: (lambda *_, **__: listener(33),),\n }\n stream.call_win32('m', (3, 1, 99, 2))\n self.assertEqual(\n [a[0][0] for a in listener.call_args_list],\n [33, 11" }, { "id": 73776, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/models/__init__.py", "file_name": "__init__.py", "fun_name": "active", "commit_message": "Reformat with black", "code": "def active(self):\n \n return self.filter(\n Q(status=WorkflowState.STATUS_IN_PROGRESS)\n | Q(status=WorkflowState.STATUS_NEEDS_CHANGES)\n )\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 51, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 5, "token_counts": 29, "n_ast_nodes": 49, "n_identifiers": 8, "random_cut": "def active(self):\n \n return self.filter(\n Q(status=" }, { "id": 317883, "commit_id": "e87c2b9e2590eadcaa14f7256388675fcc64918d", "repo": "core", "path": "homeassistant/components/traccar/device_tracker.py", "file_name": "device_tracker.py", "fun_name": "import_events", "commit_message": "Bump pytraccar to 1.0.0 (#75671)", "code": "async def import_events(self):\n \n start_intervel = datetime.utcnow()\n events = await self._api.get_reports_events(\n devices=[device.id for device in self._devices],\n start_time=start_intervel,\n end_time=start_intervel - self._scan_interval,\n 
event_types=self._event_types.keys(),\n )\n if events is not None:\n for event in events:\n self._hass.bus.async_fire(\n f\"traccar_{self._event_types.get(event.type)}\",\n {\n \"device_traccar_id\": event.device_id,\n \"device_name\": next(\n (\n dev.name\n for dev in self._devices\n if dev.id == event.device_id\n ),\n None,\n ),\n \"type\": event.type,\n \"serverTime\": event.event_time,\n \"attributes\": event.attributes,\n },\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 547, "n_words": 58, "vocab_size": 49, "complexity": 6, "nloc": 27, "token_counts": 137, "n_ast_nodes": 231, "n_identifiers": 30, "random_cut": "async def import_events(self):\n \n start_intervel = datetime.utcnow()\n events = await self._api.get_reports_events(\n devices=[device.id for device in self._devices],\n start_time=start_intervel,\n end_time=start_intervel - self._scan_interval,\n event_types=self._event_types.keys(),\n )\n if events is not None:\n for event in events:\n self._hass.bus.async_fire(\n f\"traccar_{self._event_types.get(event.type)}\",\n {\n \"device_traccar_id\": event.device_id,\n \"device_name\": next(\n (\n dev.name\n for dev in self._devices\n " }, { "id": 183261, "commit_id": "2d25807f495d2fbd64eb0909b1320ad0cc8e7b7d", "repo": "textual", "path": "sandbox/vertical_container.py", "file_name": "vertical_container.py", "fun_name": "action_remove_placeholder", "commit_message": "[layouts] Fix vertical layout bug with centered content", "code": "async def action_remove_placeholder(self):\n placeholders = self.query(\"Placeholder\")\n placeholders_count = len(placeholders)\n for i, placeholder in enumerate(placeholders):\n if i == placeholders_count - 1:\n await self.remove(placeholder)\n placeholder.parent.children._nodes.remove(placeholder)\n self.refresh(repaint=True, layout=True)\n self.refresh_css()\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 101, "n_words": 26, "vocab_size": 24, "complexity": 3, "nloc": 9, "token_counts": 72, "n_ast_nodes": 119, "n_identifiers": 17, "random_cut": "async def action_remove_placeholder(self):\n placeholders = self.query(" }, { "id": 241816, "commit_id": "a0d44d5be19a84b4cb1984eb795dd0266f665c76", "repo": "scipy", "path": "doc/source/tutorial/examples/morphology_binary_dilation_erosion.py", "file_name": "morphology_binary_dilation_erosion.py", "fun_name": "ball", "commit_message": "DOC: added example for morphology: generate_binary_structure, binary_dilation and erosion (#15244)\n\nCo-authored-by: Pamphile Roy ", "code": "def ball(radius, dtype=np.uint8):\n n = 2 * radius + 1\n Z, Y, X = np.mgrid[\n -radius: radius: n * 1j,\n -radius: radius: n * 1j,\n -radius: radius: n * 1j\n ]\n s = X ** 2 + Y ** 2 + Z ** 2\n return np.array(s <= radius * radius, dtype=dtype)\n\n", "url": "https://github.com/scipy/scipy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 86, "n_words": 51, "vocab_size": 28, "complexity": 1, "nloc": 9, "token_counts": 83, "n_ast_nodes": 121, "n_identifiers": 12, "random_cut": "def ball(radius, dtype=np.uint8):\n n = 2 * radius + 1\n Z, Y, X = np.mgrid[\n -radius: radius: n * 1j,\n -radius: radius: n * 1j,\n -radius: radius: n * 1j\n ]\n s = X ** 2 + Y ** 2 + Z ** 2\n return np.array(s" }, { "id": 191405, "commit_id": "ce7b14b84381c766ae42a0f71953b2a56c024dbb", "repo": "langchain", 
"path": "tests/unit_tests/chains/test_react.py", "file_name": "test_react.py", "fun_name": "test_predict_until_observation_error", "commit_message": "Harrison/add react chain (#24)\n\nfrom https://arxiv.org/abs/2210.03629\r\n\r\nstill need to think if docstore abstraction makes sense", "code": "def test_predict_until_observation_error() -> None:\n \n outputs = [\"foo\\nAction 1: foo\"]\n fake_llm = FakeListLLM(outputs)\n fake_llm_chain = LLMChain(llm=fake_llm, prompt=_FAKE_PROMPT)\n with pytest.raises(ValueError):\n predict_until_observation(fake_llm_chain, \"\", 1)\n\n", "url": "https://github.com/hwchase17/langchain.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 43, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 7, "token_counts": 46, "n_ast_nodes": 82, "n_identifiers": 13, "random_cut": "def test_predict_until_observation_error() -> None:\n \n outputs = [\"foo\\nAction" }, { "id": 30917, "commit_id": "babeff5524bf3d5d62cfa70e1297158a755b0810", "repo": "transformers", "path": "tests/onnx/test_onnx_v2.py", "file_name": "test_onnx_v2.py", "fun_name": "_onnx_export", "commit_message": "Add support for Perceiver ONNX export (#17213)\n\n* Start adding perceiver support for ONNX\r\n\r\n* Fix pad token bug for fast tokenizers\r\n\r\n* Fix formatting\r\n\r\n* Make get_preprocesor more opinionated (processor priority, otherwise tokenizer/feature extractor)\r\n\r\n* Clean docs format\r\n\r\n* Minor cleanup following @sgugger's comments\r\n\r\n* Fix typo in docs\r\n\r\n* Fix another docs typo\r\n\r\n* Fix one more typo in docs\r\n\r\n* Update src/transformers/onnx/utils.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/onnx/utils.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/onnx/utils.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def _onnx_export(self, test_name, name, model_name, feature, onnx_config_class_constructor, device=\"cpu\"):\n from transformers.onnx import export\n\n model_class = FeaturesManager.get_model_class_for_feature(feature)\n config = AutoConfig.from_pretrained(model_name)\n model = model_class.from_config(config)\n onnx_config = onnx_config_class_constructor(model.config)\n\n if is_torch_available():\n from transformers.utils import torch_version\n\n if torch_version < onnx_config.torch_onnx_minimum_version:\n pytest.skip(\n \"Skipping due to incompatible PyTorch version. 
Minimum required is\"\n f\" {onnx_config.torch_onnx_minimum_version}, got: {torch_version}\"\n )\n\n preprocessor = get_preprocessor(model_name)\n\n # Useful for causal lm models that do not use pad tokens.\n if isinstance(preprocessor, PreTrainedTokenizerBase) and not getattr(config, \"pad_token_id\", None):\n config.pad_token_id = preprocessor.eos_token_id\n\n with NamedTemporaryFile(\"w\") as output:\n try:\n onnx_inputs, onnx_outputs = export(\n preprocessor, model, onnx_config, onnx_config.default_onnx_opset, Path(output.name), device=device\n )\n validate_model_outputs(\n onnx_config,\n preprocessor,\n model,\n Path(output.name),\n onnx_outputs,\n onnx_config.atol_for_validation,\n )\n except (RuntimeError, ValueError) as e:\n self.fail(f\"{name}, {feature} -> {e}\")\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 508, "n_words": 108, "vocab_size": 89, "complexity": 6, "nloc": 31, "token_counts": 189, "n_ast_nodes": 319, "n_identifiers": 45, "random_cut": "def _onnx_export(self, test_name, name, model_name, feature, onnx_config_class_constructor, device=\"cpu\"):\n from transformers.onnx import export\n\n model_class = FeaturesManager.get_model_class_for_feature(feature)\n config = AutoConfig.from_pretrained(model_name)\n model = model_class.from_config(config)\n onnx_config = onnx_config_class_constructor(model.config)\n\n if is_torch_available():\n from transformers.utils import torch_version\n\n if torch_version < onnx_config.torch_onnx_minimum_version:\n pytest.skip(\n \"Skipping due to incompatible PyTorch version. Minimum required is\"\n f\" {onnx_config.torch_onnx_minimum_version}, got: {torch_version}\"\n )\n\n preprocessor = get_preprocessor(model_name)\n\n # Useful for causal lm models that do not use pad tokens.\n if isinstance(preprocessor, PreTrainedTokenizerBase) and not getattr(config, \"pad_token_id\", None):\n config.pad_token_id = preprocessor.eos_token_id\n\n with NamedTemporaryFile(\"w\") as output:\n try:\n onnx_inputs, onnx_outputs = export(\n preprocessor, model, onnx_config, onnx_config.default_onnx_opset, Path(output.name), device=device\n )\n validate_model_outputs(\n onnx_config,\n preprocessor,\n model,\n Path(output.name),\n onnx_outputs,\n onnx_config.atol_for_validation,\n )\n except (RuntimeError, Valu" }, { "id": 61094, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/candidates.py", "file_name": "candidates.py", "fun_name": "__eq__", "commit_message": "upd; format", "code": "def __eq__(self, other):\n # type: (Any) -> bool\n if isinstance(other, self.__class__):\n return links_equivalent(self._link, other._link)\n return False\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 47, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 4, "token_counts": 30, "n_ast_nodes": 46, "n_identifiers": 7, "random_cut": "def __eq__(self, other):\n # type: (Any) -> bool\n if isinstance(other, self.__class__):\n return links_equivalent(self._link, other._link)\n ret" }, { "id": 78648, "commit_id": "22e904fb01e0206f95019f0c8ea0db80c7d4efc8", "repo": "wagtail", "path": "wagtail/admin/tests/tests.py", "file_name": "tests.py", "fun_name": "test_summary_items", "commit_message": "Refine dashboard design - summary panels\n\n* Update colours 
and icons for summary panels\n* add h1 id for aria referencing\n* rework layout to use flex box & not floats\n* move summary styles to own component scss file\n* now functions correctly in RTL mode", "code": "def test_summary_items(self):\n response = self.client.get(reverse(\"wagtailadmin_home\"))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"
0 broken links\")\n\n # check that media attached to summary items is correctly pulled in\n if DJANGO_VERSION >= (4, 1):\n self.assertContains(\n response,\n '',\n html=True,\n )\n else:\n self.assertContains(\n response,\n '',\n html=True,\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 221, "n_words": 46, "vocab_size": 38, "complexity": 2, "nloc": 16, "token_counts": 73, "n_ast_nodes": 119, "n_identifiers": 11, "random_cut": "def test_summary_items(self):\n response = self.client.get(reverse(\"wagtailadmin_home\"))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"
0 broken links\")\n\n #" }, { "id": 174321, "commit_id": "846d8e59654078ecc020933ca82e6f1ff2bb44ca", "repo": "pip", "path": "src/pip/_internal/metadata/importlib.py", "file_name": "importlib.py", "fun_name": "canonical_name", "commit_message": "Try to cover Path interface differences", "code": "def canonical_name(self) -> NormalizedName:\n # Try to get the name from the metadata directory name. This is much\n # faster than reading metadata.\n if self._info_location is None:\n return self._get_dist_normalized_name()\n stem, suffix = os.path.splitext(self._info_location.name)\n if suffix not in (\".dist-info\", \".egg-info\"):\n return self._get_dist_normalized_name()\n name, _, _ = stem.partition(\"-\")\n return canonicalize_name(name)\n", "url": "https://github.com/pypa/pip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 118, "n_words": 48, "vocab_size": 39, "complexity": 3, "nloc": 8, "token_counts": 69, "n_ast_nodes": 116, "n_identifiers": 14, "random_cut": "def canonical_name(self) -> NormalizedName:\n # Try to get the name from the metadata directory name. This is much\n # faster than reading metadata.\n if self._info_location is None:\n return self._get_dist_normalized_name()\n stem, suffix = os.path.splitext(self._info_location.name)\n if suffix not in (\".dist-info\", \".egg-info\"):\n return self._get_dist_normalized_name()\n name, _, _ = stem.partition(\"-\")\n" }, { "id": 172809, "commit_id": "4545f4a20d9ff90b99bbd4e3e34b6de4441d6367", "repo": "calibre-web", "path": "cps/web.py", "file_name": "web.py", "fun_name": "render_adv_search_results", "commit_message": "Better epub cover parsing with multiple cover-image items\nCode cosmetics\nrenamed variables\nrefactored xml page generation\nrefactored prepare author", "code": "def render_adv_search_results(term, offset=None, order=None, limit=None):\n sort_param = order[0] if order else [db.Books.sort]\n pagination = None\n\n cc = get_cc_columns(filter_config_custom_read=True)\n calibre_db.session.connection().connection.connection.create_function(\"lower\", 1, db.lcase)\n if not config.config_read_column:\n query = (calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, ub.ReadBook).select_from(db.Books)\n .outerjoin(ub.ReadBook, and_(db.Books.id == ub.ReadBook.book_id,\n int(current_user.id) == ub.ReadBook.user_id)))\n else:\n try:\n read_column = cc[config.config_read_column]\n query = (calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, read_column.value)\n .select_from(db.Books)\n .outerjoin(read_column, read_column.book == db.Books.id))\n except (KeyError, AttributeError):\n log.error(\"Custom Column No.%d is not existing in calibre database\", config.config_read_column)\n # Skip linking read column\n query = calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, None)\n query = query.outerjoin(ub.ArchivedBook, and_(db.Books.id == ub.ArchivedBook.book_id,\n int(current_user.id) == ub.ArchivedBook.user_id))\n\n q = query.outerjoin(db.books_series_link, db.Books.id == db.books_series_link.c.book) \\\n .outerjoin(db.Series) \\\n .filter(calibre_db.common_filters(True))\n\n # parse multiselects to a complete dict\n tags = dict()\n elements = ['tag', 'serie', 'shelf', 'language', 'extension']\n for element in elements:\n tags['include_' + element] = term.get('include_' + element)\n tags['exclude_' + element] = term.get('exclude_' + element)\n\n author_name = term.get(\"author_name\")\n book_title = term.get(\"book_title\")\n publisher = term.get(\"publisher\")\n 
pub_start = term.get(\"publishstart\")\n pub_end = term.get(\"publishend\")\n rating_low = term.get(\"ratinghigh\")\n rating_high = term.get(\"ratinglow\")\n description = term.get(\"comment\")\n read_status = term.get(\"read_status\")\n if author_name:\n author_name = author_name.strip().lower().replace(',', '|')\n if book_title:\n book_title = book_title.strip().lower()\n if publisher:\n publisher = publisher.strip().lower()\n\n search_term = []\n cc_present = False\n for c in cc:\n if c.datatype == \"datetime\":\n column_start = term.get('custom_column_' + str(c.id) + '_start')\n column_end = term.get('custom_column_' + str(c.id) + '_end')\n if column_start:\n search_term.extend([u\"{} >= {}\".format(c.name,\n format_date(datetime.strptime(column_start, \"%Y-%m-%d\").date(),\n format='medium',\n locale=get_locale())\n )])\n cc_present = True\n if column_end:\n search_term.extend([u\"{} <= {}\".format(c.name,\n format_date(datetime.strptime(column_end, \"%Y-%m-%d\").date(),\n format='medium',\n locale=get_locale())\n )])\n cc_present = True\n elif term.get('custom_column_' + str(c.id)):\n search_term.extend([(u\"{}: {}\".format(c.name, term.get('custom_column_' + str(c.id))))])\n cc_present = True\n\n if any(tags.values()) or author_name or book_title or \\\n publisher or pub_start or pub_end or rating_low or rating_high \\\n or description or cc_present or read_status:\n search_term, pub_start, pub_end = extend_search_term(search_term,\n author_name,\n book_title,\n publisher,\n pub_start,\n pub_end,\n tags,\n rating_high,\n rating_low,\n read_status)\n # q = q.filter()\n if author_name:\n q = q.filter(db.Books.authors.any(func.lower(db.Authors.name).ilike(\"%\" + author_name + \"%\")))\n if book_title:\n q = q.filter(func.lower(db.Books.title).ilike(\"%\" + book_title + \"%\"))\n if pub_start:\n q = q.filter(func.datetime(db.Books.pubdate) > func.datetime(pub_start))\n if pub_end:\n q = q.filter(func.datetime(db.Books.pubdate) < func.datetime(pub_end))\n q = adv_search_read_status(q, read_status)\n if publisher:\n q = q.filter(db.Books.publishers.any(func.lower(db.Publishers.name).ilike(\"%\" + publisher + \"%\")))\n q = adv_search_text(q, tags['include_tag'], tags['exclude_tag'], db.Tags.id)\n q = adv_search_text(q, tags['include_serie'], tags['exclude_serie'], db.Series.id)\n q = adv_search_text(q, tags['include_extension'], tags['exclude_extension'], db.Data.format)\n q = adv_search_shelf(q, tags['include_shelf'], tags['exclude_shelf'])\n q = adv_search_language(q, tags['include_language'], tags['exclude_language'], )\n q = adv_search_ratings(q, rating_high, rating_low, )\n\n if description:\n q = q.filter(db.Books.comments.any(func.lower(db.Comments.text).ilike(\"%\" + description + \"%\")))\n\n # search custom culumns\n try:\n q = adv_search_custom_columns(cc, term, q)\n except AttributeError as ex:\n log.error_or_exception(ex)\n flash(_(\"Error on search for custom columns, please restart Calibre-Web\"), category=\"error\")\n\n q = q.order_by(*sort_param).all()\n flask_session['query'] = json.dumps(term)\n ub.store_combo_ids(q)\n result_count = len(q)\n if offset is not None and limit is not None:\n offset = int(offset)\n limit_all = offset + int(limit)\n pagination = Pagination((offset / (int(limit)) + 1), limit, result_count)\n else:\n offset = 0\n limit_all = result_count\n entries = calibre_db.order_authors(q[offset:limit_all], list_return=True, combined=True)\n return render_title_template('search.html',\n adv_searchterm=search_term,\n pagination=pagination,\n 
entries=entries,\n result_count=result_count,\n title=_(u\"Advanced Search\"), page=\"advsearch\",\n order=order[1])\n\n\n@web.route(\"/advsearch\", methods=['GET'])\n@login_required_if_no_ano", "url": "https://github.com/janeczku/calibre-web.git", "language": "Python", "ast_errors": "@web.route(\"/advsearch\", methods=['GET'])\n@login_required_if_no_ano", "n_ast_errors": 1, "ast_levels": 23, "n_whitespaces": 2465, "n_words": 455, "vocab_size": 251, "complexity": 33, "nloc": 122, "token_counts": 1247, "n_ast_nodes": 2034, "n_identifiers": 126, "random_cut": "def render_adv_search_results(term, offset=None, order=None, limit=None):\n sort_param = order[0] if order else [db.Books.sort]\n pagination = None\n\n cc = get_cc_columns(filter_config_custom_read=True)\n calibre_db.session.connection().connection.connection.create_function(\"lower\", 1, db.lcase)\n if not config.config_read_column:\n query = (calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, ub.ReadBook).select_from(db.Books)\n .outerjoin(ub.ReadBook, and_(db.Books.id == ub.ReadBook.book_id,\n int(current_user.id) == ub.ReadBook.user_id)))\n else:\n try:\n read_column = cc[config.config_read_column]\n query = (calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, read_column.value)\n .select_from(db.Books)\n .outerjoin(read_column, read_column.book == db.Books.id))\n except (KeyError, AttributeError):\n log.error(\"Custom Column No.%d is not existing in calibre database\", config.config_read_column)\n # Skip linking read column\n query = calibre_db.session.query(db.Books, ub.ArchivedBook.is_archived, None)\n query = query.outerjoin(ub.ArchivedBook, and_(db.Books.id == ub.ArchivedBook.book_id,\n int(current_user.id) == ub.ArchivedBook.user_id))\n\n q = query.outerjoin(db.books_series_link, db.Books.id == db.books_series_link.c.book) \\\n .outerjoin(db.Series) \\\n .filter(calibre_db.common_filters(True))\n\n # parse multiselects to a complete dict\n tags = dict()\n elements = ['tag', 'serie', 'shelf', 'language', 'extension']\n for element in elements:\n tags['include_' + element] = term.get('include_' + element)\n tags['exclude_' + element] = term.get('exclude_' + element)\n\n author_name = term.get(\"author_name\")\n book_title = term.get(\"book_title\")\n publisher = term.get(\"publisher\")\n pub_start = term.get(\"publishstart\")\n pub_end = term.get(\"publishend\")\n rating_low = term.get(\"ratinghigh\")\n rating_high = term.get(\"ratinglow\")\n description = term.get(\"comment\")\n read_status = term.get(\"read_status\")\n if author_name:\n author_name = author_name.strip().lower().replace(',', '|')\n if book_title:\n book_title = book_title.strip().lower()\n if publisher:\n publisher = publisher.strip().lower()\n\n s" }, { "id": 204673, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/management/commands/migrate.py", "file_name": "migrate.py", "fun_name": "migration_progress_callback", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def migration_progress_callback(self, action, migration=None, fake=False):\n if self.verbosity >= 1:\n compute_time = self.verbosity > 1\n if action == \"apply_start\":\n if compute_time:\n self.start = time.monotonic()\n self.stdout.write(\" Applying %s...\" % migration, ending=\"\")\n self.stdout.flush()\n elif action == \"apply_success\":\n elapsed = (\n \" (%.3fs)\" % (time.monotonic() - self.start) if compute_time else \"\"\n )\n if fake:\n self.stdout.write(self.style.SUCCESS(\" FAKED\" + 
elapsed))\n else:\n self.stdout.write(self.style.SUCCESS(\" OK\" + elapsed))\n elif action == \"unapply_start\":\n if compute_time:\n self.start = time.monotonic()\n self.stdout.write(\" Unapplying %s...\" % migration, ending=\"\")\n self.stdout.flush()\n elif action == \"unapply_success\":\n elapsed = (\n \" (%.3fs)\" % (time.monotonic() - self.start) if compute_time else \"\"\n )\n if fake:\n self.stdout.write(self.style.SUCCESS(\" FAKED\" + elapsed))\n else:\n self.stdout.write(self.style.SUCCESS(\" OK\" + elapsed))\n elif action == \"render_start\":\n if compute_time:\n self.start = time.monotonic()\n self.stdout.write(\" Rendering model states...\", ending=\"\")\n self.stdout.flush()\n elif action == \"render_success\":\n elapsed = (\n \" (%.3fs)\" % (time.monotonic() - self.start) if compute_time else \"\"\n )\n self.stdout.write(self.style.SUCCESS(\" DONE\" + elapsed))\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 717, "n_words": 141, "vocab_size": 54, "complexity": 16, "nloc": 39, "token_counts": 314, "n_ast_nodes": 542, "n_identifiers": 17, "random_cut": "def migration_progress_callback(self, action, migration=None, fake=False):\n if self.verbosity >= 1:\n compute_time = self.verbosity > 1\n if action == \"apply_start\":\n if compute_time:\n self.start = time.monotonic()\n self.stdout.write(\" Applying %s...\" % migration, ending=\"\")\n self.stdout.flush()\n elif action == \"apply_success\":\n elapsed = (\n \" (%.3fs)\" % (time.monotonic() - self.start) if compute_time else \"\"\n )\n if fake:\n self.stdout.write(self.style.SUCCESS(\" FAKED\" + elapsed))\n else:\n self.stdout.write(self.style.SUCCESS(\" OK\" + elapsed))\n elif action == \"unapply_start\":\n if compute_time:\n self.start = time.monotonic()\n self.stdout.write(\" Unapplying %s...\" % migration, ending=\"\")\n self.stdout.flush()\n elif action == \"unapply_success\":\n elapsed = (\n \" (%.3fs)\" % (time.monotonic() - self.start) if compute_time else \"\"\n )\n if fake:\n self.stdout.write(self.style.SUCCESS(\" FAKED\" + elapsed))\n else:\n self.stdout.write(self.style.SUCCESS(\" OK\" + elapsed))\n elif action == \"render_start\":\n if compute_time:\n self.start = time.monotonic()\n self.stdout.write(\" Rendering model states...\", ending=\"\")\n self.stdout.flush()\n elif action == \"render_success\":\n elapsed = (\n \" (%.3fs)\" % (time.monotonic() - self.start) if compute_time else \"\"\n )\n self.stdout.write(self.style.SUCCESS(\" DONE\" + elapsed))\n" }, { "id": 256737, "commit_id": "f7a01624e0581a05800ed31ddb1bbb32b169b256", "repo": "haystack", "path": "test/test_pipeline.py", "file_name": "test_pipeline.py", "fun_name": "test_validate_pipeline_config_invalid_component_param_key", "commit_message": "Refactor Pipeline peripherals (#2253)\n\n* move peripheral stuff to utils, add more and better tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* move config related peripherals to config module, fix tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* remove unnecessary list comprehensions\r\n\r\n* apply ZanSara's feedback\r\n\r\n* remove classes in pipeline utils\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def test_validate_pipeline_config_invalid_component_param_key():\n with pytest.raises(ValueError, match=\"is not a valid config variable name\"):\n validate_config({\"components\": [{\"name\": \"test\", \"type\": \"test\", \"params\": 
{\"\\btest\": \"test\"}}]})\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 28, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 3, "token_counts": 42, "n_ast_nodes": 85, "n_identifiers": 6, "random_cut": "def test_validate_pipeline_config_invalid_component_param_key():\n with pytest.raises(ValueError, match=\"is not a valid config variable name\"):\n validate_config({\"components\"" }, { "id": 66870, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/payroll/doctype/additional_salary/additional_salary.py", "file_name": "additional_salary.py", "fun_name": "get_additional_salaries", "commit_message": "style: format code with black", "code": "def get_additional_salaries(employee, start_date, end_date, component_type):\n\tcomp_type = \"Earning\" if component_type == \"earnings\" else \"Deduction\"\n\n\tadditional_sal = frappe.qb.DocType(\"Additional Salary\")\n\tcomponent_field = additional_sal.salary_component.as_(\"component\")\n\toverwrite_field = additional_sal.overwrite_salary_structure_amount.as_(\"overwrite\")\n\n\tadditional_salary_list = (\n\t\tfrappe.qb.from_(additional_sal)\n\t\t.select(\n\t\t\tadditional_sal.name,\n\t\t\tcomponent_field,\n\t\t\tadditional_sal.type,\n\t\t\tadditional_sal.amount,\n\t\t\tadditional_sal.is_recurring,\n\t\t\toverwrite_field,\n\t\t\tadditional_sal.deduct_full_tax_on_selected_payroll_date,\n\t\t)\n\t\t.where(\n\t\t\t(additional_sal.employee == employee)\n\t\t\t& (additional_sal.docstatus == 1)\n\t\t\t& (additional_sal.type == comp_type)\n\t\t)\n\t\t.where(\n\t\t\tadditional_sal.payroll_date[start_date:end_date]\n\t\t\t| ((additional_sal.from_date <= end_date) & (additional_sal.to_date >= end_date))\n\t\t)\n\t\t.run(as_dict=True)\n\t)\n\n\tadditional_salaries = []\n\tcomponents_to_overwrite = []\n\n\tfor d in additional_salary_list:\n\t\tif d.overwrite:\n\t\t\tif d.component in components_to_overwrite:\n\t\t\t\tfrappe.throw(\n\t\t\t\t\t_(\n\t\t\t\t\t\t\"Multiple Additional Salaries with overwrite property exist for Salary Component {0} between {1} and {2}.\"\n\t\t\t\t\t).format(frappe.bold(d.component), start_date, end_date),\n\t\t\t\t\ttitle=_(\"Error\"),\n\t\t\t\t)\n\n\t\t\tcomponents_to_overwrite.append(d.component)\n\n\t\tadditional_salaries.append(d)\n\n\treturn additional_salaries\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 64, "n_words": 105, "vocab_size": 82, "complexity": 5, "nloc": 41, "token_counts": 228, "n_ast_nodes": 359, "n_identifiers": 41, "random_cut": "def get_additional_salaries(employee, start_date, end_date, component_type):\n\tcomp_type = \"Earning\" if component_type == \"earnings\" else \"Deduction\"\n\n\tadditional_sal = frappe.qb.DocType(\"Additional Salary\")\n\tcomponent_field = additional_sal.salary_component.as_(\"component\")\n\toverwrite_field = additional_sal.overwrite_salary_structure_amount.as_(\"overwrite\")\n\n\tadditional_salary_list = (\n\t\tfrappe.qb.from_(additional_sal)\n\t\t.select(\n\t\t\tadditional_sal.name,\n\t\t\tcomponent_field,\n\t\t\tadditional_sal.type,\n\t\t\tadditional_sal.amount,\n\t\t\tadditional_sal.is_recurring,\n\t\t\toverwrite_field,\n\t\t\tadditional_sal.deduct_full_tax_on_selected_payroll_date,\n\t\t)\n\t\t.where(\n\t\t\t(additional_sal.employee == employee)\n\t\t\t& (additional_sal.docstatus == 1)\n\t\t\t& (additional_sal.type == 
comp_type)\n\t\t)\n\t\t.where(\n\t\t\tadditional_sal.payroll_date[start_date:end_date]\n\t\t\t|" }, { "id": 207274, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_scripts/management/commands/label_command.py", "file_name": "label_command.py", "fun_name": "handle_label", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def handle_label(self, label, **options):\n print(\n \"EXECUTE:LabelCommand label=%s, options=%s\"\n % (label, sorted(options.items()))\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 47, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 27, "n_ast_nodes": 44, "n_identifiers": 7, "random_cut": "def handle_label(self, label, **options):\n " }, { "id": 106429, "commit_id": "b0a60ce2032172aeaaf27fe3866ab72768f10cb2", "repo": "youtube-dl", "path": "youtube_dl/jsinterp.py", "file_name": "jsinterp.py", "fun_name": "_all_operators", "commit_message": "[jsinterp] Improve JS language support (#31175)\n\n* operator ??\r\n* operator ?.\r\n* operator **\r\n* accurate operator functions\r\n* `undefined` handling\r\n* object literals {a: 1, \"b\": expr}\r\n* more tests for weird JS comparisons: see https://github.com/ytdl-org/youtube-dl/issues/31173#issuecomment-1217854397.", "code": "def _all_operators():\n return itertools.chain(\n # Ref: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence\n _SC_OPERATORS, _LOG_OPERATORS, _COMP_OPERATORS, _OPERATORS)\n", "url": "https://github.com/ytdl-org/youtube-dl.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 39, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 26, "n_identifiers": 7, "random_cut": "def _all_operators():\n return itertools.chain(\n # Ref: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence\n " }, { "id": 321575, "commit_id": "deb21acdebd77c6dc6d5fe4d8cad75e4ca074138", "repo": "qutebrowser", "path": "tests/end2end/fixtures/quteprocess.py", "file_name": "quteprocess.py", "fun_name": "wait_scroll_pos_changed", "commit_message": "qt6 tests: Fix remaining PyQt5 references", "code": "def wait_scroll_pos_changed(self, x=None, y=None):\n \n __tracebackhide__ = (lambda e:\n e.errisinstance(testprocess.WaitForTimeout))\n if (x is None and y is not None) or (y is None and x is not None):\n raise ValueError(\"Either both x/y or neither must be given!\")\n\n if x is None and y is None:\n point = 'Py*.QtCore.QPoint(*, *)' # not counting 0/0 here\n elif x == '0' and y == '0':\n point = 'Py*.QtCore.QPoint()'\n else:\n point = 'Py*.QtCore.QPoint({}, {})'.format(x, y)\n self.wait_for(category='webview',\n message='Scroll position changed to ' + point)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 221, "n_words": 78, "vocab_size": 54, "complexity": 9, "nloc": 13, "token_counts": 107, "n_ast_nodes": 184, "n_identifiers": 15, "random_cut": "def wait_scroll_pos_changed(self, x=None, y=None):\n \n __tracebackhide__ = (lambda e:\n e.errisinstance(testprocess.WaitForTimeout))\n if (x is None and y is not None) or (y is None and x is not None):\n raise ValueError(\"Either both x/y or neither must be given!\")\n\n if x is None and y is None:\n point = 
'Py*.QtCore.QPoint(*, *)' # not counting 0/0 here\n elif x == '0' and y" }, { "id": 187882, "commit_id": "8bd58c047ca38e9bb639a6948e45b4387fc5d147", "repo": "streamlink", "path": "tests/plugins/test_rtve.py", "file_name": "test_rtve.py", "fun_name": "test_translate_has_streams", "commit_message": "plugins.rtve: fix ZTNR.translate", "code": "def test_translate_has_streams():\n # real payload with modified end (IEND chunk of size 0), to reduce test size\n data = \\\n \"iVBORw0KGgoAAAANSUhEUgAAAVQAAAFUCAIAAAD08FPiAAACr3RFWHRXczlVSWdtM2ZPTGY4b2R4\" \\\n \"dWo5aHZnRlRhOndvZEtxN3pLOG5oNGRpbT1vREBTWHhOMGtzUVomNndAWkV5cz1GOUlCSiYxdDcy\" \\\n \"QmdDOFM2NGFVJmh1Nzk2bUpwOFVJOE1DJlpAY2lzdGcmbEUmRE5DZFV4SHpEOFgvLmppZ1l4b3M1\" \\\n \"QU1lOnl3ZS04VlBwQkZvLlFMUWZHTy1vQjNVeHhfVDF1JkRSQTpPP2J4Wm0zbFlxS3IjAEhEX1JF\" \\\n \"QURZJSUwNTYwNzI4Mjg4MzUyNjQyMzUxMTA0Mzg0NzI4NzY4NDEyODAzODU0ODMwMDQ3NzcwNDEx\" \\\n \"MDAyODE1MzM3NDU3ODAxMDg3MjgxNTg1MzMzNDE3MTYxMTE4NzQ1MTU3MjYxOTUwNzI4NzEyNDgw\" \\\n \"MzI4NTM1ODM1ODU3MzQyNzE0NjcyODE2NTgzNDI4NTE0NTg1MzIwMzgxODU3NDY0NzUwODI3OTQ0\" \\\n \"ODg3NjEzMTUzNDMxMTUxNzYzNDU1NzE0MDA1MDUzNDIxODE0ODYyNDIzODM2MTczMzQ0NjAwNTIw\" \\\n \"NTU2NDYyNDgxODYzNDA2MzA4MTE0ODUxMTQ2Mzg2MzYyMjQ4Mjc3MjIyMjUzNjMxMjI1MjEzMTU0\" \\\n \"NjI1NjIyMjM3MTA4NjEwNjI0NTYyNTMxNTA2ODEyMjQ2MzYzNzE0MzY4MDU1MTgxNTQ2NTU3MTMx\" \\\n \"NTI0NzU4MTU2NjAxMjY0MjA1MDU2MzcwMDM3NzcwMjA0MTYxMzE3MjQxMTI2NzYzMzUyNjY3NTQ1\" \\\n \"NTA1MTUxNTc2NTEzMTUwNjcxNDcyMDI2MTQyMjczNTI4NzExNjA4NTU3NjIzMzMxMzU0NDM1Mzgw\" \\\n \"MTI0MTQzMTU1MTMyNzc4ODI1MjcyMjUwMjY4MzYyMDUzMjQzNjA0MTYyMzkhB8fSAAAAAElFTkQAAAAACg==\"\n\n assert list(ZTNR.translate(data)) == [\n (\n \"HD_READY\",\n \"https://rtvehlsvodlote7modo2.rtve.es/mediavodv2/resources/TE_NGVA/mp4/5/3/1656573649835.mp4/video.m3u8\"\n + \"?hls_no_audio_only=true&idasset=6638770\"\n ),\n ]\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 205, "n_words": 56, "vocab_size": 43, "complexity": 1, "nloc": 22, "token_counts": 53, "n_ast_nodes": 90, "n_identifiers": 5, "random_cut": "def test_translate_has_streams():\n # real payload with modified end (IEND chunk of size 0), to reduce test size\n data = \\\n \"iVBORw0KGgoAAAANSUhEUgAAAVQAAAFUCAIAAAD08FPiAAACr3RFWHRXczlVSWdtM2ZPTGY4b2R4\" \\\n \"dWo5aHZnRlRhOndvZEtxN3pLOG5oNGRpbT1vREBTWHhOMGtzUVomNndAWkV5cz1GOUlCSiYxdDcy\" \\\n \"QmdDOFM2NGFVJmh1Nzk2bUpwOFVJOE1DJlpAY2lzdGcmbEUmRE5DZFV4SHpEO" }, { "id": 249590, "commit_id": "e8f30a76caa4394ebb3e77c56df951e3626b3fdd", "repo": "synapse", "path": "tests/storage/test_event_federation.py", "file_name": "test_event_federation.py", "fun_name": "test_get_insertion_event_backward_extremities_in_room", "commit_message": "Fix overflows in /messages backfill calculation (#13936)\n\n* Reproduce bug\r\n* Compute `least_function` first\r\n* Substitute `least_function` with an f-string\r\n* Bugfix: avoid overflow\r\n\r\nCo-authored-by: Eric Eastwood ", "code": "def test_get_insertion_event_backward_extremities_in_room(self):\n \n setup_info = self._setup_room_for_insertion_backfill_tests()\n room_id = setup_info.room_id\n depth_map = setup_info.depth_map\n\n # Try at \"insertion_eventB\"\n backfill_points = self.get_success(\n self.store.get_insertion_event_backward_extremities_in_room(\n room_id, depth_map[\"insertion_eventB\"], limit=100\n )\n )\n backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]\n self.assertEqual(backfill_event_ids, [\"insertion_eventB\", \"insertion_eventA\"])\n\n 
# Try at \"insertion_eventA\"\n backfill_points = self.get_success(\n self.store.get_insertion_event_backward_extremities_in_room(\n room_id, depth_map[\"insertion_eventA\"], limit=100\n )\n )\n backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]\n # Event \"2\" has a depth of 2 but is not included here because we only\n # know the approximate depth of 5 from our event \"3\".\n self.assertListEqual(backfill_event_ids, [\"insertion_eventA\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 269, "n_words": 83, "vocab_size": 56, "complexity": 3, "nloc": 18, "token_counts": 117, "n_ast_nodes": 191, "n_identifiers": 15, "random_cut": "def test_get_insertion_event_backward_extremities_in_room(self):\n \n setup_info = self._setup_room_for_insertion_backfill_tests()\n room_id = setup_info.room_id\n depth_map = setup_info.depth_map\n\n # Try at \"insertion_eventB\"\n backfill_points = self.get_success(\n self.store.get_insertion_event_backward_extremities_in_room(\n room_id, depth_map[\"insertion_eventB\"], limit=100\n )\n )\n backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]\n self.assertEqual(backfill_event_ids, [\"insertion_eventB\", \"insertion_eventA\"])\n\n # Try at \"insertion_eventA\"\n backfill_points = self.get_success(\n self.store.get_insertion_event_backward_extremities_in_room(\n room_id, depth_map[\"insertion_eventA\"], limit=100\n )\n )\n backfill_event_ids = [backfill_point[0] for backfill_point in backfill_points]\n # Event \"2\" has a depth of 2 but is not included here because we only\n # know the approximate depth of 5 from our event \"3\".\n self.assertListEqual(backfill_event_ids, [\"insertion_eventA\"])\n" }, { "id": 65915, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/education/doctype/topic/topic.py", "file_name": "topic.py", "fun_name": "add_topic_to_courses", "commit_message": "style: format code with black", "code": "def add_topic_to_courses(topic, courses, mandatory=False):\n\tcourses = json.loads(courses)\n\tfor entry in courses:\n\t\tcourse = frappe.get_doc(\"Course\", entry)\n\t\tcourse.append(\"topics\", {\"topic\": topic, \"topic_name\": topic})\n\t\tcourse.flags.ignore_mandatory = True\n\t\tcourse.save()\n\tfrappe.db.commit()\n\tfrappe.msgprint(\n\t\t_(\"Topic {0} has been added to all the selected courses successfully.\").format(\n\t\t\tfrappe.bold(topic)\n\t\t),\n\t\ttitle=_(\"Courses updated\"),\n\t\tindicator=\"green\",\n\t)\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 28, "n_words": 44, "vocab_size": 41, "complexity": 2, "nloc": 15, "token_counts": 100, "n_ast_nodes": 178, "n_identifiers": 23, "random_cut": "def add_topic_to_courses(topic, courses, mandatory=False):\n\tcourses = json.loads(courses)\n\tfor entry in courses:\n\t\tcourse = frappe.get_doc(\"Course\", entry)\n\t\tcourse.append(\"topics\", {\"topic\": topic, \"topic_name\": topic})\n\t\tcourse.flags.ignore_mandatory = True\n\t\tcourse.save()\n\tfrappe.db.commit()\n\tfrappe.msgprint(\n\t\t_(\"Topic {0} has been added to all the selected courses successfully.\").format(\n\t\t\tfrappe.bold(" }, { "id": 107061, "commit_id": "da31ed386482845629a8505f81810ddb341514fb", "repo": "matplotlib", "path": "lib/matplotlib/widgets.py", "file_name": "widgets.py", 
"fun_name": "update", "commit_message": "Fix drawing animated artists changed in selector callback", "code": "def update(self):\n \n if not self.ax.get_visible() or self.ax.figure._cachedRenderer is None:\n return False\n if self.useblit:\n if self.background is not None:\n self.canvas.restore_region(self.background)\n else:\n self.update_background(None)\n for artist in self.artists + self._get_animated_artists():\n if artist.stale:\n self.ax.draw_artist(artist)\n self.canvas.blit(self.ax.bbox)\n else:\n self.canvas.draw_idle()\n return False\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 200, "n_words": 35, "vocab_size": 26, "complexity": 7, "nloc": 15, "token_counts": 108, "n_ast_nodes": 177, "n_identifiers": 19, "random_cut": "def update(self):\n \n if not self.ax.get_visible() or self.ax.figure._cachedRenderer is None:\n return False\n if self.useblit:\n if self.background is not None:\n self.canvas.restore_region(self.background)\n else:\n self.update_background(None)\n for artist in self.artists + self._get_animated_artists():\n if artist.stale:\n " }, { "id": 191649, "commit_id": "9bb7195085a843db3f53e4fd9a51c79f3698a19d", "repo": "langchain", "path": "langchain/llms/huggingface_hub.py", "file_name": "huggingface_hub.py", "fun_name": "_identifying_params", "commit_message": "Harrison/llm saving (#331)\n\nCo-authored-by: Akash Samant <70665700+asamant21@users.noreply.github.com>", "code": "def _identifying_params(self) -> Mapping[str, Any]:\n \n _model_kwargs = self.model_kwargs or {}\n return {\n **{\"repo_id\": self.repo_id, \"task\": self.task},\n **{\"model_kwargs\": _model_kwargs},\n }\n", "url": "https://github.com/hwchase17/langchain.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 69, "n_words": 19, "vocab_size": 19, "complexity": 2, "nloc": 7, "token_counts": 46, "n_ast_nodes": 77, "n_identifiers": 9, "random_cut": "def _identifying_params(self) -> Mapping[str, Any]:\n \n _mod" }, { "id": 224790, "commit_id": "3fd48806aa3fe6c571e0669afcbc4327f4c032e4", "repo": "mkdocs", "path": "mkdocs/tests/config/config_options_tests.py", "file_name": "config_options_tests.py", "fun_name": "test_invalid_children_config_int", "commit_message": "Refactor config_options_tests to use less implementation details\n\nPrevent missed warnings, test more error messages.\n\nAnd minor fixes to error messages themselves.", "code": "def test_invalid_children_config_int(self):\n with self.expect_error(option=\"Expected nav to be a list, got a int: 1\"):\n self.get_config(self.Schema, {'option': [{\"foo.md\": [{\"bar.md\": 1}]}]})\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 35, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 3, "token_counts": 41, "n_ast_nodes": 74, "n_identifiers": 6, "random_cut": "def test_invalid_children_config_int(self):\n with self.expect_error(option=\"Expected nav to be a list, got a int: 1\"):\n self.get" }, { "id": 4516, "commit_id": "9f21fae6684e6833e8896d017cd5c859046b61a7", "repo": "airbyte", "path": "octavia-cli/integration_tests/conftest.py", "file_name": "conftest.py", "fun_name": "destination_state_path", "commit_message": "🐛 octavia-cli: use `list` endpoint instead of `list_latest` (#11505)", "code": "def destination_state_path(octavia_test_project_directory):\n state_path = 
f\"{octavia_test_project_directory}/destinations/postgres/state.yaml\"\n silent_remove(state_path)\n yield state_path\n silent_remove(state_path)\n\n\n@pytest.fixture(scope=\"session\")", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "@pytest.fixture(scope=\"session\")", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 20, "n_words": 10, "vocab_size": 8, "complexity": 1, "nloc": 5, "token_counts": 19, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def destination_state_path(octavia_test_project_directory):\n state_path = f\"{octavia_test_project_directory}/destinations/postgres/state.yaml\"\n silent_remove(state_path)\n yield state_path\n " }, { "id": 301542, "commit_id": "087c0b59edb4f6233849e2cf6eb9057474251934", "repo": "core", "path": "homeassistant/components/squeezebox/media_player.py", "file_name": "media_player.py", "fun_name": "async_play_media", "commit_message": "Update integrations to pass target player when resolving media (#72597)", "code": "async def async_play_media(self, media_type, media_id, **kwargs):\n \n cmd = \"play\"\n index = None\n\n if kwargs.get(ATTR_MEDIA_ENQUEUE):\n cmd = \"add\"\n\n if media_source.is_media_source_id(media_id):\n media_type = MEDIA_TYPE_MUSIC\n play_item = await media_source.async_resolve_media(\n self.hass, media_id, self.entity_id\n )\n media_id = play_item.url\n\n if media_type in MEDIA_TYPE_MUSIC:\n if not media_id.startswith(SQUEEZEBOX_SOURCE_STRINGS):\n # do not process special squeezebox \"source\" media ids\n media_id = async_process_play_media_url(self.hass, media_id)\n\n await self._player.async_load_url(media_id, cmd)\n return\n\n if media_type == MEDIA_TYPE_PLAYLIST:\n try:\n # a saved playlist by number\n payload = {\n \"search_id\": int(media_id),\n \"search_type\": MEDIA_TYPE_PLAYLIST,\n }\n playlist = await generate_playlist(self._player, payload)\n except ValueError:\n # a list of urls\n content = json.loads(media_id)\n playlist = content[\"urls\"]\n index = content[\"index\"]\n else:\n payload = {\n \"search_id\": media_id,\n \"search_type\": media_type,\n }\n playlist = await generate_playlist(self._player, payload)\n\n _LOGGER.debug(\"Generated playlist: %s\", playlist)\n\n await self._player.async_load_playlist(playlist, cmd)\n if index is not None:\n await self._player.async_index(index)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 593, "n_words": 125, "vocab_size": 77, "complexity": 8, "nloc": 37, "token_counts": 214, "n_ast_nodes": 357, "n_identifiers": 35, "random_cut": "async def async_play_media(self, media_type, media_id, **kwargs):\n \n cmd = \"play\"\n index = None\n\n if kwargs.get(ATTR_MEDIA_ENQUEUE):\n cmd = \"add\"\n\n if media_source.is_media_source_id(media_id):\n media_type = MEDIA_TYPE_MUSIC\n play_item = await media_source.async_resolve_media(\n self.hass, media_id, self.entity_id\n )\n media_id = play_item.url\n\n if media_type in MEDIA_TYPE_MUSIC:\n if not media_id.startswith(SQUEEZEBOX_SOURCE_STRINGS):\n # do not process special squeezebox \"source\" media ids\n media_id = async_process_play_media_url(self.hass, media_id)\n\n await se" }, { "id": 172765, "commit_id": "4545f4a20d9ff90b99bbd4e3e34b6de4441d6367", "repo": "calibre-web", "path": "cps/helper.py", "file_name": "helper.py", "fun_name": "get_sorted_author", "commit_message": "Better epub cover parsing with multiple cover-image items\nCode cosmetics\nrenamed variables\nrefactored xml page 
generation\nrefactored prepare author", "code": "def get_sorted_author(value):\n value2 = None\n try:\n if ',' not in value:\n regexes = [r\"^(JR|SR)\\.?$\", r\"^I{1,3}\\.?$\", r\"^IV\\.?$\"]\n combined = \"(\" + \")|(\".join(regexes) + \")\"\n value = value.split(\" \")\n if re.match(combined, value[-1].upper()):\n if len(value) > 1:\n value2 = value[-2] + \", \" + \" \".join(value[:-2]) + \" \" + value[-1]\n else:\n value2 = value[0]\n elif len(value) == 1:\n value2 = value[0]\n else:\n value2 = value[-1] + \", \" + \" \".join(value[:-1])\n else:\n value2 = value\n except Exception as ex:\n log.error(\"Sorting author %s failed: %s\", value, ex)\n if isinstance(list, value2):\n value2 = value[0]\n else:\n value2 = value\n return value2\n\n", "url": "https://github.com/janeczku/calibre-web.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 339, "n_words": 96, "vocab_size": 52, "complexity": 7, "nloc": 25, "token_counts": 189, "n_ast_nodes": 322, "n_identifiers": 17, "random_cut": "def get_sorted_author(value):\n value2 = None\n try:\n if ',' not in value:\n regexes = [r\"^(JR|SR)\\.?$\", r\"^I{1,3}\\.?$\", r\"^IV\\.?$\"]\n combined = \"(\" + \")|(\".join(regexes) + \")\"\n value = value.split(\" \")\n if re.match(combined, value[-1].upper()):\n if len(value) > 1:\n value2 = value[-2] + \", \" + \" \".join(value[:-2]) + \" \" + value[-1]\n else:\n value2 = value[0]\n elif len(value) == 1:\n value2 = value[0]\n else:\n value2 = value[-1] + \", \" + \" \".join(value[:-1])\n else:\n value2 = value\n except Exception as ex:\n log.error(\"Sorting author %s failed: %s\", value, ex)\n if isins" }, { "id": 51162, "commit_id": "98d598b7fe14ddca68f8107a66a1f8a3e4ce2bd8", "repo": "PaddleHub", "path": "modules/image/semantic_segmentation/lseg/module.py", "file_name": "module.py", "fun_name": "run_cmd", "commit_message": "Add LSeg Module (#2038)\n\n* add LSeg\r\n\r\n* add LSeg README\r\n\r\n* add requirements.txt\r\n\r\n* update README\r\n\r\n* update module\r\n\r\n* update\r\n\r\n* update\r\n\r\n* update\r\n\r\n* update\r\n\r\n* pre-commit\r\n\r\n* update\r\n\r\n* save jpg -> save png\r\n\r\n* bgr -> bgra\r\n\r\n* fix typo\r\n\r\n* pre-commit", "code": "def run_cmd(self, argvs):\n \n self.parser = argparse.ArgumentParser(description=\"Run the {} module.\".format(self.name),\n prog='hub run {}'.format(self.name),\n usage='%(prog)s',\n add_help=True)\n self.parser.add_argument('--input_path', type=str, help=\"path to image.\")\n self.parser.add_argument('--labels', type=str, nargs='+', help=\"segmentation labels.\")\n self.parser.add_argument('--output_dir',\n type=str,\n default='lseg_output',\n help=\"The directory to save output images.\")\n args = self.parser.parse_args(argvs)\n self.segment(image=args.input_path, labels=args.labels, visualization=True, output_dir=args.output_dir)\n return 'segmentation results are saved in %s' % args.output_dir\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 336, "n_words": 49, "vocab_size": 45, "complexity": 1, "nloc": 14, "token_counts": 144, "n_ast_nodes": 236, "n_identifiers": 26, "random_cut": "def run_cmd(self, argvs):\n \n self.parser = argparse.ArgumentParser(description=\"Run the {} module.\".format(self.name),\n prog='hub run {}'.format(self.name),\n usage='%(prog)s',\n add_help=True)\n self.parser.add_argument('--input_path', type=str, help=\"path to image.\")\n self.parser.a" }, { "id": 60351, 
"commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/python/caffe/test/test_python_layer.py", "file_name": "test_python_layer.py", "fun_name": "python_net_file", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def python_net_file():\n with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:\n f.write()\n return f.name\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 26, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 11, "token_counts": 30, "n_ast_nodes": 55, "n_identifiers": 8, "random_cut": "def python_net_file():\n with tempfile.NamedTemporaryFile(mode='w+', delet" }, { "id": 268840, "commit_id": "ba6fddb32d20ccd0759c90d19978b02da6568fe7", "repo": "keras", "path": "keras/engine/training.py", "file_name": "training.py", "fun_name": "_detect_save_format", "commit_message": "When interactive logging is disabled (using absl logging) and verbose=\"auto\", we will use verbose=2, which does not print the progress bar.\n\nPiperOrigin-RevId: 420939364", "code": "def _detect_save_format(filepath):\n \n\n filepath = io_utils.path_to_string(filepath)\n if saving_utils.is_hdf5_filepath(filepath):\n return filepath, 'h5'\n\n # Filepath could be a TensorFlow checkpoint file prefix or SavedModel\n # directory. It's possible for filepath to be both a prefix and directory.\n # Prioritize checkpoint over SavedModel.\n if _is_readable_tf_checkpoint(filepath):\n save_format = 'tf'\n elif tf.saved_model.contains_saved_model(filepath):\n ckpt_path = os.path.join(filepath, tf.saved_model.VARIABLES_DIRECTORY,\n tf.saved_model.VARIABLES_FILENAME)\n if _is_readable_tf_checkpoint(ckpt_path):\n filepath = ckpt_path\n save_format = 'tf'\n else:\n raise ValueError('Unable to load weights. filepath {} appears to be a '\n 'SavedModel directory, but checkpoint either doesn\\'t '\n 'exist, or is incorrectly formatted.'.format(filepath))\n else:\n # Not a TensorFlow checkpoint. This filepath is likely an H5 file that\n # doesn't have the hdf5/keras extensions.\n save_format = 'h5'\n return filepath, save_format\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 231, "n_words": 110, "vocab_size": 70, "complexity": 5, "nloc": 19, "token_counts": 102, "n_ast_nodes": 186, "n_identifiers": 19, "random_cut": "def _detect_save_format(filepath):\n \n\n filepath = io_utils.path_to_string(filepath)\n if saving_utils.is_hdf5_filepath(filepath):\n return filepath, 'h5'\n\n # Filepath could be a TensorFlow checkpoint file prefix or SavedModel\n # directory. 
It's possible for filepath to be both a prefix and directory.\n # Prioritize checkpoint over SavedModel.\n if _is_readable_tf_checkpoint(filepath):\n save_format = 'tf'\n elif tf.saved_model.contains_saved_model(filepath):\n ckpt_path = os.path.join(filepath, tf.saved_model.VARIABLES_DIRECTORY,\n tf.saved_model.VARIABLES_FILENAME)\n if _is_readable_tf_checkpoint(ckpt_" }, { "id": 155108, "commit_id": "fd776a5a4faf7695d461f369bb2470dcb8aa2745", "repo": "modin", "path": "modin/core/execution/dispatching/factories/test/test_dispatcher.py", "file_name": "test_dispatcher.py", "fun_name": "test_set_execution", "commit_message": "FEAT-#5230: Support external query compiler and IO (#5231)\n\nSigned-off-by: Devin Petersohn \r\nCo-authored-by: Vasily Litvinov ", "code": "def test_set_execution():\n with _switch_execution(\"Bar\", \"Foo\"):\n assert FactoryDispatcher.get_factory() == FooOnBarFactory\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 18, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 39, "n_identifiers": 5, "random_cut": "def test_set_execution():\n with _switch_execution(\"Bar\", \"Foo\"):\n assert FactoryDispatcher.get_factory() == FooOnBarFactory\n\n" }, { "id": 26291, "commit_id": "e85f83b4c7ef25e18509577dbc808893a3484f61", "repo": "saleor", "path": "saleor/graphql/meta/mutations.py", "file_name": "mutations.py", "fun_name": "get_instance", "commit_message": "Catch update_fields did not affect any rows errors and return response with message (#9225)\n\n* Add select_for_update() to querysets in checkout and meta mutations\r\n\r\n* Remove redundant select_for_update\r\n\r\n* Remove reduntant file after merging conflicts\r\n\r\n* Catch update_fields did not affect any rows errors and return response with message\r\n\r\n* Review changes\r\n\r\n* Update products.py", "code": "def get_instance(cls, info, **data):\n object_id = data.get(\"id\")\n qs = data.get(\"qs\", None)\n\n try:\n type_name, _ = from_global_id_or_error(object_id)\n # ShippingMethodType represents the ShippingMethod model\n if type_name == \"ShippingMethodType\":\n qs = shipping_models.ShippingMethod.objects\n\n return cls.get_node_or_error(info, object_id, qs=qs)\n except GraphQLError as e:\n if instance := cls.get_instance_by_token(object_id, qs):\n return instance\n raise ValidationError(\n {\n \"id\": ValidationError(\n str(e), code=MetadataErrorCode.GRAPHQL_ERROR.value\n )\n }\n )\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 279, "n_words": 54, "vocab_size": 45, "complexity": 4, "nloc": 18, "token_counts": 105, "n_ast_nodes": 171, "n_identifiers": 24, "random_cut": "def get_instance(cls, info, **data):\n object_id = data.get(\"id\")\n qs = data.get(\"qs\", None)\n\n try:\n type_name, _ = from_global_id_or_error(object_id)\n # ShippingMethodType represents the ShippingMethod model\n if type_name == \"ShippingMethodType\":\n qs = shipping_models.ShippingMethod.objects\n\n return cls.get_node_or_error(info, object_id, qs=qs)\n except GraphQLError as e:\n if instance := cls.get_instance_by_token(object_id, qs):\n return instance\n raise ValidationError(\n {\n \"id\": ValidationError(\n str(e), code=MetadataErrorCode.GRAPHQL_ERROR.value\n )\n }\n )\n" }, { "id": 319271, "commit_id": "10ca515ac527b746d6a948d6aebca5d253923b64", "repo": "paperless-ngx", "path": 
"src/documents/tests/test_tasks.py", "file_name": "test_tasks.py", "fun_name": "test_barcode_reader_unreadable", "commit_message": "addes tests:\n- barcode-39\n- barcode-128\n- qr barcodes\n- test for consumption\n\nSigned-off-by: Florian Brandes ", "code": "def test_barcode_reader_unreadable(self):\n test_file = os.path.join(\n os.path.dirname(__file__),\n \"samples\",\n \"barcodes\",\n \"barcode-39-PATCHT-unreadable.png\",\n )\n img = Image.open(test_file)\n self.assertEqual(tasks.barcode_reader(img), [])\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 86, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 9, "token_counts": 51, "n_ast_nodes": 84, "n_identifiers": 14, "random_cut": "def test_barcode_reader_unreadable(self):\n test_file = os.path.join(\n os.path.dirname(__file__),\n \"samples\",\n \"barcodes\",\n \"barcode-39-PATCHT-unreadable.png\",\n )\n img = Image.open(test_file)" }, { "id": 289957, "commit_id": "bcae6d604e2967c7475f0caa4b1b5e4e76ab88bf", "repo": "core", "path": "homeassistant/components/mqtt/device_tracker/schema_discovery.py", "file_name": "schema_discovery.py", "fun_name": "source_type", "commit_message": "Improve MQTT type hints part 8 (#81034)\n\n* Improve typing device_tracker discovery\r\n\r\n* Improve typing device_tracker yaml\r\n\r\n* Add test source_type attribute\r\n\r\n* Follow up comment\r\n\r\n* Initialize at `__init__` not at class level.\r\n\r\n* Use full name for return variable\r\n\r\n* Correct import, remove assert\r\n\r\n* Use AsyncSeeCallback", "code": "def source_type(self) -> SourceType | str:\n \n source_type: SourceType | str = self._config[CONF_SOURCE_TYPE]\n return source_type\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 35, "n_words": 14, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 24, "n_ast_nodes": 40, "n_identifiers": 6, "random_cut": "def source_type(self) -> SourceType | str:\n \n sour" }, { "id": 97814, "commit_id": "cef95dd7be74a43fb986e7bb3cd638481de0fc2f", "repo": "sentry", "path": "tests/sentry/api/test_client_state.py", "file_name": "test_client_state.py", "fun_name": "test_large_payload", "commit_message": "Client State endpoint (#33135)\n\n* Client State endpoint\r\n\r\n* fixes\r\n\r\n* use configured cluster key\r\n\r\n* Add tests\r\n\r\n* make endpoint private\r\n\r\n* updates\r\n\r\n* fix tests", "code": "def test_large_payload(self):\n resp = self.client.put(\n self.path,\n {\"test\": 300 * \"Dummy Data\"},\n )\n assert resp.status_code == 413\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 58, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 6, "token_counts": 32, "n_ast_nodes": 53, "n_identifiers": 7, "random_cut": "def test_large_payload(self):\n resp = self.client.put(\n " }, { "id": 72314, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/tests.py", "file_name": "tests.py", "fun_name": "test_not_logged_in_redirect", "commit_message": "Reformat with black", "code": "def test_not_logged_in_redirect(self):\n response = self.client.get(\"/admin/sdfgdsfgdsfgsdf/\")\n\n # Check that the user was redirected to the login page and that next was set correctly\n self.assertRedirects(\n response, reverse(\"wagtailadmin_login\") + 
\"?next=/admin/sdfgdsfgdsfgsdf/\"\n )\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 66, "n_words": 28, "vocab_size": 25, "complexity": 1, "nloc": 5, "token_counts": 28, "n_ast_nodes": 52, "n_identifiers": 7, "random_cut": "def test_not_logged_in_redirect(self):\n response = self." }, { "id": 275622, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_v2/utils.py", "file_name": "utils.py", "fun_name": "make_global_gradient_clipnorm_fn", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def make_global_gradient_clipnorm_fn(clipnorm):\n \n if clipnorm is None:\n return lambda grads_and_vars: grads_and_vars\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 23, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 5, "token_counts": 20, "n_ast_nodes": 29, "n_identifiers": 3, "random_cut": "def make_global_gradient_clipnorm_fn(clipnorm):\n \n if clipnorm is None:\n retur" }, { "id": 168916, "commit_id": "d0268e719f899789f9606beb4592a17d27086b4c", "repo": "pandas", "path": "pandas/tests/frame/methods/test_reindex.py", "file_name": "test_reindex.py", "fun_name": "test_reindex_uint_dtypes_fill_value", "commit_message": "BUG: reindex using wrong fill value when indexing cols and index for uint dtypes (#48185)", "code": "def test_reindex_uint_dtypes_fill_value(self, any_unsigned_int_numpy_dtype):\n # GH#48184\n df = DataFrame({\"a\": [1, 2], \"b\": [1, 2]}, dtype=any_unsigned_int_numpy_dtype)\n result = df.reindex(columns=list(\"abcd\"), index=[0, 1, 2, 3], fill_value=10)\n expected = DataFrame(\n {\"a\": [1, 2, 10, 10], \"b\": [1, 2, 10, 10], \"c\": 10, \"d\": 10},\n dtype=any_unsigned_int_numpy_dtype,\n )\n tm.assert_frame_equal(result, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 106, "n_words": 43, "vocab_size": 32, "complexity": 1, "nloc": 8, "token_counts": 113, "n_ast_nodes": 169, "n_identifiers": 15, "random_cut": "def test_reindex_uint_dtypes_fill_value(self, any_unsigned_int_numpy_dtype):\n # GH#48184\n df = DataFrame({\"a\": [1, 2], \"b\":" }, { "id": 274134, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/rnn/time_distributed.py", "file_name": "time_distributed.py", "fun_name": "compute_output_shape", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def compute_output_shape(self, input_shape):\n input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)\n\n child_input_shape = tf.nest.map_structure(\n self._remove_timesteps, input_shape\n )\n child_output_shape = self.layer.compute_output_shape(child_input_shape)\n child_output_shape = tf_utils.convert_shapes(\n child_output_shape, to_tuples=False\n )\n timesteps = tf_utils.convert_shapes(input_shape)\n timesteps = tf.nest.flatten(timesteps)[1]\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 105, "n_words": 28, "vocab_size": 19, "complexity": 1, "nloc": 13, "token_counts": 89, "n_ast_nodes": 117, "n_identifiers": 15, "random_cut": "def compute_output_shape(self, input_shape):\n input_shape = 
tf_utils.convert_shapes(input_shape, to_tuples=False)\n\n child_input_shape = tf.nest.map_structure(\n self._remove_timesteps, input_shape\n )\n " }, { "id": 260446, "commit_id": "dcd0d4f054b4586f617d35885df05eaae4708876", "repo": "scikit-learn", "path": "sklearn/tests/test_random_projection.py", "file_name": "test_random_projection.py", "fun_name": "test_random_projection_transformer_invalid_input", "commit_message": "MAINT Use `_validate_params` in RandomProjection family (#23831)\n\nCo-authored-by: Meekail Zain <34613774+Micky774@users.noreply.github.com>", "code": "def test_random_projection_transformer_invalid_input():\n n_components = \"auto\"\n fit_data = [[0, 1, 2]]\n for RandomProjection in all_RandomProjection:\n with pytest.raises(ValueError):\n RandomProjection(n_components=n_components).fit(fit_data)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 43, "n_words": 17, "vocab_size": 16, "complexity": 2, "nloc": 6, "token_counts": 42, "n_ast_nodes": 70, "n_identifiers": 9, "random_cut": "def test_random_projection_transformer_invalid_input():\n n_components = \"auto\"\n fit_data = [[0, 1, 2]]\n for RandomProjection in all_RandomProjection:\n with pytest.raises(ValueError):\n RandomProjection(n" }, { "id": 295394, "commit_id": "2d37066ce59064ed6121720e03424e4dc73c2b43", "repo": "core", "path": "homeassistant/components/demo/vacuum.py", "file_name": "vacuum.py", "fun_name": "turn_off", "commit_message": "Add EntityFeature enum to Vacuum (#69121)", "code": "def turn_off(self, **kwargs):\n \n if self.supported_features & VacuumEntityFeature.TURN_OFF == 0:\n return\n\n self._state = False\n self._status = \"Charging\"\n self.schedule_update_ha_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 63, "n_words": 17, "vocab_size": 16, "complexity": 2, "nloc": 6, "token_counts": 36, "n_ast_nodes": 63, "n_identifiers": 9, "random_cut": "def turn_off(self, **kwargs):\n \n " }, { "id": 261300, "commit_id": "e41753ebd57c44ae91b389f190c43ddc0b384a75", "repo": "scikit-learn", "path": "sklearn/tests/test_pipeline.py", "file_name": "test_pipeline.py", "fun_name": "test_pipeline_raise_set_params_error", "commit_message": "MAINT Clean deprecation for 1.2: normalize in linear models (#24391)", "code": "def test_pipeline_raise_set_params_error():\n # Test pipeline raises set params error message for nested models.\n pipe = Pipeline([(\"cls\", LinearRegression())])\n\n # expected error message\n error_msg = re.escape(\n \"Invalid parameter 'fake' for estimator Pipeline(steps=[('cls',\"\n \" LinearRegression())]). Valid parameters are: ['memory', 'steps', 'verbose'].\"\n )\n with pytest.raises(ValueError, match=error_msg):\n pipe.set_params(fake=\"nope\")\n\n # invalid outer parameter name for compound parameter: the expected error message\n # is the same as above.\n with pytest.raises(ValueError, match=error_msg):\n pipe.set_params(fake__estimator=\"nope\")\n\n # expected error message for invalid inner parameter\n error_msg = re.escape(\n \"Invalid parameter 'invalid_param' for estimator LinearRegression(). 
Valid\"\n \" parameters are: ['copy_X', 'fit_intercept', 'n_jobs', 'positive'].\"\n )\n with pytest.raises(ValueError, match=error_msg):\n pipe.set_params(cls__invalid_param=\"nope\")\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 182, "n_words": 95, "vocab_size": 58, "complexity": 1, "nloc": 16, "token_counts": 96, "n_ast_nodes": 180, "n_identifiers": 15, "random_cut": "def test_pipeline_raise_set_params_error():\n # Test pipeline raises set params error message for nested models.\n pipe = Pipeline([(\"cls\", LinearRegression())])\n\n # expected error message\n error_msg = re.escape(\n \"Invalid parameter 'fake' for estimator Pipeline(steps=[('cls',\"\n \" LinearRegression())]). Valid parameters are: ['memory', 'steps', 'verbose'].\"\n )\n with pytest.raises(ValueError, match=error_msg):\n pipe.set_params(fake=\"nope\")\n\n # invalid outer parameter name for compound parameter: the expected error message\n # is the same as above.\n with pytest.raises(ValueError, match=error_msg):\n pipe.set_params(fake__estimator=\"nope\")\n\n # expected error message for invalid inner parameter\n error_msg = re.escape(\n \"Invalid parameter 'invalid_param' for estimator LinearRegression(). Valid\"\n \" parameters " }, { "id": 149435, "commit_id": "7d3116f9fbe446a31837f483aa9bef550d7a1d3d", "repo": "freqtrade", "path": "tests/rpc/test_rpc_webhook.py", "file_name": "test_rpc_webhook.py", "fun_name": "test_exception_send_msg", "commit_message": "webhookbuy -> webhookentry", "code": "def test_exception_send_msg(default_conf, mocker, caplog):\n default_conf[\"webhook\"] = get_webhook_dict()\n del default_conf[\"webhook\"][\"webhookentry\"]\n\n webhook = Webhook(RPC(get_patched_freqtradebot(mocker, default_conf)), default_conf)\n webhook.send_msg({'type': RPCMessageType.ENTRY})\n assert log_has(f\"Message type '{RPCMessageType.ENTRY}' not configured for webhooks\",\n caplog)\n\n default_conf[\"webhook\"] = get_webhook_dict()\n default_conf[\"webhook\"][\"webhookentry\"][\"value1\"] = \"{DEADBEEF:8f}\"\n msg_mock = MagicMock()\n mocker.patch(\"freqtrade.rpc.webhook.Webhook._send_msg\", msg_mock)\n webhook = Webhook(RPC(get_patched_freqtradebot(mocker, default_conf)), default_conf)\n msg = {\n 'type': RPCMessageType.ENTRY,\n 'exchange': 'Binance',\n 'pair': 'ETH/BTC',\n 'limit': 0.005,\n 'order_type': 'limit',\n 'stake_amount': 0.8,\n 'stake_amount_fiat': 500,\n 'stake_currency': 'BTC',\n 'fiat_currency': 'EUR'\n }\n webhook.send_msg(msg)\n assert log_has(\"Problem calling Webhook. Please check your webhook configuration. 
\"\n \"Exception: 'DEADBEEF'\", caplog)\n\n msg_mock = MagicMock()\n mocker.patch(\"freqtrade.rpc.webhook.Webhook._send_msg\", msg_mock)\n msg = {\n 'type': 'DEADBEEF',\n 'status': 'whatever'\n }\n with pytest.raises(NotImplementedError):\n webhook.send_msg(msg)\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 269, "n_words": 93, "vocab_size": 67, "complexity": 1, "nloc": 34, "token_counts": 207, "n_ast_nodes": 374, "n_identifiers": 20, "random_cut": "def test_exception_send_msg(default_conf, mocker, caplog):\n default_conf[\"webhook\"] = get_webhook_dict()\n del default_conf[\"webhook\"][\"webhookentry\"]\n\n webhook = Webhook(RPC(get_patched_freqtradebot(mocker, default_conf)), default_conf)\n webhook.send_msg({'type': RPCMessageType.ENTRY})\n assert log_has(f\"Message type '{RPCMessageType.ENTRY}' not configured for webhooks\",\n caplog)\n\n default_conf[\"webhook\"] = get_webhook_dict()\n default_conf[\"webhook\"][\"webhookentry\"][\"value1\"] = \"{DEADBEEF:8f}\"\n msg_mock = MagicMock()\n mocker.patch(\"freqtrade.rpc.webhook.Webhook._send_msg\", msg_mock)\n webhook = Webhook(RPC(get_patched_freqtradebot(mocker, default_conf)), default_conf)\n msg = {\n 'type': RPCMessageType.ENTRY,\n 'exchange': 'Binance',\n 'pair': 'ETH/BTC',\n 'limit': 0" }, { "id": 275268, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_experimental/optimizer.py", "file_name": "optimizer.py", "fun_name": "learning_rate", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def learning_rate(self, learning_rate):\n if isinstance(\n self._learning_rate, learning_rate_schedule.LearningRateSchedule\n ):\n raise TypeError(\n \"This optimizer was created with a `LearningRateSchedule`\"\n \" object as its `learning_rate` constructor argument, \"\n \"hence its learning rate is not settable. If you need the\"\n \" learning rate to be settable, you should instantiate \"\n \"the optimizer with a float `learning_rate` argument.\"\n )\n self._learning_rate.assign(learning_rate)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 183, "n_words": 55, "vocab_size": 44, "complexity": 2, "nloc": 12, "token_counts": 36, "n_ast_nodes": 65, "n_identifiers": 8, "random_cut": "def learning_rate(self, learning_rate):\n if isinstance(\n self._learning_rate, learning_rate_schedule.LearningRateSchedule\n ):\n raise TypeError(\n \"This optimizer was created with a `LearningRateSchedule`\"\n \" object as its `learning_rate` constructor argument, \"\n \"hence its learning rate is not settable. 
If you need" }, { "id": 276476, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/tests/model_architectures.py", "file_name": "model_architectures.py", "fun_name": "call", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def call(self, inputs, **kwargs):\n x = self.dense1(inputs)\n x = self.dp(x)\n x = self.bn(x)\n return self.dense2(x)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 42, "n_words": 15, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 41, "n_ast_nodes": 65, "n_identifiers": 9, "random_cut": "def call(self, inputs, **kwargs):\n x = self.dense1(inputs)\n " }, { "id": 145243, "commit_id": "58e5f0140d247059ca45b249446614929930c126", "repo": "ray", "path": "dashboard/modules/job/tests/test_job_manager.py", "file_name": "test_job_manager.py", "fun_name": "test_failed_runtime_env_validation", "commit_message": "[jobs] Rename JobData -> JobInfo (#22499)\n\n`JobData` could be confused with the actual output data of a job, `JobInfo` makes it more clear that this is status information + metadata.", "code": "async def test_failed_runtime_env_validation(self, job_manager):\n \n run_cmd = f\"python {_driver_script_path('override_env_var.py')}\"\n job_id = job_manager.submit_job(\n entrypoint=run_cmd, runtime_env={\"working_dir\": \"path_not_exist\"}\n )\n\n data = job_manager.get_job_info(job_id)\n assert data.status == JobStatus.FAILED\n assert \"path_not_exist is not a valid URI\" in data.message\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 91, "n_words": 31, "vocab_size": 28, "complexity": 1, "nloc": 8, "token_counts": 52, "n_ast_nodes": 101, "n_identifiers": 15, "random_cut": "async def test_failed_runtime_env_validation(self, job_manager):\n \n run_cmd = f\"python {_driver_script_path('override_env_var.py')}\"\n job_id = job_manager.submit" }, { "id": 120615, "commit_id": "17de89b16ac5ee05aee03115d858e67489eab973", "repo": "jax", "path": "jaxlib/lapack.py", "file_name": "lapack.py", "fun_name": "syevd_mhlo", "commit_message": "feat: refactor code using pyupgrade\n\nThis PR upgrades legacy Python code to 3.7+ code using pyupgrade:\n```sh\npyupgrade --py37-plus --keep-runtime-typing **.py\n```\n\na", "code": "def syevd_mhlo(dtype, a, lower=False):\n a_type = ir.RankedTensorType(a.type)\n dims = a_type.shape\n assert len(dims) >= 2\n m, n = dims[-2:]\n assert m == n\n batch_dims = tuple(dims[:-2])\n num_bd = len(batch_dims)\n b = 1\n for d in batch_dims:\n b *= d\n layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))\n\n i32_type = ir.IntegerType.get_signless(32)\n if dtype == np.float32:\n fn = b\"lapack_ssyevd\"\n eigvals_type = ir.F32Type.get()\n workspace = [\n ir.RankedTensorType.get([_lapack.syevd_work_size(n)],\n a_type.element_type),\n ir.RankedTensorType.get([_lapack.syevd_iwork_size(n)], i32_type),\n ]\n workspace_layouts = [[0], [0]]\n elif dtype == np.float64:\n fn = b\"lapack_dsyevd\"\n eigvals_type = ir.F64Type.get()\n workspace = [\n ir.RankedTensorType.get([_lapack.syevd_work_size(n)],\n a_type.element_type),\n ir.RankedTensorType.get([_lapack.syevd_iwork_size(n)], i32_type),\n ]\n workspace_layouts = [[0], [0]]\n elif dtype == np.complex64:\n fn = b\"lapack_cheevd\"\n eigvals_type = ir.F32Type.get()\n workspace = [\n 
ir.RankedTensorType.get([_lapack.heevd_work_size(n)],\n a_type.element_type),\n ir.RankedTensorType.get([_lapack.heevd_rwork_size(n)], eigvals_type),\n ir.RankedTensorType.get([_lapack.syevd_iwork_size(n)], i32_type),\n ]\n workspace_layouts = [[0], [0], [0]]\n elif dtype == np.complex128:\n fn = b\"lapack_zheevd\"\n eigvals_type = ir.F64Type.get()\n workspace = [\n ir.RankedTensorType.get([_lapack.heevd_work_size(n)],\n a_type.element_type),\n ir.RankedTensorType.get([_lapack.heevd_rwork_size(n)], eigvals_type),\n ir.RankedTensorType.get([_lapack.syevd_iwork_size(n)], i32_type),\n ]\n workspace_layouts = [[0], [0], [0]]\n else:\n raise NotImplementedError(f\"Unsupported dtype {dtype}\")\n\n scalar_layout = []\n layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))\n out = custom_call(\n fn,\n [\n a.type,\n ir.RankedTensorType.get(batch_dims + (n,), eigvals_type),\n ir.RankedTensorType.get(batch_dims, i32_type),\n ] + workspace,\n [_mhlo_s32(1 if lower else 0), _mhlo_s32(b), _mhlo_s32(n), a],\n operand_layouts=[scalar_layout] * 3 + [layout],\n result_layouts=[\n layout,\n tuple(range(num_bd, -1, -1)),\n tuple(range(num_bd - 1, -1, -1)),\n ] + workspace_layouts)\n return out[:3]\n\n\n# # geev: Nonsymmetric eigendecomposition\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 582, "n_words": 215, "vocab_size": 105, "complexity": 7, "nloc": 70, "token_counts": 606, "n_ast_nodes": 911, "n_identifiers": 48, "random_cut": "def syevd_mhlo(dtype, a, lower=False):\n a_type = ir.RankedTensorType(a.type)\n dims = a_type.shape\n assert len(dims) >= 2\n m, n = dims[-2:]\n assert m == n\n batch_dims = tuple(dims[:-2])\n num_bd = len(batch_dims)\n b = 1\n for d in batch_dims:\n b *= d\n layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))\n\n i32_type = ir.IntegerType.get_signless(32)\n if dtype == np.float32:\n fn = b\"lapack_ssyevd\"\n eigvals_type = ir.F32Type.get()\n workspace = [\n ir.RankedTensorType.get([_lapack.syevd_work_size(n)],\n " }, { "id": 83151, "commit_id": "d560d124a304a2f6dd467200aab7f070a78bf155", "repo": "zulip", "path": "zerver/tests/test_message_edit.py", "file_name": "test_message_edit.py", "fun_name": "test_move_message_to_stream_and_topic", "commit_message": "python: Replace string concatenations with f-strings.", "code": "def test_move_message_to_stream_and_topic(self) -> None:\n (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(\n \"iago\", \"test move stream\", \"new stream\", \"test\"\n )\n\n with queries_captured() as queries, cache_tries_captured() as cache_tries:\n result = self.client_patch(\n f\"/json/messages/{msg_id}\",\n {\n \"message_id\": msg_id,\n \"stream_id\": new_stream.id,\n \"propagate_mode\": \"change_all\",\n \"topic\": \"new topic\",\n },\n )\n self.assert_length(queries, 52)\n self.assert_length(cache_tries, 13)\n\n messages = get_topic_messages(user_profile, old_stream, \"test\")\n self.assert_length(messages, 1)\n self.assertEqual(\n messages[0].content,\n f\"This topic was moved by @_**Iago|{user_profile.id}** to #**new stream>new topic**\",\n )\n\n messages = get_topic_messages(user_profile, new_stream, \"new topic\")\n self.assert_length(messages, 4)\n self.assertEqual(\n messages[3].content,\n f\"This topic was moved here from #**test move stream>test** by @_**Iago|{user_profile.id}**\",\n )\n self.assert_json_success(result)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", 
"ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 384, "n_words": 89, "vocab_size": 66, "complexity": 1, "nloc": 29, "token_counts": 163, "n_ast_nodes": 283, "n_identifiers": 21, "random_cut": "def test_move_message_to_stream_and_topic(self) -> None:\n (user_profile, old_stream, new_stream, msg_id, msg_id_later) = self.prepare_move_topics(\n \"iago\", \"" }, { "id": 291868, "commit_id": "e2308fd15cec4dfdd25d843b72cd3071657fd5b8", "repo": "core", "path": "homeassistant/components/matter/entity.py", "file_name": "entity.py", "fun_name": "async_added_to_hass", "commit_message": "Add matter integration BETA (#83064)\n\n* Add matter base (#79372)\r\n\r\nCo-authored-by: Marcel van der Veldt \r\n\r\n* Add matter server add-on flow (#82698)\r\n\r\n* Add matter server add-on flow\r\n\r\n* Fix stale error argument\r\n\r\n* Clean docstrings\r\n\r\n* Use localhost as default address\r\n\r\n* Add matter websocket api foundation (#82848)\r\n\r\n* Add matter config entry add-on management (#82865)\r\n\r\n* Use matter refactored server/client library (#83003)\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Bump python-matter-server to 1.0.6 (#83059)\r\n\r\n* Extend matter websocket api (#82948)\r\n\r\n* Extend matter websocket api\r\n\r\n* Finish docstring\r\n\r\n* Fix pin type\r\n\r\n* Adjust api after new client\r\n\r\n* Adjust api to frontend for now\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "async def async_added_to_hass(self) -> None:\n \n await super().async_added_to_hass()\n\n # Subscribe to attribute updates.\n for attr_cls in self.entity_description.subscribe_attributes:\n if matter_attr := self.get_matter_attribute(attr_cls):\n self._attributes_map[attr_cls] = matter_attr.path\n self._unsubscribes.append(\n self.matter_client.subscribe(\n self._on_matter_event,\n EventType.ATTRIBUTE_UPDATED,\n self._device_type_instance.node.node_id,\n matter_attr.path,\n )\n )\n continue\n # not sure if this can happen, but just in case log it.\n LOGGER.warning(\"Attribute not found on device: %s\", attr_cls)\n\n # make sure to update the attributes once\n self._update_from_device()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 326, "n_words": 61, "vocab_size": 53, "complexity": 3, "nloc": 17, "token_counts": 93, "n_ast_nodes": 153, "n_identifiers": 23, "random_cut": "async def async_added_to_hass(self) -> None:\n \n await super().async_added_to_hass()\n\n # Subscribe to attribute updates.\n for attr_cls in self.entity_description.subscribe_attributes:\n if matter_attr := self.get_matter_attribute(attr_cls):\n self._attributes_map[attr_cls] = matter_attr.path\n self._unsubscribes.append(\n self.matter_client.subscribe(\n self._on_matter_event,\n EventType.ATTRIBUT" }, { "id": 219683, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "_sqrt_nearest", "commit_message": "add python 3.10.4 for windows", "code": "def _sqrt_nearest(n, a):\n \n if n <= 0 or a <= 0:\n raise ValueError(\"Both arguments to _sqrt_nearest should be positive.\")\n\n b=0\n while a != b:\n b, a = a, a--n//a>>1\n return a\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 60, "n_words": 31, "vocab_size": 27, "complexity": 4, "nloc": 7, "token_counts": 42, "n_ast_nodes": 77, "n_identifiers": 5, "random_cut": 
"def _sqrt_nearest(n, a):\n \n i" }, { "id": 188091, "commit_id": "def9bedd3093e88f56618b068f3db53a042e6c1e", "repo": "jumpserver", "path": "apps/applications/serializers/application.py", "file_name": "application.py", "fun_name": "app", "commit_message": "perf: 账号备份优化 (#7503)\n\n* perf: 账号备份优化\r\n\r\n* feat: 优化账号备份获取有序备份字段列表\r\n\r\nCo-authored-by: feng626 <1304903146@qq.com>\r\nCo-authored-by: Michael Bai ", "code": "def app(self):\n if isinstance(self.instance, models.Application):\n instance = self.instance\n else:\n instance = None\n return instance\n", "url": "https://github.com/jumpserver/jumpserver.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 56, "n_words": 14, "vocab_size": 11, "complexity": 2, "nloc": 6, "token_counts": 29, "n_ast_nodes": 47, "n_identifiers": 6, "random_cut": "def app(self):\n if isinstance(self.instance, models.Application):\n instance = self.instance\n else:\n instance = None\n return instance\n" }, { "id": 163518, "commit_id": "4e034ec0006b6c05160ce67ea1420ce28f295c91", "repo": "pandas", "path": "pandas/core/indexes/base.py", "file_name": "base.py", "fun_name": "union", "commit_message": "DEPR: DatetimeIndex.intersection with mixed timezones cast to UTC, not object (#45357)\n\n* DEPR: DatetimeIndex.intersection with mixed timezones cast to UTC instead of object\r\n\r\n* GH ref\r\n\r\n* mypy fixup\r\n\r\nCo-authored-by: Jeff Reback ", "code": "def union(self, other, sort=None):\n \n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_name = self._convert_can_do_setop(other)\n\n if not is_dtype_equal(self.dtype, other.dtype):\n if (\n isinstance(self, ABCMultiIndex)\n and not is_object_dtype(unpack_nested_dtype(other))\n and len(other) > 0\n ):\n raise NotImplementedError(\n \"Can only union MultiIndex with MultiIndex or Index of tuples, \"\n \"try mi.to_flat_index().union(other) instead.\"\n )\n self._deprecate_dti_setop(other, \"union\")\n\n dtype = self._find_common_type_compat(other)\n left = self.astype(dtype, copy=False)\n right = other.astype(dtype, copy=False)\n return left.union(right, sort=sort)\n\n elif not len(other) or self.equals(other):\n # NB: whether this (and the `if not len(self)` check below) come before\n # or after the is_dtype_equal check above affects the returned dtype\n return self._get_reconciled_name_object(other)\n\n elif not len(self):\n return other._get_reconciled_name_object(self)\n\n result = self._union(other, sort=sort)\n\n return self._wrap_setop_result(other, result)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 400, "n_words": 102, "vocab_size": 76, "complexity": 8, "nloc": 25, "token_counts": 186, "n_ast_nodes": 300, "n_identifiers": 27, "random_cut": "def union(self, other, sort=None):\n \n self._validate_sort_keyword(sort)\n self._assert_can_do_setop(other)\n other, result_name = self._convert_can_do_setop(other)\n\n if not is_dtype_equal(self.dtype, other.dtype):\n if (\n isinstance(self, ABCMultiIndex)\n and not is_object_dtype(unpack_nested_dtype(other))\n and len(other) > 0\n ):\n raise NotImplementedError(\n \"Can only union MultiIndex with MultiIndex or Index of tuples, \"\n \"try mi.to_flat_index().union(other) instead.\"\n )\n self._deprecate_dti_setop(other, \"union\")\n\n dtype = self._find_common_type_compat(other)\n left = self.astype(dtype, copy=False)\n right = other.astype(dtype, copy=False)\n return left.union(right, 
sort=sort)\n\n elif not len(other) or self.equals(other):\n # NB: whether this (and the `if not len(self)` check below) come before\n # or after the is_dtype_equal check above affects the returned dtype\n return self._get_reconciled_name_object(other)\n\n elif not len(self):\n return other._get_reconciled_name_object(self)\n\n result = self._union(other, sort=sort)\n\n return self._wrap_" }, { "id": 69547, "commit_id": "7bd06e6fbc30ebd92b18055983e3b88fa9545e2a", "repo": "erpnext", "path": "erpnext/manufacturing/doctype/workstation_type/test_workstation_type.py", "file_name": "test_workstation_type.py", "fun_name": "create_workstation_type", "commit_message": "test: test case to check workstation type", "code": "def create_workstation_type(**args):\n\targs = frappe._dict(args)\n\n\tif workstation_type := frappe.db.exists(\"Workstation Type\", args.workstation_type):\n\t\treturn frappe.get_doc(\"Workstation Type\", workstation_type)\n\telse:\n\t\tdoc = frappe.new_doc(\"Workstation Type\")\n\t\tdoc.update(args)\n\t\tdoc.insert()\n\t\treturn doc\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 15, "n_words": 24, "vocab_size": 20, "complexity": 2, "nloc": 9, "token_counts": 62, "n_ast_nodes": 106, "n_identifiers": 12, "random_cut": "def create_workstation_type(**args):\n\targs = frappe._dict(args)\n\n\tif workstation_type := frappe.db.exists(\"Workstation Type\", args.workstation_type):\n\t\treturn frappe.get_doc(\"Workstation Type\", workstation_type)\n\telse:\n\t\tdoc = frappe.new_doc(\"Workstation Type\")\n\t\tdoc.update(args)\n\t\tdoc.insert()\n\t\tretur" }, { "id": 172728, "commit_id": "4545f4a20d9ff90b99bbd4e3e34b6de4441d6367", "repo": "calibre-web", "path": "cps/editbooks.py", "file_name": "editbooks.py", "fun_name": "edit_single_cc_data", "commit_message": "Better epub cover parsing with multiple cover-image items\nCode cosmetics\nrenamed variables\nrefactored xml page generation\nrefactored prepare author", "code": "def edit_single_cc_data(book_id, book, column_id, to_save):\n cc = (calibre_db.session.query(db.CustomColumns)\n .filter(db.CustomColumns.datatype.notin_(db.cc_exceptions))\n .filter(db.CustomColumns.id == column_id)\n .all())\n return edit_cc_data(book_id, book, to_save, cc)\n\n", "url": "https://github.com/janeczku/calibre-web.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 50, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 6, "token_counts": 67, "n_ast_nodes": 100, "n_identifiers": 18, "random_cut": "def edit_single_cc_data(book_id, book, column_id, to_save):\n cc = (calibre_db.session.query(db.CustomColumns)\n .filter(db.CustomColumns" }, { "id": 50763, "commit_id": "a6790a651a12eb391060e533868bf0ba197f6f7e", "repo": "PaddleHub", "path": "modules/image/text_to_image/stable_diffusion/diffusers/schedulers/scheduling_karras_ve.py", "file_name": "scheduling_karras_ve.py", "fun_name": "set_timesteps", "commit_message": "Add stable diffusion module", "code": "def set_timesteps(self, num_inference_steps):\n self.num_inference_steps = num_inference_steps\n self.timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()\n self.schedule = [(self.sigma_max * (self.sigma_min**2 / self.sigma_max**2)**(i / (num_inference_steps - 1)))\n for i in self.timesteps]\n self.schedule = np.array(self.schedule, dtype=np.float32)\n\n self.set_format(tensor_format=self.tensor_format)\n", "url": 
"https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 88, "n_words": 30, "vocab_size": 25, "complexity": 2, "nloc": 7, "token_counts": 104, "n_ast_nodes": 160, "n_identifiers": 16, "random_cut": "def set_timesteps(self, num_inference_steps):\n self.num_inference_steps = num_inference_steps\n self.timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()\n self.schedule = [(self.sigma_max * (self.sigma_min**2 / self.sigma_max**2)**(i / (num_inference_steps - 1)))\n for i in self.timest" }, { "id": 35842, "commit_id": "d83d22f578276e9f201b0b3b0f8f9bd68e86c133", "repo": "transformers", "path": "tests/maskformer/test_feature_extraction_maskformer.py", "file_name": "test_feature_extraction_maskformer.py", "fun_name": "prepare_feat_extract_dict", "commit_message": "Maskformer (#15682)\n\n* maskformer\r\n\r\n* conflicts\r\n\r\n* conflicts\r\n\r\n* minor fixes\r\n\r\n* feature extractor test fix\r\n\r\nrefactor MaskFormerLoss following conversation\r\n\r\nMaskFormer related types should not trigger a module time import error\r\n\r\nmissed one\r\n\r\nremoved all the types that are not used\r\n\r\nupdate config mapping\r\n\r\nminor updates in the doc\r\n\r\nresolved conversation that doesn't need a discussion\r\n\r\nminor changes\r\n\r\nresolved conversations\r\n\r\nfixed DetrDecoder\r\n\r\n* minor changes\r\n\r\nminor changes\r\n\r\nfixed mdx file\r\n\r\ntest feature_extractor return types\r\n\r\nfunctional losses -> classes\r\n\r\nremoved the return type test for the feature extractor\r\n\r\nminor changes + style + quality\r\n\r\n* conflicts?\r\n\r\n* rebase master\r\n\r\n* readme\r\n\r\n* added missing files\r\n\r\n* deleded poolformers test that where in the wrong palce\r\n\r\n* CI\r\n\r\n* minor changes\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* resolved conversations\r\n\r\n* minor changes\r\n\r\n* conversations\r\n\r\n[Unispeech] Fix slow tests (#15818)\r\n\r\n* remove soundfile old way of loading audio\r\n\r\n* Adapt slow test\r\n\r\n[Barthez Tokenizer] Fix saving (#15815)\r\n\r\n[TFXLNet] Correct tf xlnet generate (#15822)\r\n\r\n* [TFXLNet] Correct tf xlnet\r\n\r\n* adapt test comment\r\n\r\nFix the push run (#15807)\r\n\r\nFix semantic segmentation pipeline test (#15826)\r\n\r\nFix dummy_inputs() to dummy_inputs in symbolic_trace doc (#15776)\r\n\r\nAdd model specific output classes to PoolFormer model docs (#15746)\r\n\r\n* Added model specific output classes to poolformer docs\r\n\r\n* Fixed Segformer typo in Poolformer docs\r\n\r\nAdding the option to return_timestamps on pure CTC ASR models. 
(#15792)\r\n\r\n* Adding the option to return_timestamps on pure CTC ASR models.\r\n\r\n* Remove `math.prod` which was introduced in Python 3.8\r\n\r\n* int are not floats.\r\n\r\n* Reworking the PR to support \"char\" vs \"word\" output.\r\n\r\n* Fixup!\r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Quality.\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\nHFTracer.trace should use/return self.graph to be compatible with torch.fx.Tracer (#15824)\r\n\r\nFix tf.concatenate + test past_key_values for TF models (#15774)\r\n\r\n* fix wrong method name tf.concatenate\r\n\r\n* add tests related to causal LM / decoder\r\n\r\n* make style and quality\r\n\r\n* clean-up\r\n\r\n* Fix TFBertModel's extended_attention_mask when past_key_values is provided\r\n\r\n* Fix tests\r\n\r\n* fix copies\r\n\r\n* More tf.int8 -> tf.int32 in TF test template\r\n\r\n* clean-up\r\n\r\n* Update TF test template\r\n\r\n* revert the previous commit + update the TF test template\r\n\r\n* Fix TF template extended_attention_mask when past_key_values is provided\r\n\r\n* Fix some styles manually\r\n\r\n* clean-up\r\n\r\n* Fix ValueError: too many values to unpack in the test\r\n\r\n* Fix more: too many values to unpack in the test\r\n\r\n* Add a comment for extended_attention_mask when there is past_key_values\r\n\r\n* Fix TFElectra extended_attention_mask when past_key_values is provided\r\n\r\n* Add tests to other TF models\r\n\r\n* Fix for TF Electra test: add prepare_config_and_inputs_for_decoder\r\n\r\n* Fix not passing training arg to lm_head in TFRobertaForCausalLM\r\n\r\n* Fix tests (with past) for TF Roberta\r\n\r\n* add testing for pask_key_values for TFElectra model\r\n\r\nCo-authored-by: ydshieh \r\n\r\n[examples/summarization and translation] fix readme (#15833)\r\n\r\nAdd ONNX Runtime quantization for text classification notebook (#15817)\r\n\r\nRe-enable doctests for the quicktour (#15828)\r\n\r\n* Re-enable doctests for the quicktour\r\n\r\n* Re-enable doctests for task_summary (#15830)\r\n\r\n* Remove &\r\n\r\nFramework split model report (#15825)\r\n\r\nAdd TFConvNextModel (#15750)\r\n\r\n* feat: initial implementation of convnext in tensorflow.\r\n\r\n* fix: sample code for the classification model.\r\n\r\n* chore: added checked for from the classification model.\r\n\r\n* chore: set bias initializer in the classification head.\r\n\r\n* chore: updated license terms.\r\n\r\n* chore: removed ununsed imports\r\n\r\n* feat: enabled argument during using drop_path.\r\n\r\n* chore: replaced tf.identity 
with layers.Activation(linear).\r\n\r\n* chore: edited default checkpoint.\r\n\r\n* fix: minor bugs in the initializations.\r\n\r\n* partial-fix: tf model errors for loading pretrained pt weights.\r\n\r\n* partial-fix: call method updated\r\n\r\n* partial-fix: cross loading of weights (4x3 variables to be matched)\r\n\r\n* chore: removed unneeded comment.\r\n\r\n* removed playground.py\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* fix: renaming TFConvNextStage conv and layer norm layers\r\n\r\n* chore: added initializers and other minor additions.\r\n\r\n* chore: added initializers and other minor additions.\r\n\r\n* add: tests for convnext.\r\n\r\n* fix: integration tester class.\r\n\r\n* fix: issues mentioned in pr feedback (round 1).\r\n\r\n* fix: how output_hidden_states arg is propoagated inside the network.\r\n\r\n* feat: handling of arg for pure cnn models.\r\n\r\n* chore: added a note on equal contribution in model docs.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* feat: encapsulation for the convnext trunk.\r\n\r\n* Fix variable naming; Test-related corrections; Run make fixup\r\n\r\n* chore: added Joao as a contributor to convnext.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* chore: corrected copyright year and added comment on NHWC.\r\n\r\n* chore: fixed the black version and ran formatting.\r\n\r\n* chore: ran make style.\r\n\r\n* chore: removed from_pt argument from test, ran make style.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* fix: tests in the convnext subclass, ran make style.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* chore: moved convnext test to the correct location\r\n\r\n* fix: locations for the test file of convnext.\r\n\r\n* fix: convnext tests.\r\n\r\n* chore: applied sgugger's suggestion for dealing w/ output_attentions.\r\n\r\n* chore: added comments.\r\n\r\n* chore: applied updated quality enviornment style.\r\n\r\n* chore: applied formatting with quality enviornment.\r\n\r\n* chore: revert to the previous tests/test_modeling_common.py.\r\n\r\n* chore: revert to the original test_modeling_common.py\r\n\r\n* chore: revert to previous states for test_modeling_tf_common.py and modeling_tf_utils.py\r\n\r\n* fix: tests for convnext.\r\n\r\n* chore: removed output_attentions argument from convnext config.\r\n\r\n* chore: revert to the earlier tf utils.\r\n\r\n* fix: output shapes of the hidden states\r\n\r\n* chore: removed unnecessary comment\r\n\r\n* chore: reverting to the right test_modeling_tf_common.py.\r\n\r\n* Styling nits\r\n\r\nCo-authored-by: ariG23498 \r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Sylvain Gugger \r\n\r\n* minor changes\r\n\r\n* doc fix in feature extractor\r\n\r\n* doc\r\n\r\n* typose\r\n\r\n* removed detr logic from config\r\n\r\n* removed detr logic from config\r\n\r\n* removed num_labels\r\n\r\n* small fix in the config\r\n\r\n* auxilary -> auxiliary\r\n\r\n* make style\r\n\r\n* some test is failing\r\n\r\n* fix a weird char in config prevending doc-builder\r\n\r\n* retry to fix the doc-builder issue\r\n\r\n* make style\r\n\r\n* new try to fix the doc builder\r\n\r\n* CI\r\n\r\n* change weights to facebook\r\n\r\nCo-authored-by: NielsRogge 
<48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: ariG23498 \r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Sylvain Gugger ", "code": "def prepare_feat_extract_dict(self):\n return {\n \"do_resize\": self.do_resize,\n \"size\": self.size,\n \"max_size\": self.max_size,\n \"do_normalize\": self.do_normalize,\n \"image_mean\": self.image_mean,\n \"image_std\": self.image_std,\n \"size_divisibility\": self.size_divisibility,\n }\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 109, "n_words": 19, "vocab_size": 19, "complexity": 1, "nloc": 10, "token_counts": 50, "n_ast_nodes": 84, "n_identifiers": 9, "random_cut": "def prepare_feat_extract_dict(self):\n return {\n \"do_resize\": self.do_resize,\n \"size\": self.size,\n \"max_size\": self.max_size,\n \"do_no" }, { "id": 253030, "commit_id": "db7074a37dd9d696b6ca085df90a9c84d434f97a", "repo": "mitmproxy", "path": "test/mitmproxy/tools/console/test_statusbar.py", "file_name": "test_statusbar.py", "fun_name": "test_shorten_message", "commit_message": "clean up `statusbar.py`", "code": "def test_shorten_message(message, ready_message):\n assert statusbar.shorten_message(message, max_width=30) == ready_message\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 10, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 30, "n_identifiers": 6, "random_cut": "def test_shorten_message(message, ready_message):\n assert statusbar.shorten_message(message, max_width=30) == ready_message\n\n" }, { "id": 121183, "commit_id": "09ba51f323a2675d5f31b3a8829f7dcd7d989e24", "repo": "jax", "path": "jax/interpreters/pxla.py", "file_name": "pxla.py", "fun_name": "_check_gda_or_array_xla_sharding_match", "commit_message": "Move _get_array_mapping from gda.py to pxla.py\n\nPiperOrigin-RevId: 459891853", "code": "def _check_gda_or_array_xla_sharding_match(args, in_array_mappings):\n from jax.experimental.global_device_array import GlobalDeviceArray\n from jax.experimental.array import Array\n\n for arg, inp_array_mapping in safe_zip(args, in_array_mappings):\n if not isinstance(arg, (GlobalDeviceArray, Array)):\n continue\n # TODO(yashkatariya): For `Array` check the `sharding` directly when pxla\n # takes sharding instances.\n arr_type, arr_mapping = (\n ('GDA', _get_array_mapping(arg.mesh_axes)) if isinstance(arg, GlobalDeviceArray)\n else ('Array', _get_array_mapping(arg.sharding.spec))\n )\n if inp_array_mapping != arr_mapping:\n raise ValueError(\n f\"{arr_type} sharding does not match the input sharding. 
\"\n f\"Got {arr_type} spec: {array_mapping_to_axis_resources(arr_mapping)} and \"\n f\"auto sharding spec: {array_mapping_to_axis_resources(inp_array_mapping)} \"\n f\"for {arr_type}: {arg}\")\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 159, "n_words": 79, "vocab_size": 64, "complexity": 5, "nloc": 16, "token_counts": 102, "n_ast_nodes": 191, "n_identifiers": 21, "random_cut": "def _check_gda_or_array_xla_sharding_match(args, in_array_mappings):" }, { "id": 66407, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/manufacturing/doctype/operation/operation_dashboard.py", "file_name": "operation_dashboard.py", "fun_name": "get_data", "commit_message": "style: format code with black", "code": "def get_data():\n\treturn {\n\t\t\"fieldname\": \"operation\",\n\t\t\"transactions\": [{\"label\": _(\"Manufacture\"), \"items\": [\"BOM\", \"Work Order\", \"Job Card\"]}],\n\t}\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 11, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 5, "token_counts": 34, "n_ast_nodes": 67, "n_identifiers": 2, "random_cut": "def get_data():\n\treturn {\n\t\t\"fieldname\": \"operation\",\n\t\t\"transactions\": [{\"label\": _(\"Manuf" }, { "id": 3908, "commit_id": "1e0ac30ebdcfce55a5644bcd486044da45c93dd6", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-orb/unit_tests/test_source.py", "file_name": "test_source.py", "fun_name": "test_check_connection_fail", "commit_message": "🎉 New Source: Orb (#9985)\n\n* V1 of source_orb connector\r\n\r\n* add boostrap.md file\r\n\r\n* add clause on Pagination to bootstrap.md\r\n\r\n* add SUMMARY documentation\r\n\r\n* add lookback_window_days connector parameter\r\n\r\n* Add support for start_date parameter\r\n\r\n* Add ability to transform record in order to un-nest IDs\r\n\r\n* Add support for extracting event properties based on connector configuration", "code": "def test_check_connection_fail(mocker):\n responses.add(responses.GET, \"https://api.billwithorb.com/v1/ping\", json={\"error\": \"Unauthorized\"}, status=401)\n source = SourceOrb()\n logger_mock = MagicMock()\n (ok, err) = source.check_connection(logger_mock, MagicMock())\n assert (ok, type(err)) == (False, HTTPError)\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 38, "n_words": 24, "vocab_size": 21, "complexity": 1, "nloc": 6, "token_counts": 68, "n_ast_nodes": 109, "n_identifiers": 16, "random_cut": "def test_check_connection_fail(mocker):\n responses.a" }, { "id": 11289, "commit_id": "2efe175c975975532f6e3fd326ed280addf20eba", "repo": "jina", "path": "tests/integration/pods/test_pod.py", "file_name": "test_pod.py", "fun_name": "test_pods_with_replicas_advance_faster", "commit_message": "fix: return responses (#4343)", "code": "async def test_pods_with_replicas_advance_faster(port_generator):\n head_port = port_generator()\n port_expose = port_generator()\n graph_description = '{\"start-gateway\": [\"pod0\"], \"pod0\": [\"end-gateway\"]}'\n pod_addresses = f'{{\"pod0\": [\"0.0.0.0:{head_port}\"]}}'\n\n # create a single head pod\n head_pod = _create_head_pod(head_port, 'head')\n head_pod.start()\n\n # create a single gateway pod\n gateway_pod = _create_gateway_pod(graph_description, pod_addresses, 
port_expose)\n gateway_pod.start()\n\n # create the shards\n pods = []\n for i in range(10):\n # create worker\n worker_port = port_generator()\n # create a single worker pod\n worker_pod = _create_worker_pod(worker_port, f'pod0/{i}', 'FastSlowExecutor')\n pods.append(worker_pod)\n worker_pod.start()\n\n await asyncio.sleep(0.1)\n\n head_pod.wait_start_success()\n gateway_pod.wait_start_success()\n for pod in pods:\n # this would be done by the Pod, its adding the worker to the head\n pod.wait_start_success()\n activate_msg = ControlRequest(command='ACTIVATE')\n activate_msg.add_related_entity('worker', '127.0.0.1', pod.args.port_in)\n GrpcConnectionPool.send_request_sync(activate_msg, f'127.0.0.1:{head_port}')\n\n c = Client(return_responses=True, host='localhost', port=port_expose, asyncio=True)\n input_docs = [Document(text='slow'), Document(text='fast')]\n responses = c.post('/', inputs=input_docs, request_size=1, return_results=True)\n response_list = []", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 266, "n_words": 123, "vocab_size": 83, "complexity": 6, "nloc": 38, "token_counts": 291, "n_ast_nodes": 369, "n_identifiers": 44, "random_cut": "async def test_pods_with_replicas_advance_faster(port_generator):\n head_port = port_generator()\n port_expose = port_generator()\n graph_description = '{\"start-gateway\": [\"pod0\"], \"pod0\": [\"end-gateway\"]}'\n pod_addresses = f'{{\"pod0\": [\"0.0.0.0:{head_port}\"]}}'\n\n # create a single head pod\n head_pod =" }, { "id": 6961, "commit_id": "2471b6b3f925303f337e0de9ded2cba8e23c9be9", "repo": "ludwig", "path": "ludwig/utils/data_utils.py", "file_name": "data_utils.py", "fun_name": "use_credentials", "commit_message": "fix: restore existing credentials when exiting use_credentials context manager (#2112)", "code": "def use_credentials(creds):\n if creds is None:\n with contextlib.nullcontext():\n yield\n return\n\n # https://filesystem-spec.readthedocs.io/en/latest/features.html#configuration\n # This allows us to avoid having to plumb the `storage_options` kwargs through\n # every remote FS call in Ludwig. 
This implementation is restricted to one thread\n # in the process acquiring the lock at once.\n with GLOBAL_CRED_LOCK:\n with tempfile.TemporaryDirectory() as tmpdir:\n fname = os.path.join(tmpdir, \"conf.json\")\n with open(fname, \"w\") as f:\n json.dump(creds, f)\n\n # Backup any existing credentials\n old_conf = dict(**conf)\n\n conf.clear()\n set_conf_files(tmpdir, conf)\n try:\n yield\n finally:\n # Restore previous credentials\n with open(fname, \"w\") as f:\n json.dump(old_conf, f)\n conf.clear()\n set_conf_files(tmpdir, conf)\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 343, "n_words": 93, "vocab_size": 65, "complexity": 3, "nloc": 20, "token_counts": 113, "n_ast_nodes": 210, "n_identifiers": 21, "random_cut": "def use_credentials(creds):\n if creds is None:\n with contextlib.nullcontext():\n " }, { "id": 254926, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/backend/test/case/node/quantizelinear.py", "file_name": "quantizelinear.py", "fun_name": "export", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def export() -> None:\n node = onnx.helper.make_node('QuantizeLinear',\n inputs=['x', 'y_scale', 'y_zero_point'],\n outputs=['y'],)\n\n x = np.array([0, 2, 3, 1000, -254, -1000]).astype(np.float32)\n y_scale = np.float32(2)\n y_zero_point = np.uint8(128)\n y = np.array([128, 129, 130, 255, 1, 0]).astype(np.uint8)\n\n expect(node, inputs=[x, y_scale, y_zero_point], outputs=[y],\n name='test_quantizelinear')\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 166, "n_words": 39, "vocab_size": 35, "complexity": 1, "nloc": 10, "token_counts": 129, "n_ast_nodes": 196, "n_identifiers": 18, "random_cut": "def export() -> None:\n node = onnx.helper.make_node(" }, { "id": 8894, "commit_id": "4b3c8211b3e3eca5f9fdf6553bbd45c9c7587b0d", "repo": "insightface", "path": "body/human_pose/ambiguity_aware/lib/core/config.py", "file_name": "config.py", "fun_name": "_update_dict", "commit_message": "update", "code": "def _update_dict(k, v):\n for vk, vv in v.items():\n if vk in config[k]:\n config[k][vk] = vv\n else:\n raise ValueError(\"{}.{} not exist in config.py\".format(k, vk))\n\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 
61, "n_words": 23, "vocab_size": 20, "complexity": 3, "nloc": 6, "token_counts": 49, "n_ast_nodes": 77, "n_identifiers": 9, "random_cut": "def _update_dict(k, v):\n for vk, vv in v.items():\n if vk in config[k]:\n config[k][vk] = vv" }, { "id": 19858, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_internal/commands/search.py", "file_name": "search.py", "fun_name": "add_options", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def add_options(self) -> None:\n self.cmd_opts.add_option(\n \"-i\",\n \"--index\",\n dest=\"index\",\n metavar=\"URL\",\n default=PyPI.pypi_url,\n help=\"Base URL of Python Package Index (default %default)\",\n )\n\n self.parser.insert_option_group(0, self.cmd_opts)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 107, "n_words": 21, "vocab_size": 21, "complexity": 1, "nloc": 10, "token_counts": 48, "n_ast_nodes": 79, "n_identifiers": 12, "random_cut": "def add_options(self) -> None:\n self.cmd_opts.add_option(\n \"-i\",\n \"--index\",\n dest=\"index\",\n metavar=\"URL\",\n default=PyPI.pypi_url," }, { "id": 259086, "commit_id": "2e213c618841f3635885bab034606512c40a7fd4", "repo": "scikit-learn", "path": "sklearn/decomposition/tests/test_pca.py", "file_name": "test_pca.py", "fun_name": "test_variance_correctness", "commit_message": "FIX Reduces memory usage of `PCA.transform` (#22553)", "code": "def test_variance_correctness(copy):\n \n rng = np.random.RandomState(0)\n X = rng.randn(1000, 200)\n pca = PCA().fit(X)\n pca_var = pca.explained_variance_ / pca.explained_variance_ratio_\n true_var = np.var(X, ddof=1, axis=0).sum()\n np.testing.assert_allclose(pca_var, true_var)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 45, "n_words": 24, "vocab_size": 20, "complexity": 1, "nloc": 7, "token_counts": 75, "n_ast_nodes": 120, "n_identifiers": 21, "random_cut": "def test_variance_correctness(copy):\n \n rng = np.random.RandomState(0)\n X = rng.randn(1000, 200)\n pca = PCA().fit(X)\n pca_var = pca.explained_variance_ / pca.exp" }, { "id": 124115, "commit_id": "8fc340967654a09cfe00abfc325471258ea5b4e8", "repo": "ray", "path": "dashboard/modules/snapshot/snapshot_head.py", "file_name": "snapshot_head.py", "fun_name": "get_component_activities", "commit_message": "[dashboard] Add `component_activities` API (#25996)\n\nAdd /api/component_activities to the dashboard snapshot router which returns whether various Ray components are considered active\r\nThis currently only contains a response entry for drivers, but will add entries for other components on request as followups", "code": "async def get_component_activities(self, req) -> aiohttp.web.Response:\n # Get activity information for driver\n timeout = req.query.get(\"timeout\", None)\n if timeout and 
timeout.isdigit():\n timeout = int(timeout)\n else:\n timeout = 5\n\n driver_activity_info = await self._get_job_activity_info(timeout=timeout)\n\n resp = {\"driver\": dataclasses.asdict(driver_activity_info)}\n return aiohttp.web.Response(\n text=json.dumps(resp),\n content_type=\"application/json\",\n status=aiohttp.web.HTTPOk.status_code,\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 151, "n_words": 41, "vocab_size": 34, "complexity": 3, "nloc": 13, "token_counts": 99, "n_ast_nodes": 162, "n_identifiers": 23, "random_cut": "async def get_component_activities(self, req) -> aiohttp.web.Response:\n # Get activity information for driver\n timeout = req.query.get(\"timeout\", None)\n if timeout and timeout.isdigit():\n timeout = int(timeout)\n else:\n timeout = 5\n\n dr" }, { "id": 194804, "commit_id": "5322cd4f5821e339bf1edab98d93b5a008b97a2b", "repo": "ParlAI", "path": "parlai/core/build_data.py", "file_name": "build_data.py", "fun_name": "download_from_google_drive", "commit_message": "[circle] Fixing broken unit tests (#4343)", "code": "def download_from_google_drive(gd_id, destination):\n \n URL = 'https://docs.google.com/uc?export=download'\n\n with get_http_session() as session:\n response = session.get(URL, params={'id': gd_id}, stream=True)\n token = _get_confirm_token(response) or 't'\n\n if token:\n response.close()\n params = {'id': gd_id, 'confirm': token}\n response = session.get(URL, params=params, stream=True)\n\n CHUNK_SIZE = 32768\n with PathManager.open(destination, 'wb') as f:\n for chunk in response.iter_content(CHUNK_SIZE):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n response.close()\n\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 187, "n_words": 57, "vocab_size": 45, "complexity": 5, "nloc": 15, "token_counts": 120, "n_ast_nodes": 208, "n_identifiers": 20, "random_cut": "def download_from_google_drive(gd_id, destination):\n \n URL = 'https://docs.google.com/uc?export=download'\n\n with get_http_session() as session:\n response = session.get(URL, params={'id': gd_id}, stream=True)\n token = _get_confirm_token(response) or 't'\n\n if token:\n response.close()\n params = {'id': gd_id, 'confirm': token}\n response = session.get(URL, p" }, { "id": 93398, "commit_id": "e1482001662b446c7c2be7c9daa19cba562c615c", "repo": "sentry", "path": "tests/sentry/snuba/test_tasks.py", "file_name": "test_tasks.py", "fun_name": "test_event_types", "commit_message": "refs(metric_alerts): Consolidate `QueryDatasets` and `Dataset` (#36894)\n\nThis refactor pr removes `QueryDatasets` and just uses `Dataset` everywhere. `QueryDatasets` existed\r\nbefore `Dataset`, but `Dataset` is now more widely used and is more up to date. 
The values here are\r\nthe same, `Dataset` just supports a few more datasets.\r\n\r\nWe already make sure that only datasets that are valid for alerts can be passed to the alert rules\r\napi, so this won't allow people to attempt to create alerts on datasets that don't support them.", "code": "def test_event_types(self):\n self.create_release(self.project, version=\"something\")\n expected_conditions = [\n And(\n [\n Or(\n [\n Condition(Column(name=\"type\"), Op.EQ, \"error\"),\n Condition(Column(name=\"type\"), Op.EQ, \"default\"),\n ]\n ),\n Or(\n [\n Condition(\n Function(\n \"ifNull\", parameters=[Column(name=\"tags[sentry:release]\"), \"\"]\n ),\n Op.IN,\n [\"something\"],\n ),\n Condition(\n Function(\n \"ifNull\", parameters=[Column(name=\"tags[sentry:release]\"), \"\"]\n ),\n Op.IN,\n [\"123\"],\n ),\n ]\n ),\n ]\n ),\n Condition(Column(name=\"project_id\"), Op.IN, (self.project.id,)),\n ]\n self.run_test(\n SnubaQuery.Type.ERROR,\n Dataset.Events,\n \"count_unique(user)\",\n \"release:latest OR release:123\",\n expected_conditions,\n entity_extra_fields={\n \"event_types\": [\n SnubaQueryEventType.EventType.ERROR,\n SnubaQueryEventType.EventType.DEFAULT,\n ]\n },\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 23, "n_whitespaces": 953, "n_words": 63, "vocab_size": 39, "complexity": 1, "nloc": 46, "token_counts": 196, "n_ast_nodes": 310, "n_identifiers": 27, "random_cut": "def test_event_types(self):\n self.create_release(self.project, version=\"something\")\n expected_conditions = [\n And(\n [\n Or(\n [\n Condition" }, { "id": 110278, "commit_id": "9b6abd0b4933811e0a45c2535ab8fd107db65dd9", "repo": "matplotlib", "path": "lib/matplotlib/figure.py", "file_name": "figure.py", "fun_name": "add_artist", "commit_message": "DOC: improve grammar and consistency", "code": "def add_artist(self, artist, clip=False):\n \n artist.set_figure(self)\n self.artists.append(artist)\n artist._remove_method = self.artists.remove\n\n if not artist.is_transform_set():\n artist.set_transform(self.transSubfigure)\n\n if clip:\n artist.set_clip_path(self.patch)\n\n self.stale = True\n return artist\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 99, "n_words": 21, "vocab_size": 19, "complexity": 3, "nloc": 10, "token_counts": 69, "n_ast_nodes": 113, "n_identifiers": 15, "random_cut": "def add_artist(self, artist, clip=False):\n \n artist.set_figure(self)\n self.artists.append(artist)\n artist._remove_method = self.artists.remove\n\n if not artist.is_transform_set():\n " }, { "id": 265290, "commit_id": "4bb4bbce1461bee0644e97900006e3fe6d71a3e4", "repo": "netbox", "path": "netbox/ipam/forms/models.py", "file_name": "models.py", "fun_name": "clean", "commit_message": "Clean up validation", "code": "def clean(self):\n super().clean()\n\n interface = self.cleaned_data.get('interface')\n vminterface = self.cleaned_data.get('vminterface')\n vlan = self.cleaned_data.get('vlan')\n\n if not (interface or vminterface or vlan):\n raise ValidationError('A termination must specify an interface or VLAN.')\n if len([x for x in (interface, vminterface, vlan) if x]) > 1:\n raise ValidationError('A termination can only have one terminating object (an interface or VLAN).')\n\n self.instance.assigned_object = interface or vminterface or vlan\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 130, "n_words": 60, "vocab_size": 41, "complexity": 9, "nloc": 10, "token_counts": 95, "n_ast_nodes": 160, "n_identifiers": 13, "random_cut": "def clean(self):\n super().clean()\n\n interface = self.cleaned_data.get('interface')\n vminterface = self.cleaned_data.get('vminterface')\n vlan = self.cleaned_data.get('vlan')\n\n if not (interface or vminterface or vlan):\n raise ValidationError('A termination must specify an inter" }, { "id": 251338, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "mitmproxy/connection.py", "file_name": "connection.py", "fun_name": "__setattr__", "commit_message": "make it black!", "code": "def __setattr__(self, name, value):\n if name in (\"address\", \"via\"):\n connection_open = (\n self.__dict__.get(\"state\", ConnectionState.CLOSED)\n is ConnectionState.OPEN\n )\n # assigning the current value is okay, that may be an artifact of calling .set_state().\n attr_changed = self.__dict__.get(name) != value\n if connection_open and attr_changed:\n raise RuntimeError(f\"Cannot change server.{name} on open connection.\")\n return super().__setattr__(name, value)\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 164, "n_words": 51, "vocab_size": 46, "complexity": 4, "nloc": 10, "token_counts": 72, "n_ast_nodes": 121, "n_identifiers": 13, "random_cut": "def __setattr__(self, name, value):\n if name in (\"address\", \"via\"):\n connection_open = (\n self.__dict__.get(\"state\", ConnectionState.CLOSED)\n is ConnectionState.OPEN\n )\n # assigning the current value is okay, that may be an artifact of calling .set_state().\n attr_changed = self.__dict__.get(name) != value\n if connection_open and attr_changed:\n " }, { "id": 205473, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/expressions.py", "file_name": "expressions.py", "fun_name": "as_sql", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def as_sql(self, *args, **kwargs):\n raise ValueError(\n \"This queryset contains a reference to an outer query and may \"\n \"only be used in a subquery.\"\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 60, "n_words": 25, "vocab_size": 24, "complexity": 1, "nloc": 5, "token_counts": 17, "n_ast_nodes": 31, "n_identifiers": 5, "random_cut": "def as_sql(self, *args, **kwargs):\n raise ValueError(\n \"This queryset contains a" }, { "id": 27367, "commit_id": "20675c93d276101412b22439794d645a0a2fb3bd", "repo": "saleor", "path": "saleor/graphql/app/types.py", "file_name": "types.py", "fun_name": "has_access_to_app_public_meta", "commit_message": "Improved metadata permissions for apps (#9726)\n\n* Add improvment for handling permissions for apps\r\n\r\n* Fix failing tests\r\n\r\n* Apply changes after review\r\n\r\n* Add missing space in query description\r\n\r\n* Use permission name fields\r\n\r\n* Apply changes after review\r\n\r\n* Use + instead of f for field descriptions", "code": "def has_access_to_app_public_meta(root, info) -> bool:\n auth_token = info.context.decoded_auth_token or {}\n if auth_token.get(\"type\") == JWT_THIRDPARTY_ACCESS_TYPE:\n _, app_id = from_global_id_or_error(auth_token[\"app\"], \"App\")\n else:\n app_id = info.context.app.id if info.context.app else None\n if app_id is not None and 
int(app_id) == root.id:\n return True\n requester = get_user_or_app_from_context(info.context)\n return requester.has_perm(AppPermission.MANAGE_APPS)\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 81, "n_words": 43, "vocab_size": 33, "complexity": 6, "nloc": 10, "token_counts": 95, "n_ast_nodes": 153, "n_identifiers": 20, "random_cut": "def has_access_to_app_public_meta(root, info) -> bool:\n auth_token = info.context.decoded_auth_token or {}\n if auth_token.get(\"type\") == JWT_THIRDPARTY_ACCESS_TYPE:\n _, app_id = from_global_id_or_error(auth_token[\"app\"], \"App\")\n else:\n app_id = info.context.app.id if info.context.app else None\n if app_id is not None and int(app_id) == root.id:\n return True\n requester = get_user_or_app_from_context(info.context)\n return requester.has_perm(AppPermission.MANAGE_APPS)\n\n" }, { "id": 28777, "commit_id": "ac2d4ac172d37dd8e866b679b1a6538745b43c2b", "repo": "saleor", "path": "saleor/graphql/order/mutations/order_confirm.py", "file_name": "order_confirm.py", "fun_name": "perform_mutation", "commit_message": "Use dataloader for plugin manager (#10581)\n\n* Use dataloader for plugin manager\r\n\r\n* Temporary fix for context fixtures\r\n\r\n* Change layer where inactive user is rejected during auth\r\n\r\n* Missed merge\r\n\r\n* Rename 'load_pllugins' to 'load_plugin_manager'\r\n\r\n* Refactor middleware tests that turned to dataloaders tests to separate file\r\n\r\n* Use anonymous dataloader, remove TODOs\r\n\r\n* Use relative imports", "code": "def perform_mutation(cls, root, info, **data):\n order = cls.get_instance(info, **data)\n order.status = OrderStatus.UNFULFILLED\n order.save(update_fields=[\"status\", \"updated_at\"])\n order_info = fetch_order_info(order)\n payment = order_info.payment\n manager = load_plugin_manager(info.context)\n app = load_app(info.context)\n\n if payment_transactions := list(order.payment_transactions.all()):\n try:\n # We use the last transaction as we don't have a possibility to\n # provide way of handling multiple transaction here\n payment_transaction = payment_transactions[-1]\n request_charge_action(\n transaction=payment_transaction,\n manager=manager,\n charge_value=payment_transaction.authorized_value,\n channel_slug=order.channel.slug,\n user=info.context.user,\n app=app,\n )\n except PaymentError as e:\n raise ValidationError(\n str(e),\n code=OrderErrorCode.MISSING_TRANSACTION_ACTION_REQUEST_WEBHOOK,\n )\n elif payment and payment.is_authorized and payment.can_capture():\n gateway.capture(payment, manager, channel_slug=order.channel.slug)\n site = load_site(info.context)\n transaction.on_commit(\n lambda: order_captured(\n order_info,\n info.context.user,\n app,\n payment.total,\n payment,\n manager,\n site.settings,\n )\n )\n transaction.on_commit(\n lambda: order_confirmed(\n order,\n info.context.user,\n app,\n manager,\n send_confirmation_email=True,\n )\n )\n return OrderConfirm(order=order)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 774, "n_words": 108, "vocab_size": 85, "complexity": 6, "nloc": 48, "token_counts": 251, "n_ast_nodes": 380, "n_identifiers": 52, "random_cut": "def perform_mutation(cls, root, info, **data):\n order = cls.get_instance(info, **data)\n order.status = OrderStatus.UNFULFILLED\n order.save(update_fields=[\"status\", \"updated_at\"])\n order_info = fetch_order_info(order)\n 
payment = order_info.payment\n manager = load_plugin_manager(info.context)\n app = load_app(info.context)\n\n if payment_transactions := list(order.payment_transactions.all()):\n try:\n # We use the last transaction as we don't have a possibility to\n # provide way of handling multiple transaction here\n payment_transaction = payment_transactions[-1]\n request_charge_action(\n transaction=payment_transaction,\n manager=manager,\n charge_value=payment_transaction.authorized_value,\n channel_slug=order.channel.slug,\n user=info.context.user,\n app=app,\n )\n except PaymentError as e:\n raise ValidationError(\n str(e),\n code=OrderErrorCode.MISSING_TRANSACTION_ACTION_REQUEST_WEBHOOK,\n )\n elif payment and payment.is_authorized and payment.can_capture():\n gateway.capture(payment, manager, channel_slug=order.channel.slug)\n site = load_" }, { "id": 199275, "commit_id": "efb5f1f2b5a90d9542a4b4be7af75c9af079fa92", "repo": "sympy", "path": "sympy/physics/continuum_mechanics/beam.py", "file_name": "beam.py", "fun_name": "apply_moment_load", "commit_message": "Added solving for torsion for circular cross-sections", "code": "def apply_moment_load(self, value, start, order, dir=\"y\"):\n \n x = self.variable\n value = sympify(value)\n start = sympify(start)\n order = sympify(order)\n\n if dir == \"x\":\n if not order == -2:\n self._moment_load_vector[0] += value\n else:\n if start in list(self._torsion_moment):\n self._torsion_moment[start] += value\n else:\n self._torsion_moment[start] = value\n self._load_Singularity[0] += value*SingularityFunction(x, start, order)\n elif dir == \"y\":\n if not order == -2:\n self._moment_load_vector[1] += value\n self._load_Singularity[0] += value*SingularityFunction(x, start, order)\n else:\n if not order == -2:\n self._moment_load_vector[2] += value\n self._load_Singularity[0] += value*SingularityFunction(x, start, order)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 324, "n_words": 78, "vocab_size": 34, "complexity": 7, "nloc": 22, "token_counts": 177, "n_ast_nodes": 280, "n_identifiers": 14, "random_cut": "def apply_moment_load(self, value, start, order, dir=\"y\"):\n \n x = self.variable\n value = sympify(value)\n start = sympify(start)\n order = sympify(order)\n\n if dir == \"x\":\n if not order == -2:\n self._moment_load_vector[0] += value\n else:\n if start in list(self._torsion_moment):\n self._torsion_moment[start] += value\n else:\n self._torsion_moment[start] = value\n self._load_Singularity[0] += value*SingularityFunction(x, start, order)\n elif dir == \"y\":\n if not order == -2:\n self._moment_load_vector[1] += value\n self._load_Singularity[0] += value*SingularityFunction(x, start, order)\n else:\n if not order == -2:\n self._moment_load_vector[2] += value\n self._load_Singularity[0] += value*SingularityFunction(x, start, order)\n" }, { "id": 321830, "commit_id": "f7753550f2c1dcb2348e4779fd5287166754827e", "repo": "qutebrowser", "path": "tests/unit/keyinput/test_keyutils.py", "file_name": "test_keyutils.py", "fun_name": "test_is_special", "commit_message": "keyutils: Move public functions to KeyInfo\n\nThis avoids the temptation of creating a Qt.Key() manually, which needs\nto be checked for ValueError with PyQt 6.2 due to its handling of unknown enum\nvalues.\n\nThis is exactly what happened in RegisterKeyParser, which caused such a\nValueError:\nhttps://github.com/qutebrowser/qutebrowser/issues/7047#issuecomment-1163288560\n\nCloses #7047", "code": 
"def test_is_special(key, modifiers, special):\n assert keyutils.KeyInfo(key, modifiers).is_special() == special\n\n\n@pytest.mark.parametrize('key, ismodifier', [\n (Qt.Key.Key_Control, True),\n (Qt.Key.Key_X, False),\n (Qt.Key.Key_Super_L, False), # Modifier but not in _MODIFIER_MAP\n])", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize('key, ismodifier', [\n (Qt.Key.Key_Control, True),\n (Qt.Key.Key_X, False),\n (Qt.Key.Key_Super_L, False), # Modifier but not in _MODIFIER_MAP\n])", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 35, "n_words": 25, "vocab_size": 24, "complexity": 1, "nloc": 2, "token_counts": 24, "n_ast_nodes": 97, "n_identifiers": 15, "random_cut": "def test_is_special(key, modifiers, special):\n assert keyutils.KeyInfo(key, modifiers).is_special() == special\n\n\n@pytest.mark.parametrize('key, ismodifier', [\n (Qt.Key.Key_Control, True),\n (Qt.Key.Key_X, False),\n (Qt.Key.Key_Super_L, False), # Modifier but not in _MODIFIER_MAP\n])" }, { "id": 93303, "commit_id": "5ceaca63890c6c660b4d061e800ccbf8c90c6e20", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events_mep.py", "file_name": "test_organization_events_mep.py", "fun_name": "test_non_metrics_tag_with_implicit_format_metrics_dataset", "commit_message": "ref(MEP): Add new option to query tag values as strings from clickhouse (#36397)\n\nCo-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com>", "code": "def test_non_metrics_tag_with_implicit_format_metrics_dataset(self):\n self.store_transaction_metric(\n 1,\n tags={\"environment\": \"staging\", \"transaction\": \"foo_transaction\"},\n timestamp=self.min_ago,\n )\n\n response = self.do_request(\n {\n \"field\": [\"test\", \"p50(transaction.duration)\"],\n \"query\": \"event.type:transaction\",\n \"dataset\": \"metrics\",\n \"per_page\": 50,\n }\n )\n assert response.status_code == 400, response.content\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 179, "n_words": 30, "vocab_size": 29, "complexity": 1, "nloc": 15, "token_counts": 69, "n_ast_nodes": 122, "n_identifiers": 10, "random_cut": "def test_non_metrics_tag_with_implicit_format_metrics_dataset(self):\n self.store_transaction_metric(\n 1,\n tags={\"environment\": \"staging\", \"transaction\": \"foo_transaction\"},\n timestamp=self.min_ago,\n )\n\n response = self.do_request(\n {\n \"field\": [\"test\", \"p50(transaction.duration)\"],\n \"query\": \"event.type:transaction\",\n \"dataset\": \"metrics\",\n \"per_page\": 50,\n }\n )\n assert response.status_code == 400, response.content\n" }, { "id": 222798, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/command/sdist.py", "file_name": "sdist.py", "fun_name": "_add_defaults_optional", "commit_message": "add python 3.10.4 for windows", "code": "def _add_defaults_optional(self):\n optional = ['test/test*.py', 'setup.cfg']\n for pattern in optional:\n files = filter(os.path.isfile, glob(pattern))\n self.filelist.extend(files)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 50, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 5, "token_counts": 40, "n_ast_nodes": 66, "n_identifiers": 12, "random_cut": "def _add_defaults_optional(self):\n optional = ['test/test*.py'," }, { "id": 
101460, "commit_id": "13cfb3f39e72e9ca181f173b7b3db2a048db0d08", "repo": "faceswap", "path": "plugins/extract/pipeline.py", "file_name": "pipeline.py", "fun_name": "input_queue", "commit_message": "extract: Add batch processing mode", "code": "def input_queue(self) -> EventQueue:\n \n qname = f\"extract{self._instance}_{self._current_phase[0]}_in\"\n retval = self._queues[qname]\n logger.trace(\"%s: %s\", qname, retval) # type: ignore\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 55, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 16, "token_counts": 32, "n_ast_nodes": 73, "n_identifiers": 10, "random_cut": "def input_queue(self) -> EventQueue:\n \n qname = f\"extract{self._instance}_{self._current_phase[0]}_in\"\n retval = self._queu" }, { "id": 207682, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_custom_admin_site_view", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_custom_admin_site_view(self):\n self.client.force_login(self.superuser)\n response = self.client.get(reverse(\"admin2:my_view\"))\n self.assertEqual(response.content, b\"Django is a magical pony!\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 32, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 39, "n_ast_nodes": 65, "n_identifiers": 10, "random_cut": "def test_custom_admin_site_view(self):\n self.client." }, { "id": 169146, "commit_id": "12dce19a74b7cd5badad0f61ca079b873c1b6089", "repo": "pandas", "path": "pandas/tests/extension/test_arrow.py", "file_name": "test_arrow.py", "fun_name": "test_setitem_mask_aligned", "commit_message": "BUG/TST: fix a bunch of arraymanager+pyarrow tests (#48428)\n\n* BUG/TST: fix a bunch of arraymanager+pyarrow tests\r\n\r\n* remove unnecessary using_array_manager", "code": "def test_setitem_mask_aligned(self, data, as_callable, setter, request):\n tz = getattr(data.dtype.pyarrow_dtype, \"tz\", None)\n if pa_version_under2p0 and tz not in (None, \"UTC\"):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=(f\"Not supported by pyarrow < 2.0 with timestamp type {tz}\")\n )\n )\n super().test_setitem_mask_aligned(data, as_callable, setter)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 127, "n_words": 36, "vocab_size": 33, "complexity": 3, "nloc": 9, "token_counts": 71, "n_ast_nodes": 112, "n_identifiers": 18, "random_cut": "def test_setitem_mask_aligned(self, data, as_callable, setter, request):\n tz = getattr(data.dtype.pyarrow_dtype, \"tz\", None)\n if pa_version_under2p0 and tz not in (None, \"UTC\"):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=(f\"Not supported by pyarrow < 2.0 with timestamp type {tz}\")\n )\n )\n super().test_setitem_mask_aligned(data, as" }, { "id": 71907, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_contentstate.py", "file_name": "test_contentstate.py", "fun_name": "test_wrapped_block_retains_key", "commit_message": "Reformat with black", "code": "def test_wrapped_block_retains_key(self):\n # Test a block which uses a wrapper correctly receives the key defined on the inner element\n 
converter = ContentstateConverter(features=[\"h1\", \"ol\", \"bold\", \"italic\"])\n result = converter.to_database_format(\n json.dumps(\n {\n \"entityMap\": {},\n \"blocks\": [\n {\n \"inlineStyleRanges\": [],\n \"text\": \"The rules of Fight Club\",\n \"depth\": 0,\n \"type\": \"header-one\",\n \"key\": \"00000\",\n \"entityRanges\": [],\n },\n {\n \"inlineStyleRanges\": [],\n \"text\": \"You do not talk about Fight Club.\",\n \"depth\": 0,\n \"type\": \"ordered-list-item\",\n \"key\": \"00001\",\n \"entityRanges\": [],\n },\n {\n \"inlineStyleRanges\": [],\n \"text\": \"You do not talk about Fight Club.\",\n \"depth\": 0,\n \"type\": \"ordered-list-item\",\n \"key\": \"00002\",\n \"entityRanges\": [],\n },\n ],\n }\n )\n )\n self.assertHTMLEqual(\n result,\n ,\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 896, "n_words": 100, "vocab_size": 62, "complexity": 1, "nloc": 45, "token_counts": 141, "n_ast_nodes": 264, "n_identifiers": 10, "random_cut": "def test_wrapped_block_retains_key(self):\n # Test a block which uses a wrapper correctly receives the key defined on the inner element\n converter = ContentstateConverter(features=[\"h1\", \"ol\", \"bold\", \"italic\"])\n result = converter.to_database_format(\n json.dumps(\n {\n \"entityMap\": {},\n \"blocks\": [\n {\n \"inlineStyleRanges\": [],\n \"text\": \"The rules of Fight Club\",\n \"depth\": 0,\n \"type\": \"header-one\",\n \"key\": \"00000\",\n \"entityRanges\": [],\n },\n {\n \"inlineStyleRanges\": [],\n \"text\": \"You do not talk about Fight Club.\",\n \"depth\": 0,\n \"type\": \"ordered-list-item\",\n \"key\": \"00001\",\n \"entityRanges\": [],\n },\n {\n \"inlineStyleRanges\": [],\n \"text\": \"You do not talk about Fight Clu" }, { "id": 5688, "commit_id": "d79b319819650f99fae2ab8c6c8d3ab25d474cf1", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-mixpanel/unit_tests/test_property_transformation.py", "file_name": "test_property_transformation.py", "fun_name": "export_response", "commit_message": ":tada: Source Mixpanel: Beta preparation (#13372)\n\n* Add extra mode to Source, to allow run acceptance tests\r\n* move streams into distinct modules\r\n* Add property name transformation for Export stream for avoiding collisions\r\n* Update doc\r\n* Add `date_window_size`", "code": "def export_response():\n return setup_response(\n 200,\n {\n \"event\": \"Problem event\",\n \"properties\": {\n \"distinct_id\": \"1d694fd9-31a5-4b99-9eef-ae63112063ed\",\n \"$userName\": \"1\",\n \"userName\": \"2\",\n \"username\": \"3\",\n },\n },\n )\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 141, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 13, "token_counts": 38, "n_ast_nodes": 75, "n_identifiers": 2, "random_cut": "def export_response():\n return setup_response(\n 200,\n {\n \"event\": \"Problem event\",\n \"properties\": {\n \"distinct_id\": \"1d694fd9-31a5-4b99-9eef-ae6" }, { "id": 5054, "commit_id": "d4f8b25b8e3e109db866352cf1dcec0d73c92cbd", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-google-ads/unit_tests/test_source.py", "file_name": "test_source.py", "fun_name": "client_mock", "commit_message": "Source Google Ads: Improve unit and integration tests (#12651)\n\n* #12650 source Googel ads: tests\r\n\r\n* #12650 source google ads: add changelog item\r\n\r\n* #12650 source google 
ads: add comments to tests\r\n\r\n* auto-bump connector version\r\n\r\nCo-authored-by: Octavia Squidington III ", "code": "def client_mock(config):\n google_api = GoogleAds(credentials=config[\"credentials\"], customer_id=config[\"customer_id\"])\n client = AdGroupAdReport(\n start_date=config[\"start_date\"], api=google_api, conversion_window_days=config[\"conversion_window_days\"], time_zone=\"local\"\n )\n client._customer_id = \"1234567890\"\n return client\n\n\n@pytest.fixture()", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "@pytest.fixture()", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 40, "n_words": 20, "vocab_size": 17, "complexity": 1, "nloc": 7, "token_counts": 56, "n_ast_nodes": 105, "n_identifiers": 15, "random_cut": "def client_mock(config):\n google_api = GoogleAds(credentials=config[\"credentials\"], customer_id=config[\"customer_id\"])\n client = AdGroupAdReport(\n start_date=config[\"start_date\"], api=google_api, conversion_window_days=config[\"" }, { "id": 66855, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v14_0/rearrange_company_fields.py", "file_name": "rearrange_company_fields.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tcustom_fields = {\n\t\t\"Company\": [\n\t\t\tdict(\n\t\t\t\tfieldname=\"hra_section\",\n\t\t\t\tlabel=\"HRA Settings\",\n\t\t\t\tfieldtype=\"Section Break\",\n\t\t\t\tinsert_after=\"asset_received_but_not_billed\",\n\t\t\t\tcollapsible=1,\n\t\t\t),\n\t\t\tdict(\n\t\t\t\tfieldname=\"basic_component\",\n\t\t\t\tlabel=\"Basic Component\",\n\t\t\t\tfieldtype=\"Link\",\n\t\t\t\toptions=\"Salary Component\",\n\t\t\t\tinsert_after=\"hra_section\",\n\t\t\t),\n\t\t\tdict(\n\t\t\t\tfieldname=\"hra_component\",\n\t\t\t\tlabel=\"HRA Component\",\n\t\t\t\tfieldtype=\"Link\",\n\t\t\t\toptions=\"Salary Component\",\n\t\t\t\tinsert_after=\"basic_component\",\n\t\t\t),\n\t\t\tdict(fieldname=\"hra_column_break\", fieldtype=\"Column Break\", insert_after=\"hra_component\"),\n\t\t\tdict(\n\t\t\t\tfieldname=\"arrear_component\",\n\t\t\t\tlabel=\"Arrear Component\",\n\t\t\t\tfieldtype=\"Link\",\n\t\t\t\toptions=\"Salary Component\",\n\t\t\t\tinsert_after=\"hra_column_break\",\n\t\t\t),\n\t\t\tdict(\n\t\t\t\tfieldname=\"non_profit_section\",\n\t\t\t\tlabel=\"Non Profit Settings\",\n\t\t\t\tfieldtype=\"Section Break\",\n\t\t\t\tinsert_after=\"arrear_component\",\n\t\t\t\tcollapsible=1,\n\t\t\t),\n\t\t\tdict(\n\t\t\t\tfieldname=\"company_80g_number\",\n\t\t\t\tlabel=\"80G Number\",\n\t\t\t\tfieldtype=\"Data\",\n\t\t\t\tinsert_after=\"non_profit_section\",\n\t\t\t),\n\t\t\tdict(\n\t\t\t\tfieldname=\"with_effect_from\",\n\t\t\t\tlabel=\"80G With Effect From\",\n\t\t\t\tfieldtype=\"Date\",\n\t\t\t\tinsert_after=\"company_80g_number\",\n\t\t\t),\n\t\t\tdict(\n\t\t\t\tfieldname=\"non_profit_column_break\", fieldtype=\"Column Break\", insert_after=\"with_effect_from\"\n\t\t\t),\n\t\t\tdict(\n\t\t\t\tfieldname=\"pan_details\",\n\t\t\t\tlabel=\"PAN Number\",\n\t\t\t\tfieldtype=\"Data\",\n\t\t\t\tinsert_after=\"non_profit_column_break\",\n\t\t\t),\n\t\t]\n\t}\n\n\tcreate_custom_fields(custom_fields, update=True)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 27, "n_words": 90, "vocab_size": 54, "complexity": 1, "nloc": 63, "token_counts": 230, "n_ast_nodes": 390, "n_identifiers": 11, "random_cut": "def 
execute():\n\tcustom_fields = {\n\t\t\"Company\": [\n\t\t\tdict(\n\t\t\t\tfieldname=\"hra_section\",\n\t\t\t\tlabel=\"HRA Settings\",\n\t\t\t\tfieldtype=\"Section Break\",\n\t\t\t\tinsert_after=\"asset_received_but_not_billed\",\n\t\t\t\tcollapsible=1,\n\t\t\t),\n\t\t\tdict(\n\t\t\t\tfieldname=\"basic_component\",\n\t\t\t\tlabel=\"Basic Component\",\n\t\t\t\tfieldtype=\"Link\",\n\t\t\t\toptions=\"Salary Component\",\n\t\t\t\tinsert_after=\"hra_section\",\n\t\t\t),\n\t\t\tdict(\n\t\t\t\tfieldname=\"hra_component\",\n\t\t\t\tlabel=\"HRA Component\",\n\t\t\t\tfieldtype=\"Link\",\n\t\t\t\toptions=\"Salary Compo" }, { "id": 206308, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/templatetags/i18n.py", "file_name": "i18n.py", "fun_name": "do_get_available_languages", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def do_get_available_languages(parser, token):\n \n # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments\n args = token.contents.split()\n if len(args) != 3 or args[1] != \"as\":\n raise TemplateSyntaxError(\n \"'get_available_languages' requires 'as variable' (got %r)\" % args\n )\n return GetAvailableLanguagesNode(args[2])\n\n\n@register.tag(\"get_language_info\")", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "@register.tag(\"get_language_info\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 80, "n_words": 41, "vocab_size": 39, "complexity": 3, "nloc": 7, "token_counts": 47, "n_ast_nodes": 95, "n_identifiers": 11, "random_cut": "def do_get_available_languages(parser, token):\n \n # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments\n args = token.contents.split()\n if len(args) != 3 or args[1] != \"as\":\n " }, { "id": 111655, "commit_id": "c56568c9733fa286c1753a54fab2fd892a5cf6d5", "repo": "nni", "path": "setup.py", "file_name": "setup.py", "fun_name": "_setup", "commit_message": "Bump Python version to 3.7~3.9 (#4475)", "code": "def _setup():\n setuptools.setup(\n name = 'nni',\n version = release or '999.dev0',\n description = 'Neural Network Intelligence project',\n long_description = open('README.md', encoding='utf-8').read(),\n long_description_content_type = 'text/markdown',\n url = 'https://github.com/Microsoft/nni',\n author = 'Microsoft NNI Team',\n author_email = 'nni@microsoft.com',\n license = 'MIT',\n classifiers = [\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n\n packages = _find_python_packages(),\n package_data = {\n 'nni': _find_requirements_txt() + _find_default_config(), # setuptools issue #1806\n 'nni_node': _find_node_files() # note: this does not work before building\n },\n\n data_files = _get_data_files(),\n\n python_requires = '>=3.7',\n install_requires = _read_requirements_txt('dependencies/required.txt'),\n extras_require = {\n 'SMAC': _read_requirements_txt('dependencies/required_extra.txt', 'SMAC'),\n 'BOHB': _read_requirements_txt('dependencies/required_extra.txt', 'BOHB'),\n 'PPOTuner': _read_requirements_txt('dependencies/required_extra.txt', 'PPOTuner'),\n 'DNGO': _read_requirements_txt('dependencies/required_extra.txt', 'DNGO'),\n },\n setup_requires = ['requests'],\n\n entry_points = {\n 
'console_scripts' : [\n 'nnictl = nni.tools.nnictl.nnictl:parse_args'\n ]\n },\n\n cmdclass = {\n 'build': Build,\n 'build_ts': BuildTs,\n 'clean': Clean,\n 'develop': Develop,\n }\n )\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 550, "n_words": 162, "vocab_size": 114, "complexity": 2, "nloc": 46, "token_counts": 195, "n_ast_nodes": 348, "n_identifiers": 36, "random_cut": "def _setup():\n setuptools.setup(\n name = 'nni',\n version = release or '999.dev0',\n description = 'Neural Network Intelligence project',\n long_description = open('README.md', encoding='utf-8').read(),\n long_description_content_type = 'text/markdown',\n url = 'https://github.com/Microsoft/nni',\n author = 'Microsoft NNI Team',\n author_email = 'nni@microsoft.com',\n license = 'MIT',\n classifiers = [\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n\n packages = _find_python_packages(),\n package_data = {\n 'nni': _find_requirements_txt() + _find_default_config(), # setuptools issue #1806\n 'nni_node': _find_node_files() # note: this does not work before building\n },\n\n data_files = _get_data_files(),\n\n python_requires = '>=3.7',\n install_requires = _read_requirements_txt('dependencies/required.txt'),\n extras_require = {\n 'SMAC': _read_requirements_txt('dependencies/required_extra.txt', 'SMAC'),\n 'BOHB': _read_requirements_txt('dependencies/required_extra.txt', 'BOHB'),\n 'PPOTuner': _read_requirements_txt('dependencies/required_extra.txt', 'PPOTuner'),\n 'DNGO': _read_requirements_txt('dependencies/required_extra.txt', 'DNGO'),\n },\n setup_requires = ['requests'],\n\n entry_points " }, { "id": 38679, "commit_id": "adc0ff25028d29af30386f2d7d3f85e290fbef57", "repo": "transformers", "path": "tests/models/cvt/test_modeling_cvt.py", "file_name": "test_modeling_cvt.py", "fun_name": "create_and_test_config_common_properties", "commit_message": "Add CvT (#17299)\n\n* Adding cvt files\r\n\r\n* Adding cvt files\r\n\r\n* changes in init file\r\n\r\n* Adding cvt files\r\n\r\n* changes in init file\r\n\r\n* Style fixes\r\n\r\n* Address comments from code review\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Format lists in docstring\r\n\r\n* Fix copies\r\n\r\n* Apply suggestion from code review\r\n\r\nCo-authored-by: AnugunjNaman \r\nCo-authored-by: Ayushman Singh \r\nCo-authored-by: Niels Rogge \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def create_and_test_config_common_properties(self):\n config = self.config_class(**self.inputs_dict)\n self.parent.assertTrue(hasattr(config, \"embed_dim\"))\n self.parent.assertTrue(hasattr(config, \"num_heads\"))\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 29, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 42, "n_ast_nodes": 71, "n_identifiers": 8, "random_cut": "def create_and_test_config_common_properties(self):\n config = self.config_class(**self.inputs_dict)\n self.parent.assertTrue(hasattr(config, \"embed_dim\"))\n self.parent.assertTrue(hasattr(c" }, { "id": 
9777, "commit_id": "7f314ee10c9ad83816aa795c5ef6ebc378de3acf", "repo": "gensim", "path": "gensim/models/keyedvectors.py", "file_name": "keyedvectors.py", "fun_name": "distances", "commit_message": "Fix typo in word2vec and KeyedVectors docstrings (#3365)\n\n* Add missing word in word2vec docstring\r\n\r\n* Fix docstring typo in KeyedVectors distances()\r\n\r\nword_or_vector, not word_or_vectors", "code": "def distances(self, word_or_vector, other_words=()):\n \n if isinstance(word_or_vector, _KEY_TYPES):\n input_vector = self.get_vector(word_or_vector)\n else:\n input_vector = word_or_vector\n if not other_words:\n other_vectors = self.vectors\n else:\n other_indices = [self.get_index(word) for word in other_words]\n other_vectors = self.vectors[other_indices]\n return 1 - self.cosine_similarities(input_vector, other_vectors)\n", "url": "https://github.com/RaRe-Technologies/gensim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 133, "n_words": 36, "vocab_size": 28, "complexity": 4, "nloc": 33, "token_counts": 78, "n_ast_nodes": 124, "n_identifiers": 14, "random_cut": "def distances(self, word_or_vector, other_words=()):\n \n if isinstance(word_or_vector, _KEY_TYPES):\n input_vector = self.get_vector(word_or_vector)\n else:\n input_vector = word_or_vector\n if not other_words:\n other_vectors = self.vectors\n else:\n other_indices = [self.get_index(word) for word in " }, { "id": 207066, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_default_site/tests.py", "file_name": "tests.py", "fun_name": "test_use_custom_admin_site", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_use_custom_admin_site(self):\n self.assertEqual(admin.site.__class__.__name__, \"CustomAdminSite\")\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 10, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 19, "n_ast_nodes": 32, "n_identifiers": 7, "random_cut": "def test_use_custom_admin_site(self):\n self.assertEqual(admin.site.__class__.__name__, \"CustomAdminSite\")\n\n" }, { "id": 255671, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/test/shape_inference_test.py", "file_name": "shape_inference_test.py", "fun_name": "test_conv_transpose_with_pads", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": 
"def test_conv_transpose_with_pads(self) -> None:\n graph = self._make_graph(\n [('X', TensorProto.FLOAT, (25, 48, 16, 16)),\n ('W', TensorProto.FLOAT, (48, 32, 3, 3))],\n [make_node('ConvTranspose', ['X', 'W'], 'Y', strides=[2, 2], pads=[1, 1, 2, 2])],\n [])\n self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 32, 30, 30))])\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 95, "n_words": 37, "vocab_size": 33, "complexity": 1, "nloc": 7, "token_counts": 116, "n_ast_nodes": 165, "n_identifiers": 11, "random_cut": "def test_conv_transpose_with_pads(self) -> None:\n graph = self._make_graph(\n [('X', TensorProto.FLOAT, (25, 48, 16, 16)),\n ('W', TensorProto.FLOAT," }, { "id": 20773, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/rich/live_render.py", "file_name": "live_render.py", "fun_name": "position_cursor", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def position_cursor(self) -> Control:\n \n if self._shape is not None:\n _, height = self._shape\n return Control(\n ControlType.CARRIAGE_RETURN,\n (ControlType.ERASE_IN_LINE, 2),\n *(\n (\n (ControlType.CURSOR_UP, 1),\n (ControlType.ERASE_IN_LINE, 2),\n )\n * (height - 1)\n )\n )\n return Control()\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 250, "n_words": 33, "vocab_size": 27, "complexity": 2, "nloc": 20, "token_counts": 70, "n_ast_nodes": 105, "n_identifiers": 10, "random_cut": "def position_cursor(self) -> Control:\n \n if self._shape is not None:\n _, height = self._shap" }, { "id": 321989, "commit_id": "54f2f0694a6451934f997753b07d3587ad6345b9", "repo": "PaddleNLP", "path": "applications/sentiment_analysis/predict.py", "file_name": "predict.py", "fun_name": "predict_cls", "commit_message": "refine readme and fix dtype problem in windows (#1568)\n\n* sentiment analysis initializing\r\n\r\n* modify README.md\r\n\r\n* sentiment analysis initializing\r\n\r\n* modify data_ext and data_cls link in README.md\r\n\r\n* sentiment analysis initializing\r\n\r\n* sentiment analysis initializing\r\n\r\n* sentiment analysis initializing\r\n\r\n* delete unuseful info.\r\n\r\n* sentiment analysis initializing\r\n\r\n* sentiment analysis initializing\r\n\r\n* sentiment analysis initializing\r\n\r\n* sentiment analysis initializing\r\n\r\n* delete sentiment_system.png\r\n\r\n* add sentiment_system.png\r\n\r\n* refine readme.md\r\n\r\n* sentiment analysis intializing\r\n\r\n* mv data and checkpoints in sub_dir to parent_dir\r\n\r\n* refine readme.md\r\n\r\n* refine readme\r\n\r\n* refine readme.md, modify requirements\r\n\r\n* refine readme.md\r\n\r\n* refine readme.md\r\n\r\n* refine readme.md\r\n\r\n* mv run_export.sh to run_export_model.sh\r\n\r\n* refine readme.md\r\n\r\n* remove some\r\n\r\n* 
remove some unnecessary packages\r\n\r\n* delete unuseful compute_md5 method\r\n\r\n* sentiment analysis initializing\r\n\r\n* sentiment analysis initializing\r\n\r\n* refine readme.md\r\n\r\n* set CUDA_VISIBLE_DEVICES=0\r\n\r\n* refine style with pre-commit\r\n\r\n* use ppminilm in ernie instead of offline ppminilm\r\n\r\n* refine readme\r\n\r\n* load ppminilm in transformers.PPMiniLMModel, not load in ErnieModel\r\n\r\n* use model in transformers, not implement it personally\r\n\r\n* refine style with pre-commit\r\n\r\n* set paddlenlp version == 2.2.2\r\n\r\n* refine readme\r\n\r\n* refine readme\r\n\r\n* refine readme\r\n\r\n* fix bug, make sure that length of input_text and label is equal when deocding\r\n\r\n* fix bug, make sure that the length of input_text and label is equal\r\n\r\n* fix some mistakes in readme\r\n\r\n* refine procedure\r\n\r\n* refine readme\r\n\r\n* set dtype=64 to fix dtype problem in windows\r\n\r\n* set dtype=64 to fix dtype problem in windows\r\n\r\n* set 2 max_seq_len for ext_ and cls_ model\r\n\r\n* refine readme\r\n\r\nCo-authored-by: Zeyu Chen \r\nCo-authored-by: chenxiaozeng ", "code": "def predict_cls(args, ext_results):\n # load dict\n model_name = \"skep_ernie_1.0_large_ch\"\n cls_label2id, cls_id2label = load_dict(args.cls_label_path)\n\n tokenizer = SkepTokenizer.from_pretrained(model_name)\n test_ds = MapDataset(ext_results)\n trans_func = partial(\n convert_example_to_feature_cls,\n tokenizer=tokenizer,\n label2id=cls_label2id,\n max_seq_len=args.cls_max_seq_len,\n is_test=True)\n test_ds = test_ds.map(trans_func, lazy=False)\n\n batchify_fn = lambda samples, fn=Tuple(\n Pad(axis=0, pad_val=tokenizer.pad_token_id),\n Pad(axis=0, pad_val=tokenizer.pad_token_type_id),\n Stack(dtype=\"int64\")): fn(samples)\n\n # set shuffle is False\n test_batch_sampler = paddle.io.BatchSampler(\n test_ds, batch_size=args.batch_size, shuffle=False)\n test_loader = paddle.io.DataLoader(\n test_ds, batch_sampler=test_batch_sampler, collate_fn=batchify_fn)\n print(\"test data loaded.\")\n\n # load cls model\n cls_state_dict = paddle.load(args.cls_model_path)\n cls_model = SkepForSequenceClassification.from_pretrained(\n model_name, num_classes=len(cls_label2id))\n cls_model.load_dict(cls_state_dict)\n print(\"classification model loaded.\")\n\n cls_model.eval()\n\n results = []\n for bid, batch_data in enumerate(test_loader):\n input_ids, token_type_ids, seq_lens = batch_data\n logits = cls_model(input_ids, token_type_ids=token_type_ids)\n\n predictions = logits.argmax(axis=1).numpy().tolist()\n results.extend(predictions)\n\n results = [cls_id2label[pred_id] for pred_id in results]\n return results\n\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 279, "n_words": 109, "vocab_size": 81, "complexity": 3, "nloc": 35, "token_counts": 268, "n_ast_nodes": 420, "n_identifiers": 66, "random_cut": "def predict_cls(args, ext_results):\n # load dict\n model_name = \"skep_ernie_1.0_large_ch\"\n cls_label2id, cls_i" }, { "id": 171821, "commit_id": "5206bb52c3d0daff00dde93e360920ad02995998", "repo": "pandas", "path": "pandas/tests/dtypes/test_missing.py", "file_name": "test_missing.py", "fun_name": "test_decimal", "commit_message": "DEPR: remove some more Int/UInt/Float64Index from tests (#50075)\n\n* DEPR: remove some more Int/UInt/Float64Index from tests\r\n\r\n* fix failed tests\r\n\r\n* fix failed code check\r\n\r\n* fix tests\r\n\r\n* fix tests 2\r\n\r\n* fix tests 
3\r\n\r\n* fix tests\r\n\r\nCo-authored-by: Terji Petersen \r\nCo-authored-by: Terji Petersen ", "code": "def test_decimal(self):\n # scalars GH#23530\n a = Decimal(1.0)\n assert isna(a) is False\n assert notna(a) is True\n\n b = Decimal(\"NaN\")\n assert isna(b) is True\n assert notna(b) is False\n\n # array\n arr = np.array([a, b])\n expected = np.array([False, True])\n result = isna(arr)\n tm.assert_numpy_array_equal(result, expected)\n\n result = notna(arr)\n tm.assert_numpy_array_equal(result, ~expected)\n\n # series\n ser = Series(arr)\n expected = Series(expected)\n result = isna(ser)\n tm.assert_series_equal(result, expected)\n\n result = notna(ser)\n tm.assert_series_equal(result, ~expected)\n\n # index\n idx = Index(arr)\n expected = np.array([False, True])\n result = isna(idx)\n tm.assert_numpy_array_equal(result, expected)\n\n result = notna(idx)\n tm.assert_numpy_array_equal(result, ~expected)\n\n\n@pytest.mark.parametrize(\"dtype_equal\", [True, False])", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"dtype_equal\", [True, False])", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 281, "n_words": 87, "vocab_size": 46, "complexity": 1, "nloc": 25, "token_counts": 188, "n_ast_nodes": 325, "n_identifiers": 22, "random_cut": "def test_decimal(self):\n # scalars GH#23530\n a = Decimal(1.0)\n assert isna(a) is False\n assert notna(a) is True\n\n b = Decimal(\"NaN\")\n assert isna(b) is True\n assert notna(b) is False\n\n # array\n arr = np.array([a, b])\n expected = np.array([False, True])\n result = isna(arr)\n tm.ass" }, { "id": 62991, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pep517/in_process/_in_process.py", "file_name": "_in_process.py", "fun_name": "_dist_info_files", "commit_message": "upd; format", "code": "def _dist_info_files(whl_zip):\n \n res = []\n for path in whl_zip.namelist():\n m = re.match(r'[^/\\\\]+-[^/\\\\]+\\.dist-info/', path)\n if m:\n res.append(path)\n if res:\n return res\n raise Exception(\"No .dist-info folder found in wheel\")\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 74, "n_words": 27, "vocab_size": 23, "complexity": 4, "nloc": 9, "token_counts": 49, "n_ast_nodes": 84, "n_identifiers": 10, "random_cut": "def _dist_info_files(whl_zip):\n \n res = []\n for " }, { "id": 150195, "commit_id": "c9d46a5237a251193390a4c511737741773f851c", "repo": "freqtrade", "path": "freqtrade/freqai/data_drawer.py", "file_name": "data_drawer.py", "fun_name": "create_follower_dict", "commit_message": "finish bringing follow_mode up to date", "code": "def create_follower_dict(self):\n \n\n whitelist_pairs = self.config.get(\"exchange\", {}).get(\"pair_whitelist\")\n\n exists = self.follower_dict_path.is_file()\n\n if exists:\n logger.info(\"Found an existing follower dictionary\")\n\n for pair in whitelist_pairs:\n self.follower_dict[pair] = {}\n\n with open(self.follower_dict_path, \"w\") as fp:\n json.dump(self.follower_dict, fp, default=self.np_encoder)\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 106, "n_words": 31, "vocab_size": 29, "complexity": 3, "nloc": 9, "token_counts": 84, "n_ast_nodes": 144, "n_identifiers": 18, "random_cut": "def create_follower_dict(self):\n \n\n 
whitelist_pairs = self.config.get(\"exchange\", {}).get(\"pair_whitelist\")\n\n exists = self.follower_dict_path.is_file()\n\n if exists:\n logger.info(\"Found an existing follower dictionary\")\n\n for pair in whiteli" }, { "id": 47252, "commit_id": "6933022e94acf139b2dea9a589bb8b25c62a5d20", "repo": "airflow", "path": "tests/providers/google/cloud/operators/test_dlp.py", "file_name": "test_dlp.py", "fun_name": "test_get_deidentify_template", "commit_message": "Fix new MyPy errors in main (#22884)\n\nThose MyPe errors are side effect of some new dependencies.", "code": "def test_get_deidentify_template(self, mock_hook):\n mock_hook.return_value.get_deidentify_template.return_value = mock.MagicMock()\n operator = CloudDLPGetDeidentifyTemplateOperator(\n template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID, task_id=\"id\"\n )\n operator.execute(context=None)\n mock_hook.assert_called_once_with(\n gcp_conn_id=GCP_CONN_ID,\n impersonation_chain=None,\n )\n mock_hook.return_value.get_deidentify_template.assert_called_once_with(\n template_id=TEMPLATE_ID,\n organization_id=ORGANIZATION_ID,\n project_id=None,\n retry=DEFAULT,\n timeout=None,\n metadata=(),\n )\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 180, "n_words": 26, "vocab_size": 21, "complexity": 1, "nloc": 18, "token_counts": 91, "n_ast_nodes": 136, "n_identifiers": 25, "random_cut": "def test_get_deidentify_template(self, mock_hook):\n mock_hook.return_value.get_deidentify_template.return_value = mock.MagicMock()\n operator = CloudDLPGetDeidentifyT" }, { "id": 109989, "commit_id": "cf8e04ddc1686dd285afdcc6e3ea8d9f29ff869b", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_triangulation.py", "file_name": "test_triangulation.py", "fun_name": "test_triinterpcubic_cg_solver", "commit_message": "Make all matplotlib.tri submodules private\n\nUsers should access all elements through the outer namespace\nmatplotlib.tri.\n\nBack-compatibility for the old module names will be added in a separate\ncommit. If done in the same commit, git would interpret this as\na modified file plus a new file and not as a rename. 
With the separation\nand the rename we keep the history.", "code": "def test_triinterpcubic_cg_solver():\n # Now 3 basic tests of the Sparse CG solver, used for\n # TriCubicInterpolator with *kind* = 'min_E'\n # 1) A commonly used test involves a 2d Poisson matrix.", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 4, "n_whitespaces": 39, "n_words": 31, "vocab_size": 28, "complexity": 3, "nloc": 40, "token_counts": 587, "n_ast_nodes": 11, "n_identifiers": 1, "random_cut": "def test_triinterpcubic_cg_solver():" }, { "id": 198307, "commit_id": "d76ce9791b7c7bf5190bf17b8e67042af05dc4f8", "repo": "sympy", "path": "sympy/functions/elementary/miscellaneous.py", "file_name": "miscellaneous.py", "fun_name": "__new__", "commit_message": "Update sympy/functions/elementary/miscellaneous.py\n\nCo-authored-by: Christopher Smith ", "code": "def __new__(cls, *args, **assumptions):\n evaluate = assumptions.pop('evaluate', True)\n args = (sympify(arg) for arg in args)\n\n # first standard filter, for cls.zero and cls.identity\n # also reshape Max(a, Max(b, c)) to Max(a, b, c)\n\n if evaluate:\n try:\n args = frozenset(cls._new_args_filter(args))\n except ShortCircuit:\n return cls.zero\n # remove redundant args that are easily identified\n args = cls._collapse_arguments(args, **assumptions)\n # find local zeros\n args = cls._find_localzeros(args, **assumptions)\n args = frozenset(args)\n\n if not args:\n return cls.identity\n\n if len(args) == 1:\n return list(args).pop()\n\n # base creation\n obj = Expr.__new__(cls, *ordered(args), **assumptions)\n obj._argset = args\n return obj\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 291, "n_words": 90, "vocab_size": 61, "complexity": 6, "nloc": 18, "token_counts": 133, "n_ast_nodes": 217, "n_identifiers": 21, "random_cut": "def __new__(cls, *args, **assumptions):\n evaluate = assumptions.pop('evaluate', True)\n args = (sympify(arg) for arg in args)\n\n # first standard filter, for cls.zero and cls.identity\n # also reshape Max(a, Max(b, c)) to Max(a, b, c)\n\n if evaluate:\n try:\n args = frozenset(cls._new_args_filter(args))\n except ShortCircuit:\n return cls.zero\n # remove redundant args that are easily identified\n args = cls._collapse_arguments(args, **assumptions)\n # find local zeros\n args = cls._find_localzeros(args, **assumptions)\n args = frozenset(args)\n\n if not " }, { "id": 255166, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/test/automatic_upgrade_test.py", "file_name": "automatic_upgrade_test.py", "fun_name": "test_IsInf", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel 
\r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def test_IsInf(self) -> None:\n self._test_op_upgrade('IsInf', 10, [[2, 3]], [[2, 3]], output_types=[TensorProto.BOOL])\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 17, "n_words": 11, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 39, "n_ast_nodes": 57, "n_identifiers": 6, "random_cut": "def test_IsInf(self) -> None:\n self._test_op_upgrade('IsInf', 10, [[2, 3]], [[2, 3]], output_types=[TensorProto.BOOL" }, { "id": 261311, "commit_id": "2a6703d9e8d1e54d22dd07f2bfff3c92adecd758", "repo": "scikit-learn", "path": "sklearn/pipeline.py", "file_name": "pipeline.py", "fun_name": "set_output", "commit_message": "ENH Introduces set_output API for pandas output (#23734)\n\n* Introduces set_output API for all transformers\r\n\r\n* TransformerMixin inherits from _SetOutputMixin\r\n\r\n* Adds tests\r\n\r\n* Adds whatsnew\r\n\r\n* Adds example on using set_output API\r\n\r\n* Adds developer docs for set_output", "code": "def set_output(self, transform=None):\n \n for _, _, step in self._iter():\n _safe_set_output(step, transform=transform)\n return self\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 45, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 4, "token_counts": 33, "n_ast_nodes": 52, "n_identifiers": 7, "random_cut": "def set_output(self, transform=None):\n " }, { "id": 209142, "commit_id": "c8a2e6686c75eab130d7ac4c7ddb2ba726a73896", "repo": "scapy", "path": "scapy/utils.py", "file_name": "utils.py", "fun_name": "_write_block_idb", "commit_message": "Basic pcapng writing support (#3588)", "code": "def _write_block_idb(self):\n # type: () -> None\n\n # Block Type\n block_type = struct.pack(self.endian + \"I\", 1)\n # LinkType\n block_idb = struct.pack(self.endian + \"H\", self.linktype)\n # Reserved\n block_idb += struct.pack(self.endian + \"H\", 0)\n # SnapLen\n block_idb += struct.pack(self.endian + \"I\", 262144)\n\n self.f.write(self.build_block(block_type, block_idb))\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 111, "n_words": 42, "vocab_size": 26, "complexity": 1, "nloc": 6, "token_counts": 78, "n_ast_nodes": 132, "n_identifiers": 11, "random_cut": "def _write_block_idb(self):\n # type: () -> None\n\n # Block Type\n block_type = struct.pack(sel" }, { "id": 90647, "commit_id": "a0ec5a7af5835413781c5c26fa81fbe41b1e9879", "repo": "sentry", "path": "src/sentry/search/events/builder.py", "file_name": "builder.py", "fun_name": "resolve_granularity", "commit_message": "feat(mep): Allow some leeway on query period to use larger granularities (#35027)\n\n- This fixes a bug where duration was actually just the number of\r\n seconds in the timedelta instead of the total number of seconds\r\n- This allows queries over 3 days (chosen arbitrarily) to use the daily\r\n granularity, even if the start and end aren't perfectly happening at\r\n midnight\r\n- This allows queries over 12 hours (chosen arbitrarily) to use the\r\n horuly granularity, even if the start and end aren't perfectly\r\n happening on the 
hour", "code": "def resolve_granularity(self) -> Granularity:\n \n duration = (self.end - self.start).total_seconds()\n\n near_midnight: Callable[[datetime], bool] = lambda time: (\n time.minute <= 30 and time.hour == 0\n ) or (time.minute >= 30 and time.hour == 23)\n near_hour: Callable[[datetime], bool] = lambda time: time.minute <= 15 or time.minute >= 15\n\n if (\n # precisely going hour to hour\n self.start.minute\n == self.end.minute\n == duration % 3600\n == 0\n ):\n # we're going from midnight -> midnight which aligns with our daily buckets\n if self.start.hour == self.end.hour == duration % 86400 == 0:\n granularity = 86400\n # we're roughly going from start of hour -> next which aligns with our hourly buckets\n else:\n granularity = 3600\n elif (\n # Its over 30d, just use the daily granularity\n duration\n >= 86400 * 30\n ):\n granularity = 86400\n elif (\n # more than 3 days\n duration\n >= 86400 * 3\n ):\n # Allow 30 minutes for the daily buckets\n if near_midnight(self.start) and near_midnight(self.end):\n granularity = 86400\n else:\n granularity = 3600\n elif (\n # more than 12 hours\n (duration >= 3600 * 12)\n # Allow 15 minutes for the hourly buckets\n and near_hour(self.start)\n and near_hour(self.end)\n ):\n granularity = 3600\n # We're going from one random minute to another, we could use the 10s bucket, but no reason for that precision\n # here\n else:\n granularity = 60\n return Granularity(granularity)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 694, "n_words": 218, "vocab_size": 101, "complexity": 14, "nloc": 53, "token_counts": 219, "n_ast_nodes": 351, "n_identifiers": 16, "random_cut": "def resolve_granularity(self) -> Granularity:\n \n duration = (self.end - self.start).total_seconds()\n\n near_midnight: Callable[[datetime], bool] = lambda time: (\n time." 
}, { "id": 4146, "commit_id": "706d7f16868f062d89e9d24e37ab059ae1a6d8b2", "repo": "airbyte", "path": "octavia-cli/unit_tests/test_apply/test_resources.py", "file_name": "test_resources.py", "fun_name": "resource", "commit_message": "🐙 octavia-cli: implement `apply` (#10703)", "code": "def resource(self, patch_base_class, mock_api_client, local_configuration):\n return resources.BaseResource(mock_api_client, \"workspace_id\", local_configuration, \"bar.yaml\")\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 16, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 24, "n_ast_nodes": 36, "n_identifiers": 7, "random_cut": "def resource(self, patch_base_class, mock_api_client, local_configuration):\n return resources.BaseResource(mock_api_client, \"workspace_id\", local_configuration, \"bar.yaml\")\n" }, { "id": 150256, "commit_id": "a4bada3ebe5ff927503b067545260ef5fe608c87", "repo": "freqtrade", "path": "freqtrade/persistence/migrations.py", "file_name": "migrations.py", "fun_name": "check_migrate", "commit_message": "Partial exit using average price (#6545)\n\nIntroduce Partial exits", "code": "def check_migrate(engine, decl_base, previous_tables) -> None:\n \n inspector = inspect(engine)\n\n cols_trades = inspector.get_columns('trades')\n cols_orders = inspector.get_columns('orders')\n cols_pairlocks = inspector.get_columns('pairlocks')\n tabs = get_table_names_for_table(inspector, 'trades')\n table_back_name = get_backup_name(tabs, 'trades_bak')\n order_tabs = get_table_names_for_table(inspector, 'orders')\n order_table_bak_name = get_backup_name(order_tabs, 'orders_bak')\n pairlock_tabs = get_table_names_for_table(inspector, 'pairlocks')\n pairlock_table_bak_name = get_backup_name(pairlock_tabs, 'pairlocks_bak')\n\n # Check if migration necessary\n # Migrates both trades and orders table!\n # if ('orders' not in previous_tables\n # or not has_column(cols_orders, 'stop_price')):\n if not has_column(cols_trades, 'realized_profit'):\n logger.info(f\"Running database migration for trades - \"\n f\"backup: {table_back_name}, {order_table_bak_name}\")\n migrate_trades_and_orders_table(\n decl_base, inspector, engine, table_back_name, cols_trades,\n order_table_bak_name, cols_orders)\n\n if not has_column(cols_pairlocks, 'side'):\n logger.info(f\"Running database migration for pairlocks - \"\n f\"backup: {pairlock_table_bak_name}\")\n\n migrate_pairlocks_table(\n decl_base, inspector, engine, pairlock_table_bak_name, cols_pairlocks\n )\n if 'orders' not in previous_tables and 'trades' in previous_tables:\n raise OperationalException(\n \"Your database seems to be very old. 
\"\n \"Please update to freqtrade 2022.3 to migrate this database or \"\n \"start with a fresh database.\")\n\n set_sqlite_to_wal(engine)\n fix_old_dry_orders(engine)\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 350, "n_words": 144, "vocab_size": 97, "complexity": 5, "nloc": 33, "token_counts": 179, "n_ast_nodes": 323, "n_identifiers": 26, "random_cut": "def check_migrate(engine, decl_base, previous_tables) -> None:\n \n inspector = inspect(engine)\n\n cols_trades = inspector.get_columns('trades')\n cols_orders = inspector.get_columns('orders')\n cols_pairlocks = inspector.get_columns('pairlocks')\n tabs = get_table_names_for_table(inspector, 'trades')\n table_back_name = get_backup_name(tabs, 'trades_bak')\n order_tabs = get_table_names_for_table(inspector, 'orders')\n order_table_bak_name = get_backup_name(order_tabs, 'orders_bak')\n pairlock_tabs = get_table_names_for_table(inspector, 'pairlocks')\n pairlock_table_bak_name = get_backup_name(pairlock_tabs, 'pairlocks_bak')\n\n # Check if migration necessary\n # Migrates both trades and orders table!\n # if ('orders' not in previous_tables\n # or not has_column(cols_orders, 'stop_price')):\n if not has_column(cols_trades, 'realized_profit'):\n logger.info(f\"Running database migration for trades - \"\n f\"backup: {table_back_name}, {order_table_bak_name}\")\n migrate_trades_and_orders_table(\n decl_base, inspector, engine, table_back_name, cols_trades,\n order_table_bak_name, cols_orders)\n\n if not has_column(cols_pairlocks, 's" }, { "id": 70597, "commit_id": "b3c765f14d347e615cbdca5278003ea74edc3fc3", "repo": "wagtail", "path": "wagtail/admin/tests/test_workflows.py", "file_name": "test_workflows.py", "fun_name": "test_search", "commit_message": "Split search/pagination actions into their own view\n\nrather than switching behaviour based on the presence of certain URL parameters", "code": "def test_search(self):\n response = self.client.get(reverse('wagtailadmin_workflows:task_chooser_results') + '?q=foo')\n\n self.assertEqual(response.status_code, 200)\n\n self.assertTemplateUsed(response, \"wagtailadmin/workflows/task_chooser/includes/results.html\")\n self.assertTemplateNotUsed(response, \"wagtailadmin/workflows/task_chooser/chooser.html\")\n self.assertTrue(response.context['searchform'].is_searching())\n self.assertEqual(response.context['query_string'], 'foo')\n # check that only active (non-disabled) tasks are listed\n self.assertEqual([task.name for task in response.context['tasks'].object_list], ['Enabled foo'])\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 87, "n_words": 32, "vocab_size": 32, "complexity": 2, "nloc": 8, "token_counts": 99, "n_ast_nodes": 170, "n_identifiers": 16, "random_cut": "def test_search(self):\n response = self.client.get(reverse('wagtailadmin_workflows:task_chooser_results') + '?q=foo')\n\n sel" }, { "id": 187039, "commit_id": "b72f23fd699de9730e9009ac319b84da68f15a73", "repo": "streamlink", "path": "src/streamlink/stream/http.py", "file_name": "http.py", "fun_name": "url", "commit_message": "docs: update API page, add type annotations", "code": "def url(self) -> str:\n \n\n args = self.args.copy()\n method = args.pop(\"method\", \"GET\")\n return requests.Request(method=method, **valid_args(args)).prepare().url\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", 
"ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 42, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 7, "token_counts": 48, "n_ast_nodes": 83, "n_identifiers": 11, "random_cut": "def url(self) -> str:\n \n\n args = self.args.copy()\n method = args.pop(\"method\", \"GET\")\n return requests.Request(method=method, " }, { "id": 72417, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/views/generic/multiple_upload.py", "file_name": "multiple_upload.py", "fun_name": "get_context_data", "commit_message": "Reformat with black", "code": "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n # Instantiate a dummy copy of the form that we can retrieve validation messages and media from;\n # actual rendering of forms will happen on AJAX POST rather than here\n upload_form_class = self.get_upload_form_class()\n self.form = upload_form_class(user=self.request.user)\n\n collections = self.permission_policy.collections_user_has_permission_for(\n self.request.user, \"add\"\n )\n if len(collections) < 2:\n # no need to show a collections chooser\n collections = None\n\n context.update(\n {\n \"help_text\": self.form.fields[\"file\"].help_text,\n \"collections\": collections,\n \"form_media\": self.form.media,\n }\n )\n\n return context\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 251, "n_words": 75, "vocab_size": 63, "complexity": 2, "nloc": 17, "token_counts": 101, "n_ast_nodes": 170, "n_identifiers": 18, "random_cut": "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n # Instantiate a dummy copy of the for" }, { "id": 220305, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/base_events.py", "file_name": "base_events.py", "fun_name": "_asyncgen_firstiter_hook", "commit_message": "add python 3.10.4 for windows", "code": "def _asyncgen_firstiter_hook(self, agen):\n if self._asyncgens_shutdown_called:\n warnings.warn(\n f\"asynchronous generator {agen!r} was scheduled after \"\n f\"loop.shutdown_asyncgens() call\",\n ResourceWarning, source=self)\n\n self._asyncgens.add(agen)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 87, "n_words": 18, "vocab_size": 18, "complexity": 2, "nloc": 7, "token_counts": 35, "n_ast_nodes": 60, "n_identifiers": 10, "random_cut": "def _asyncgen_firstiter_hook(self, agen):\n if self._asyncgens_shutdown_called:\n warnings.warn(\n f\"asynchronous generator {age" }, { "id": 146334, "commit_id": "0c5440ee724a9f2b0fd94b7e6055c5be71968a84", "repo": "ray", "path": "python/ray/runtime_env.py", "file_name": "runtime_env.py", "fun_name": "get_extension", "commit_message": "[runtime env] Deletes the proto cache on RuntimeEnv (#22944)\n\nMainly the following things:\r\n- This PR deletes the proto cache on RuntimeEnv, ensuring that the user's modification of RuntimeEnv can take effect in the Proto message.\r\n- validate whole runtime env when serialize runtime_env. 
\r\n- overload method `__setitem__` to parse and validate field when it has to modify.", "code": "def get_extension(self, key) -> Optional[str]:\n if key not in RuntimeEnv.extensions_fields:\n raise ValueError(\n f\"Extension key must be one of {RuntimeEnv.extensions_fields}, \"\n f\"got: {key}\"\n )\n return self.get(key)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 90, "n_words": 25, "vocab_size": 24, "complexity": 2, "nloc": 7, "token_counts": 35, "n_ast_nodes": 66, "n_identifiers": 9, "random_cut": "def get_extension(self, key) -> Optional[str]:\n if key not in " }, { "id": 217837, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/http/cookies.py", "file_name": "cookies.py", "fun_name": "_quote", "commit_message": "add python 3.10.4 for windows", "code": "def _quote(str):\n r\n if str is None or _is_legal_key(str):\n return str\n else:\n return '\"' + str.translate(_Translator) + '\"'\n\n\n_OctalPatt = re.compile(r\"\\\\[0-3][0-7][0-7]\")\n_QuotePatt = re.compile(r\"[\\\\].\")\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 44, "n_words": 24, "vocab_size": 19, "complexity": 3, "nloc": 11, "token_counts": 32, "n_ast_nodes": 87, "n_identifiers": 9, "random_cut": "def _quote(str):\n r\n if str is None or _is_legal_key(str):\n return str\n else:\n" }, { "id": 124209, "commit_id": "5a094f1d18f7a1f1ea87ec10539c562445c46b6d", "repo": "ray", "path": "python/ray/workflow/tests/test_events.py", "file_name": "test_events.py", "fun_name": "test_sleep_checkpointing", "commit_message": "[workflow] Deprecate workflow.create (#26106)", "code": "def test_sleep_checkpointing(workflow_start_regular_shared):\n \n sleep_step = workflow.sleep(2)\n time.sleep(2)\n start_time = time.time()\n workflow.run(sleep_step)\n end_time = time.time()\n duration = end_time - start_time\n assert 1 < duration\n\n\n@pytest.mark.parametrize(\n \"workflow_start_regular_shared\",\n [\n {\n \"num_cpus\": 4, # TODO (Alex): When we switch to the efficient event\n # implementation we shouldn't need these extra cpus.\n }\n ],\n indirect=True,\n)", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"workflow_start_regular_shared\",\n [\n {\n \"num_cpus\": 4, # TODO (Alex): When we switch to the efficient event\n # implementation we shouldn't need these extra cpus.\n }\n ],\n indirect=True,\n)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 121, "n_words": 50, "vocab_size": 41, "complexity": 1, "nloc": 8, "token_counts": 49, "n_ast_nodes": 123, "n_identifiers": 14, "random_cut": "def test_sleep_checkpointing(workflow_start_regular_shared):\n \n sleep_step = workflow.sleep(2)\n time.sleep(2)\n start_time = time.time()\n workflow.run(sleep_step)\n end_time = time.time()\n duration = end_time - start_time\n assert 1 < duration\n\n\n@pytest.mark.parametrize(\n \"workflow_start_" }, { "id": 178860, "commit_id": "667e1fca682b57d08065be5ad72c643aa88e69af", "repo": "Nuitka", "path": "nuitka/Options.py", "file_name": "Options.py", "fun_name": "commentArgs", "commit_message": "Modules: Added ability to make \"__name__\" and \"__package__\" static as well.\n\n* This allows for more static optimization and is often what people\n think is supposed to happen.", "code": "def commentArgs():\n \n # A ton of cases to consider, pylint: 
disable=too-many-branches,too-many-statements\n\n # Inform the user about potential issues with the running version. e.g. unsupported\n # version.\n if python_version_str not in getSupportedPythonVersions():\n # Do not disturb run of automatic tests with, detected from the presence of\n # that environment variable.\n if \"PYTHON\" not in os.environ:\n Tracing.general.warning(\n \"The version %r is not currently supported. Expect problems.\"\n % python_version_str,\n )\n\n default_reference_mode = (\n \"runtime\" if shallMakeModule() or isStandaloneMode() else \"original\"\n )\n\n if getFileReferenceMode() is None:\n options.file_reference_mode = default_reference_mode\n else:\n if options.file_reference_mode != default_reference_mode:\n Tracing.options_logger.warning(\n \"Using non-default file reference mode '%s' rather than '%s' may cause runtime issues.\"\n % (getFileReferenceMode(), default_reference_mode)\n )\n else:\n Tracing.options_logger.info(\n \"Using default file reference mode '%s' need not be specified.\"\n % default_reference_mode\n )\n\n default_mode_name_mode = \"runtime\" if shallMakeModule() else \"original\"\n\n if getModuleNameMode() is None:\n options.module_name_mode = default_mode_name_mode\n elif getModuleNameMode() == default_mode_name_mode:\n Tracing.options_logger.info(\n \"Using module name mode '%s' need not be specified.\"\n % default_mode_name_mode\n )\n\n # TODO: Not all of these are usable with MSYS2 really, split those off.\n if getOS() != \"Windows\":\n # Too many Windows specific options clearly\n if (\n getWindowsIconExecutablePath()\n or shallAskForWindowsAdminRights()\n or shallAskForWindowsUIAccessRights()\n or getWindowsCompanyName()\n or getWindowsProductName()\n or getWindowsProductVersion()\n or getWindowsFileVersion()\n or getForcedStderrPath() # not yet for other platforms\n or getForcedStdoutPath()\n or getWindowsSplashScreen()\n ):\n Tracing.options_logger.warning(\n \"Using Windows specific options has no effect on other platforms.\"\n )\n\n if options.mingw64 or options.msvc_version:\n Tracing.options_logger.warning(\n \"Requesting Windows specific compilers has no effect on other platforms.\"\n )\n\n if isMingw64() and getMsvcVersion():\n Tracing.options_logger.sysexit(\n \"Requesting both Windows specific compilers makes no sense.\"\n )\n\n if getMsvcVersion() and getMsvcVersion() not in (\"list\", \"latest\"):\n if getMsvcVersion().count(\".\") != 1 or not all(\n x.isdigit() for x in getMsvcVersion().split(\".\")\n ):\n Tracing.options_logger.sysexit(\n \"For --msvc only values 'latest', 'info', and 'X.Y' values are allowed, but not %r.\"\n % getMsvcVersion()\n )\n\n if isOnefileMode():\n standalone_mode = \"onefile\"\n elif isStandaloneMode():\n standalone_mode = \"standalone\"\n else:\n standalone_mode = None\n\n if standalone_mode and not hasStandaloneSupportedOS():\n Tracing.options_logger.warning(\n \"Standalone mode on %s is not known to be supported, might fail to work.\"\n % getOS()\n )\n\n if options.follow_all and shallMakeModule():\n Tracing.optimization_logger.sysexit(\n \n )\n\n if options.follow_all and standalone_mode:\n Tracing.options_logger.info(\n \"Following all imports is the default for %s mode and need not be specified.\"\n % standalone_mode\n )\n\n if options.follow_none and standalone_mode:\n Tracing.options_logger.warning(\n \"Following no imports is unlikely to work for %s mode and should not be specified.\"\n % standalone_mode\n )\n\n if options.follow_stdlib and not standalone_mode:\n 
Tracing.options_logger.warning(\n \"Following imports to stdlib is unlikely to work without --standalone/--onefile and should not be specified.\"\n )\n\n if (\n not shallDumpBuiltTreeXML()\n and not standalone_mode\n and not options.follow_all\n and not options.follow_none\n and not options.follow_modules\n and not options.follow_stdlib\n and not options.include_modules\n and not options.include_packages\n and not options.include_extra\n and not options.follow_not_modules\n ):\n Tracing.options_logger.warning(\n \n % (\"module\" if shallMakeModule() else \"program\")\n )\n\n if options.dependency_tool:\n Tracing.options_logger.warning(\n \"Using removed option '--windows-dependency-tool' is deprecated and has no impact anymore.\"\n )\n\n if shallMakeModule() and options.static_libpython == \"yes\":\n Tracing.options_logger.warning(\n \"In module mode, providing '--static-libpython' has no effect, it's not used.\"\n )\n\n options.static_libpython = \"no\"\n\n if (\n not isPgoMode()\n and not isPythonPgoMode()\n and (getPgoArgs() or getPgoExecutable())\n ):\n Tracing.optimization_logger.warning(\n \"Providing PGO arguments without enabling PGO mode has no effect.\"\n )\n\n if isPgoMode():\n if isStandaloneMode():\n Tracing.optimization_logger.warning(\n \"Using PGO with standalone/onefile mode is not currently working. Expect errors.\"\n )\n\n if shallMakeModule():\n Tracing.optimization_logger.warning(\n \"Using PGO with module mode is not currently working. Expect errors.\"\n )\n\n if (\n options.static_libpython == \"auto\"\n and not shallMakeModule()\n and not shallDumpBuiltTreeXML()\n and not shallUseStaticLibPython()\n and getSystemStaticLibPythonPath() is not None\n ):\n Tracing.options_logger.info(\n \n )\n\n if not shallExecuteImmediately():\n if shallRunInDebugger():\n Tracing.options_logger.warning(\n \"The '--debugger' option has no effect outside of '--debug' without '--run' option.\"\n )\n\n if not shallClearPythonPathEnvironment():\n Tracing.options_logger.warning(\n \"The '--execute-with-pythonpath' option has no effect without '--run' option.\"\n )\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1878, "n_words": 586, "vocab_size": 244, "complexity": 71, "nloc": 158, "token_counts": 635, "n_ast_nodes": 1144, "n_identifiers": 64, "random_cut": "def commentArgs():\n \n # A ton of cases to consider, pylint: disable=too-many-branches,too-many-statements\n\n # Inform the user about potential issues with the running version. e.g. unsupported\n # version.\n if python_version_str not in getSupportedPythonVersions():\n # Do not disturb run of automatic tests with, detected from the presence of\n # that environment variable.\n if \"PYTHON\" not in os.environ:\n Tracing.general.warning(\n \"The version %r is not currently supported. 
Expect problems.\"\n % python_version_str,\n )\n\n default_reference_mode = (\n \"runtime\" if shallMakeModule() or isStandaloneMode() else \"original\"\n )\n\n if getFileReferenceMode() is None:\n options.file_reference_mode = default_reference_mode\n else:\n if options.file_reference_mode != default_reference_mode:\n Tracing.options_logger.warning(\n \"Using non-default file reference mode '%s' rather than '%s' may cause runtime issues.\"\n % (getFileReferenceMode(), default_reference_mode)\n )\n else:\n Tracing.options_logger.info(\n \"Using default file reference mode '%s' need not be specified.\"\n % default_reference_mode\n )\n\n default_mode_name_mode = \"runtime\" if shallMakeModule() else \"original\"\n\n if getModuleNameMode() is None:\n options.module_name_mode = default_mode_name_mode\n elif getModuleNameMode() == default_mode_name_mode:\n Tracing.options_logger.info(\n \"Using module name mode '%s' need not be specified.\"\n % default_mode_name_mode\n )\n\n # TODO: Not all of these are usable with MSYS2 really, split those off.\n if getOS() != \"Windows\":\n # Too many Windows specific options clearly\n if (\n getWindowsIconExecutablePath()\n or shallAskForWindowsAdminRights()\n or shallAskForWindowsUIAccessRights()\n or getWindowsCompanyName()\n or getWindowsProductName()\n or getWindowsProductVersion()\n or getWindowsFileVersion()\n or getForcedStderrPath() # not yet for other platforms\n or getForcedStdoutPath()\n or getWindowsSplashScreen()\n ):\n Tracing.options_logger.warning(\n \"Using Windows specific opti" }, { "id": 78546, "commit_id": "1d7a4b64e3b1d20a57b222b64253478fba871920", "repo": "wagtail", "path": "wagtail/admin/tests/pages/test_preview.py", "file_name": "test_preview.py", "fun_name": "test_preview_on_create_with_invalid_data", "commit_message": "Extract generic preview views from page preview", "code": "def test_preview_on_create_with_invalid_data(self):\n preview_url = reverse(\n \"wagtailadmin_pages:preview_on_add\",\n args=(\"tests\", \"eventpage\", self.home_page.id),\n )\n\n preview_session_key = \"wagtail-preview-tests-eventpage-{}\".format(\n self.home_page.id\n )\n self.assertNotIn(preview_session_key, self.client.session)\n\n response = self.client.post(preview_url, {**self.post_data, \"title\": \"\"})\n\n # Check the JSON response\n self.assertEqual(response.status_code, 200)\n self.assertJSONEqual(\n response.content.decode(),\n {\"is_valid\": False, \"is_available\": False},\n )\n\n # The invalid data should not be saved in the session\n self.assertNotIn(preview_session_key, self.client.session)\n\n response = self.client.get(preview_url)\n\n # The preview should still be unavailable\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"wagtailadmin/generic/preview_error.html\")\n self.assertContains(\n response,\n \"Wagtail - Preview not available\",\n html=True,\n )\n self.assertContains(\n response,\n '
    Preview not available
    ',\n html=True,\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 341, "n_words": 81, "vocab_size": 57, "complexity": 1, "nloc": 29, "token_counts": 170, "n_ast_nodes": 277, "n_identifiers": 24, "random_cut": "def test_preview_on_create_with_invalid_data(self):\n preview_url = reverse(\n \"wagtailadmin_pages:preview_on_add\",\n args=(\"tests\", \"eventpage\", self.home_page.id),\n )\n\n preview_session_key = \"wagtail-preview-tests-eventpage-{}\".format(\n self.home_page.id\n )\n self.assertNotIn(preview_session_key, self.client.session)\n\n response = self.client.post(preview_url, {**self.post_data, \"title\": \"\"})\n\n # Check the JSON response\n self.assertEqual(response.status_code, 200)\n self.assertJSONEqual(\n response.content.decode(),\n {\"is_valid\": False, \"is_available\": False},\n )\n\n # The invalid data should not be s" }, { "id": 180290, "commit_id": "ed8301d6584310b23b2bc59e8ef769b951f2a94a", "repo": "gradio", "path": "gradio/components.py", "file_name": "components.py", "fun_name": "postprocess", "commit_message": "gifs in readme (#1298)\n\n* hello world\r\n\r\n* gif\r\n\r\n* sepia\r\n\r\n* replace calc gifs\r\n\r\n* crop calculator gif\r\n\r\n* final gifs\r\n\r\nCo-authored-by: Dawood \r\nCo-authored-by: AK391 <81195143+AK391@users.noreply.github.com>", "code": "def postprocess(self, y):\n \n if y is None:\n return None\n elif isinstance(y, (ModuleType, matplotlib.pyplot.Figure)):\n dtype = \"matplotlib\"\n out_y = processing_utils.encode_plot_to_base64(y)\n elif isinstance(y, dict):\n dtype = \"bokeh\"\n out_y = json.dumps(y)\n else:\n dtype = \"plotly\"\n out_y = y.to_json()\n return {\"type\": dtype, \"plot\": out_y}\n\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 159, "n_words": 40, "vocab_size": 28, "complexity": 4, "nloc": 13, "token_counts": 83, "n_ast_nodes": 142, "n_identifiers": 16, "random_cut": "def postprocess(self, y):\n \n if y is None:\n return None\n elif isinstance(y, (ModuleType, matplotlib.pyplot.Figure)):\n dtype = \"matplotlib\"\n out_y = processing_utils.encode_plot_to_base64(y)\n elif isinstance(y, dict):\n dtype = \"bokeh\"\n out_y = json.dumps(y)\n else:\n dtype = \"plotly\"\n out_y = y.to_json()\n return {\"type\": dtype, " }, { "id": 152951, "commit_id": "be10ba93380044d90c17c7d2fd5a0fc39fcf6870", "repo": "modin", "path": "modin/experimental/pandas/test/test_io_exp.py", "file_name": "test_io_exp.py", "fun_name": "test_read_csv_without_glob", "commit_message": "DOCS-#3766: update 'read_csv_glob' dispatcher, parser and docstring (#3797)\n\nCo-authored-by: Devin Petersohn \r\nCo-authored-by: Vasily Litvinov \r\nCo-authored-by: Alexey Prutskov \r\nSigned-off-by: Anatoly Myachev ", "code": "def test_read_csv_without_glob(self):\n with pytest.warns(UserWarning, match=r\"Shell-style wildcard\"):\n with pytest.raises(FileNotFoundError):\n pd.read_csv_glob(\"s3://nyc-tlc/trip data/yellow_tripdata_2020-\")\n\n\n@pytest.mark.skipif(\n Engine.get() != \"Ray\", reason=\"Currently only support Ray engine for glob paths.\"\n)", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "@pytest.mark.skipif(\n Engine.get() != \"Ray\", reason=\"Currently only support Ray engine for glob paths.\"\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 56, "n_words": 23, "vocab_size": 22, "complexity": 1, "nloc": 4, 
"token_counts": 32, "n_ast_nodes": 92, "n_identifiers": 15, "random_cut": "def test_read_csv_without_glob(self):\n with pytest.warns(UserWarning, match=r\"Shell-style wildcard\"):\n with pytest.raises(FileNotFoundError):\n pd.read_csv_glob(\"s3://nyc-tlc/trip data/yellow_tripdata_2020-\")\n\n\n@pytest.mark.skipif(\n Engine.get() != \"Ray\", reason=\"Currently only support Ray engine for glob pat" }, { "id": 168744, "commit_id": "786c28fe929ed65298bfc723aa1cdbe49a68ae0c", "repo": "pandas", "path": "pandas/tests/extension/test_string.py", "file_name": "test_string.py", "fun_name": "test_sort_values_frame", "commit_message": "TST: Filter/test pyarrow PerformanceWarnings (#48093)", "code": "def test_sort_values_frame(self, data_for_sorting, ascending):\n with tm.maybe_produces_warning(\n PerformanceWarning,\n pa_version_under7p0\n and getattr(data_for_sorting.dtype, \"storage\", \"\") == \"pyarrow\",\n ):\n super().test_sort_values_frame(data_for_sorting, ascending)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 74, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 7, "token_counts": 43, "n_ast_nodes": 71, "n_identifiers": 11, "random_cut": "def test_sort_values_frame(self, data_for_sorting, ascending):\n with tm.maybe_produces_warning(\n PerformanceWarning,\n pa_version_under7p0\n and getattr(data_for_sorting.dtype, \"storage\", \"\") == \"pyarrow\",\n ):\n super().test_sort_values_frame(data_for_sorting, ascending)\n\n" }, { "id": 304486, "commit_id": "b88e71762de349faea2469823d4741bb6937b825", "repo": "core", "path": "homeassistant/components/cups/sensor.py", "file_name": "sensor.py", "fun_name": "update", "commit_message": "Improve type hint in cups sensor entity (#77030)", "code": "def update(self) -> None:\n \n self.data.update()\n assert self.name is not None\n assert self.data.printers is not None\n self._printer = self.data.printers.get(self.name)\n self._attr_available = self.data.available\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 63, "n_words": 21, "vocab_size": 16, "complexity": 1, "nloc": 7, "token_counts": 56, "n_ast_nodes": 90, "n_identifiers": 9, "random_cut": "def update(self) -> None:\n \n " }, { "id": 66382, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/manufacturing/doctype/bom/bom.py", "file_name": "bom.py", "fun_name": "add_additional_cost", "commit_message": "style: format code with black", "code": "def add_additional_cost(stock_entry, work_order):\n\t# Add non stock items cost in the additional cost\n\tstock_entry.additional_costs = []\n\texpenses_included_in_valuation = frappe.get_cached_value(\n\t\t\"Company\", work_order.company, \"expenses_included_in_valuation\"\n\t)\n\n\tadd_non_stock_items_cost(stock_entry, work_order, expenses_included_in_valuation)\n\tadd_operations_cost(stock_entry, work_order, expenses_included_in_valuation)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 21, "n_words": 29, "vocab_size": 25, "complexity": 1, "nloc": 7, "token_counts": 43, "n_ast_nodes": 68, "n_identifiers": 10, "random_cut": "def add_additional_cost(stock_entry, work_order):\n\t# Add non stock items cost in the additional cost\n\tstock_entry.additional_costs = []\n\texpenses_included_in_valuation = 
frappe.get_cached_value(\n\t\t\"Company\", work_order.company, \"expenses_included_in_valuation\"\n\t)\n\n\tadd_non_sto" }, { "id": 169381, "commit_id": "58f3afc5dd8a071f5222fdb4a7149db64ae1caef", "repo": "pandas", "path": "pandas/tests/tools/test_to_datetime.py", "file_name": "test_to_datetime.py", "fun_name": "test_to_datetime_mixed_tzaware_timestamp_utc_true", "commit_message": "BUG: to_datetime(tz_mix, utc=True) converts to UTC (#48686)\n\nCo-authored-by: Patrick Hoefler <61934744+phofl@users.noreply.github.com>", "code": "def test_to_datetime_mixed_tzaware_timestamp_utc_true(arg, tz_aware_first):\n # GH 48678\n exp_arg = [\"1724-12-20 20:20:20\", \"2022-01-01 00:00:00\"]\n if not tz_aware_first:\n arg.reverse()\n exp_arg.reverse()\n result = to_datetime(arg, utc=True)\n expected = DatetimeIndex(exp_arg).tz_localize(\"UTC\")\n tm.assert_index_equal(result, expected)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 57, "n_words": 26, "vocab_size": 24, "complexity": 2, "nloc": 8, "token_counts": 57, "n_ast_nodes": 98, "n_identifiers": 13, "random_cut": "def test_to_datetime_mixed_tzaware_timestamp_utc_true(arg, tz_aware_first):\n # GH 48678\n exp_arg = [\"1724-12-20 20:20:20\", \"2022" }, { "id": 101456, "commit_id": "13cfb3f39e72e9ca181f173b7b3db2a048db0d08", "repo": "faceswap", "path": "plugins/extract/pipeline.py", "file_name": "pipeline.py", "fun_name": "_add_queues", "commit_message": "extract: Add batch processing mode", "code": "def _add_queues(self) -> Dict[str, EventQueue]:\n \n queues = {}\n tasks = [f\"extract{self._instance}_{phase}_in\" for phase in self._flow]\n tasks.append(f\"extract{self._instance}_{self._final_phase}_out\")\n for task in tasks:\n # Limit queue size to avoid stacking ram\n queue_manager.add_queue(task, maxsize=self._queue_size)\n queues[task] = queue_manager.get_queue(task)\n logger.debug(\"Queues: %s\", queues)\n return queues\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 120, "n_words": 38, "vocab_size": 33, "complexity": 3, "nloc": 10, "token_counts": 74, "n_ast_nodes": 144, "n_identifiers": 20, "random_cut": "def _add_queues(self) -> Dict[str, EventQueue]:\n \n queues = {}\n tasks = [f\"extract{self._instance}_{phase}_in\" for phase in self._flow]\n tasks.append(f\"extract{self._instance}_{self._final_phase}_out\")\n for task in tasks:\n # Limit queue size to avoid stacking ram\n queue_manager.add_queue(task, maxsize=self._queue_size" }, { "id": 263975, "commit_id": "af2cd6999c2c4b6a9648fa057290c0cb68efbade", "repo": "pyinstaller", "path": "PyInstaller/depend/bindepend.py", "file_name": "bindepend.py", "fun_name": "_select_destination_directory", "commit_message": "depend: DLL parent path preservation: work around for pywin32\n\nThe `win32uiole` extension is linked agains `win32ui` extension,\nbut due to the `sys.path` manipulation, the ` win32uiole` is a top\nlevel module, and is collected to top-level directory. Therefore,\nthe same treatment needs to be applied to the `win32ui` - we should\nnot preserve its parent directory structure. 
Apply the same exception\nto the whole `site-packages/pythonwin` directory...", "code": "def _select_destination_directory(src_filename, parent_dir_preservation_paths):\n # Special handling for pywin32 on Windows, because its .pyd extensions end up linking each other, but, due to\n # sys.path modifications the packages perform, they all end up as top-modules and should be collected into\n # top-level directory... i.e., we must NOT preserve the directory layout in this case.\n if compat.is_win:\n # match <...>/site-packages/pythonwin\n parent_dir = src_filename.parent\n if parent_dir.name == \"pythonwin\" and parent_dir.parent in parent_dir_preservation_paths:\n # Collect into top-level directory.\n return src_filename.name\n\n # Check parent directory preservation paths\n for parent_dir_preservation_path in parent_dir_preservation_paths:\n if parent_dir_preservation_path in src_filename.parents:\n # Collect into corresponding sub-directory.\n return src_filename.relative_to(parent_dir_preservation_path)\n\n # Collect into top-level directory.\n return src_filename.name\n\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 199, "n_words": 104, "vocab_size": 73, "complexity": 6, "nloc": 9, "token_counts": 57, "n_ast_nodes": 98, "n_identifiers": 11, "random_cut": "def _select_destination_directory(src_filename, parent_dir_preservation_paths):\n # Special handling for pywin32 on Windows, because its .pyd extensions end up linking each other, but, due to\n # sys.path modifications the packages perform, they all end up as top-modules and should be collected into\n # top-level directory... i.e., we must NOT preserve the directory layout in this case.\n if compat.is_win:\n # match <...>/site-packages/pythonwin\n parent_dir = src_filename.parent\n if parent_dir.name == \"pythonwin\" and parent_dir.parent in parent_dir_preservation_paths:\n # Collect into top-level directory.\n" }, { "id": 257313, "commit_id": "738e008020f146ff9820c290311782f515749c48", "repo": "haystack", "path": "test/test_extractor.py", "file_name": "test_extractor.py", "fun_name": "test_extractor_batch_multiple_queries", "commit_message": "Add `run_batch` method to all nodes and `Pipeline` to allow batch querying (#2481)\n\n* Add run_batch methods for batch querying\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix mypy\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix mypy\r\n\r\n* Fix linter\r\n\r\n* Fix tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix mypy\r\n\r\n* Fix rest api test\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Add Doc strings\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Add batch_size as attribute to nodes supporting batching\r\n\r\n* Adapt error messages\r\n\r\n* Adapt type of filters in retrievers\r\n\r\n* Revert change about truncation_warning in summarizer\r\n\r\n* Unify multiple_doc_lists tests\r\n\r\n* Use smaller models in extractor tests\r\n\r\n* Add return types to JoinAnswers and RouteDocuments\r\n\r\n* Adapt return statements in reader's run_batch method\r\n\r\n* Allow list of filters\r\n\r\n* Adapt error messages\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix tests\r\n\r\n* Fix mypy\r\n\r\n* Adapt print_questions\r\n\r\n* Remove disabling warning about too many public methods\r\n\r\n* Add flag for pylint to disable warning about too many public methods in pipelines/base.py and 
document_stores/base.py\r\n\r\n* Add type check\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Adapt tutorial 11\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Add query_batch method for DCDocStore\r\n\r\n* Update Documentation & Code Style\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def test_extractor_batch_multiple_queries(document_store_with_docs):\n\n es_retriever = BM25Retriever(document_store=document_store_with_docs)\n ner = EntityExtractor()\n reader = FARMReader(model_name_or_path=\"deepset/tinyroberta-squad2\", num_processes=0)\n\n pipeline = Pipeline()\n pipeline.add_node(component=es_retriever, name=\"ESRetriever\", inputs=[\"Query\"])\n pipeline.add_node(component=ner, name=\"NER\", inputs=[\"ESRetriever\"])\n pipeline.add_node(component=reader, name=\"Reader\", inputs=[\"NER\"])\n\n prediction = pipeline.run_batch(\n queries=[\"Who lives in Berlin?\", \"Who lives in New York?\"],\n params={\"ESRetriever\": {\"top_k\": 1}, \"Reader\": {\"top_k\": 1}},\n )\n entities_carla = [entity[\"word\"] for entity in prediction[\"answers\"][0][0].meta[\"entities\"]]\n entities_paul = [entity[\"word\"] for entity in prediction[\"answers\"][1][0].meta[\"entities\"]]\n assert \"Carla\" in entities_carla\n assert \"Berlin\" in entities_carla\n assert \"Paul\" in entities_paul\n assert \"New York\" in entities_paul\n\n\n@pytest.mark.parametrize(\"document_store_with_docs\", [\"elasticsearch\"], indirect=True)", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"document_store_with_docs\", [\"elasticsearch\"], indirect=True)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 134, "n_words": 77, "vocab_size": 52, "complexity": 3, "nloc": 18, "token_counts": 192, "n_ast_nodes": 357, "n_identifiers": 29, "random_cut": "def test_extractor_batch_multiple_queries(document_store_with_docs):\n\n es_retriever = BM25Retriever(document_store=document_store_with_docs)\n ner = EntityExtractor()\n reader = FARMReader(model_name_or_path=\"deepset/tinyroberta-squad2\", num_processes=0)\n\n pipeline = Pipeline()\n pipeline.add_node(component=es_retriever, name=\"ESRetriever\", inputs=[\"Query\"])\n pipeline.add_node(component=ner, name=\"NER\", inputs=[\"ESRetriever\"])\n pipeline.add_node(component=reader, name=\"Reader\", inputs=[\"NER\"])\n\n prediction = pipeline.run_batch(\n queries=[\"Who lives in Berlin?\", \"Who lives in New York?\"],\n params={\"ESRetriever\": {\"top_k\": 1}, \"Reader\": {\"top_k\": 1}},\n )\n entities_carla = [entity[\"word\"] for entity in prediction[\"answers\"][0][0].meta[\"entities\"]]\n entities_paul = [entity[\"word\"] for entity in prediction[\"answers\"" }, { "id": 173307, "commit_id": "49655e9f2db89cfc77ddbd2c2c3646ea929edd40", "repo": "calibre-web", "path": "cps/admin.py", "file_name": "admin.py", "fun_name": "edit_scheduledtasks", "commit_message": "More bugfixes for time and datetime.time", "code": "def edit_scheduledtasks():\n content = config.get_scheduled_task_settings()\n time_field = list()\n duration_field = list()\n\n for n in range(24):\n time_field.append((n, format_time(datetime_time(hour=n), format=\"short\",)))\n for n in range(5, 65, 5):\n t = timedelta(hours=n // 60, minutes=n % 60)\n duration_field.append((n, format_timedelta(t, threshold=.9)))\n\n return render_title_template(\"schedule_edit.html\",\n config=content,\n starttime=time_field,\n duration=duration_field,\n title=_(u\"Edit Scheduled Tasks 
Settings\"))\n\n\n@admi.route(\"/admin/scheduledtasks\", methods=[\"POST\"])\n@login_required\n@admin_required", "url": "https://github.com/janeczku/calibre-web.git", "language": "Python", "ast_errors": "@admi.route(\"/admin/scheduledtasks\", methods=[\"POST\"])\n@login_required\n@admin_required", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 211, "n_words": 48, "vocab_size": 41, "complexity": 3, "nloc": 14, "token_counts": 115, "n_ast_nodes": 218, "n_identifiers": 30, "random_cut": "def edit_scheduledtasks():\n content = config.get_scheduled_task_settings()\n time_field = list()\n duration_field = list()\n\n for n in range(24):\n time_field.append((n, format_time(datetime_time(hour=n), format=\"short\",)))\n for n in range(5, 65, 5):\n t" }, { "id": 157839, "commit_id": "b60c25f66c31a8e467fdd21a766c0fc963944400", "repo": "d2l-zh", "path": "d2l/paddle.py", "file_name": "paddle.py", "fun_name": "train_ch8", "commit_message": "[Paddle]Add chapter_recurrent-neural-networks (#1157)\n\n* [Paddle]Add chapter_recurrent-neural-networks\r\n\r\n* resolve the bug\r\n\r\n* update paddle.py\r\n\r\n* fix the bug\r\n\r\n* Fix reshape bug and updater bug\r\n\r\n* remove redundant code\r\n\r\n* test\r\n\r\n* return to original mxnet.py and torch.py\r\n\r\n* Fix gradient clip bug\r\n\r\n* remove log\r\n\r\n* force to run rnn concise\r\n\r\n* return the blank space\r\n\r\nCo-authored-by: w5688414 ", "code": "def train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n \n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',\n legend=['train'], xlim=[10, num_epochs])\n # 初始化\n if isinstance(net, nn.Layer):\n updater = paddle.optimizer.SGD(\n learning_rate=lr, parameters=net.parameters())\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n # 训练和预测\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(\n net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n print(predict('time traveller'))\n animator.add(epoch + 1, [ppl])\n print(f'困惑度 {ppl:.1f}, {speed:.1f} 词元/秒 {str(device)}')\n print(predict('time traveller'))\n print(predict('traveller'))\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 222, "n_words": 83, "vocab_size": 66, "complexity": 4, "nloc": 19, "token_counts": 196, "n_ast_nodes": 327, "n_identifiers": 40, "random_cut": "def train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):\n \n loss = nn.CrossEntropyLoss()\n animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',\n legend=['train'], xlim=[10, num_epochs])\n # 初始化\n if isinstance(net, nn.Layer):\n updater = paddle.optimizer.SGD(\n learning_rate=lr, parameters=net.parameters())\n else:\n updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)\n predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)\n # 训练和预测\n for epoch in range(num_epochs):\n ppl, speed = train_epoch_ch8(\n net, train_iter, loss, updater, device, use_random_iter)\n if (epoch + 1) % 10 == 0:\n pr" }, { "id": 217570, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/gettext.py", "file_name": "gettext.py", "fun_name": "install", "commit_message": "add python 3.10.4 for windows", "code": "def install(self, names=None):\n import builtins\n builtins.__dict__['_'] = 
self.gettext\n if names is not None:\n allowed = {'gettext', 'lgettext', 'lngettext',\n 'ngettext', 'npgettext', 'pgettext'}\n for name in allowed & set(names):\n builtins.__dict__[name] = getattr(self, name)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 110, "n_words": 31, "vocab_size": 28, "complexity": 3, "nloc": 8, "token_counts": 65, "n_ast_nodes": 108, "n_identifiers": 10, "random_cut": "def install(self, names=None):\n import builtins\n builtins.__dict__['_'] = self.gettext\n if names is not None:\n allowed = {'gettext', 'lgettext', 'lngettext',\n 'ngettext', 'npgettext', 'pgettext'}\n for name in" }, { "id": 211316, "commit_id": "e55e41945d42db787a0f7c557d53d06a6b24536b", "repo": "PaddleDetection", "path": "configs/rotate/tools/convert.py", "file_name": "convert.py", "fun_name": "data_to_coco", "commit_message": "Refactor rbox (#6704)\n\n* refactor rbox\r\n\r\n* modify the code of save results\r\n\r\n* fix some problem\r\n\r\n* add .gitignore in dataset/dota\r\n\r\n* fix test anno path", "code": "def data_to_coco(infos, output_path, class_names, num_process):\n data_dict = dict()\n data_dict['categories'] = []\n\n for i, name in enumerate(class_names):\n data_dict['categories'].append({\n 'id': i + 1,\n 'name': name,\n 'supercategory': name\n })\n\n pbar = tqdm(total=len(infos), desc='data to coco')\n images, annotations = [], []\n if num_process > 1:\n pool = Pool(num_process)\n results = []\n for i, info in enumerate(infos):\n image_id = i + 1\n results.append(\n pool.apply_async(\n process_single_sample, (info, image_id, class_names),\n callback=lambda x: pbar.update()))\n\n pool.close()\n pool.join()\n\n for result in results:\n single_image, single_anno = result.get()\n images.append(single_image)\n annotations += single_anno\n\n else:\n for i, info in enumerate(infos):\n image_id = i + 1\n single_image, single_anno = process_single_sample(info, image_id,\n class_names)\n images.append(single_image)\n annotations += single_anno\n pbar.update()\n\n pbar.close()\n\n for i, anno in enumerate(annotations):\n anno['id'] = i + 1\n\n data_dict['images'] = images\n data_dict['annotations'] = annotations\n\n with open(output_path, 'w') as f:\n json.dump(data_dict, f)\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 495, "n_words": 126, "vocab_size": 79, "complexity": 7, "nloc": 41, "token_counts": 269, "n_ast_nodes": 440, "n_identifiers": 39, "random_cut": "def data_to_coco(infos, output_path, class_names, num_process):\n data_dict = dict()\n data_dict['categories'] = []\n\n for i, name in enumerate(class_names):\n data_dict['categories'].append({\n 'id': i + 1,\n 'name': name,\n 'supercategory': name\n })\n\n pbar = tqdm(total=len(infos), desc='data to coco')\n images, annotations = [], []\n if num_process > 1:\n pool = Pool(num_process)\n results = []\n for i, info in enumerate(infos):\n image_id = i + 1\n results.append(\n pool.apply_async(\n process_single_sample, (info, image_id, class_names),\n callback=lambda x: pbar.update()))\n\n pool.close()\n pool.join()\n\n for result in results:\n single_image, single_anno = result.get()\n images.append(single_image)\n annotations += single_anno\n\n else:\n for i, info in enumerate(infos):\n image_id = i + 1\n single_image, single_anno = process_single_sample(info, image_id,\n class_names)\n images.append(single_image)\n 
annotations += single_anno\n " }, { "id": 213898, "commit_id": "d743336b1f3654cd0315f380f43eed4116997c1d", "repo": "ivy", "path": "ivy_tests/test_core/test_general.py", "file_name": "test_general.py", "fun_name": "test_ones_like", "commit_message": "renamed dev_str arg to dev for all methods.", "code": "def test_ones_like(x, dtype, tensor_fn, dev, call):\n # smoke test\n if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:\n # mxnet does not support 0-dimensional variables\n pytest.skip()\n x = tensor_fn(x, dtype, dev)\n ret = ivy.ones_like(x, dtype, dev)\n # type test\n assert ivy.is_array(ret)\n # cardinality test\n assert ret.shape == x.shape\n # value test\n assert np.allclose(call(ivy.ones_like, x, dtype, dev),\n np.asarray(ivy.backends.numpy.ones_like(ivy.to_numpy(x), dtype)))\n # compilation test\n if call in [helpers.torch_call]:\n # pytorch scripting cannot assign a torch.device value with a string\n return\n if not ivy.wrapped_mode():\n helpers.assert_compilable(ivy.ones_like)\n\n\n# one_hot\n@pytest.mark.parametrize(\n \"ind_n_depth\", [([0], 1), ([0, 1, 2], 3), ([[1, 3], [0, 0], [8, 4], [7, 9]], 10)])\n@pytest.mark.parametrize(\n \"dtype\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"ind_n_depth\", [([0], 1), ([0, 1, 2], 3), ([[1, 3], [0, 0], [8, 4], [7, 9]], 10)])\n@pytest.mark.parametrize(\n \"dtype\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 209, "n_words": 109, "vocab_size": 81, "complexity": 6, "nloc": 13, "token_counts": 142, "n_ast_nodes": 352, "n_identifiers": 30, "random_cut": "def test_ones_like(x, dtype, tensor_fn, dev, call):\n # smoke test\n if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:\n # mxnet does not support 0-dimensional variables\n pytest.skip()\n x = tensor_fn(x, dtype, dev)\n ret = ivy.ones_like(x, dtype, dev)\n # type test\n assert ivy.is_array(ret)\n # cardinality test\n assert ret.shape == x.shape\n # value test\n assert np.allclose(call(ivy.ones_like, x, dtype, dev),\n np.asarray(ivy.backends.numpy.ones_like(ivy.to_numpy(x), dtype)))\n " }, { "id": 23294, "commit_id": "a85a009e1314df3a6a007a65c8199114307aa8e1", "repo": "PaddleOCR", "path": "PPOCRLabel/libs/keyDialog.py", "file_name": "keyDialog.py", "fun_name": "getFlags", "commit_message": "pop up a key list dialog when finish a new shape", "code": "def getFlags(self):\n flags = {}\n for i in range(self.flagsLayout.count()):\n item = self.flagsLayout.itemAt(i).widget()\n flags[item.text()] = item.isChecked()\n return flags\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 59, "n_words": 17, "vocab_size": 14, "complexity": 2, "nloc": 6, "token_counts": 53, "n_ast_nodes": 87, "n_identifiers": 12, "random_cut": "def getFlags(self):\n flags = {}\n for i in range(self." 
}, { "id": 119161, "commit_id": "27f285782bed30aa85842a46b4a3900ca1375e33", "repo": "jax", "path": "jax/_src/lax/linalg.py", "file_name": "linalg.py", "fun_name": "cholesky_jvp_rule", "commit_message": "linalg_test: disable implicit rank promotion", "code": "def cholesky_jvp_rule(primals, tangents):\n x, = primals\n sigma_dot, = tangents\n L = jnp.tril(cholesky_p.bind(x))\n\n # Forward-mode rule from https://arxiv.org/pdf/1602.07527.pdf\n def phi(X):\n l = jnp.tril(X)\n return l / lax.expand_dims(jnp._constant_like(X, 1) + jnp.eye(X.shape[-1], dtype=X.dtype),\n range(l.ndim - 2))\n\n tmp = triangular_solve(L, sigma_dot, left_side=False, transpose_a=True,\n conjugate_a=True, lower=True)\n L_dot = lax.batch_matmul(L, phi(triangular_solve(\n L, tmp, left_side=True, transpose_a=False, lower=True)),\n precision=lax.Precision.HIGHEST)\n return L, L_dot\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 131, "n_words": 54, "vocab_size": 43, "complexity": 1, "nloc": 11, "token_counts": 96, "n_ast_nodes": 225, "n_identifiers": 32, "random_cut": "def cholesky_jvp_rule(primals, tangents):\n x, = primals\n sigma_dot, = tangents\n L = jnp.tril(cholesky_p.bind(x))\n\n # Forward-mode rule from https://arxiv." }, { "id": 125932, "commit_id": "c7ae787cc8468d8087aa3483b472f40624bd4a22", "repo": "ray", "path": "rllib/algorithms/tests/test_worker_failures.py", "file_name": "test_worker_failures.py", "fun_name": "test_fatal", "commit_message": "[RLlib] Beef up worker failure test. (#26953)", "code": "def test_fatal(self):\n # Test the case where all workers fail (w/o recovery).\n self._do_test_fault_fatal(\"PG\", {\"optimizer\": {}})\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 28, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 34, "n_identifiers": 3, "random_cut": "def test_fatal(self):\n # Test the case where " }, { "id": 216481, "commit_id": "c78f1ee4f49df35ab04e921a45de0878716d8bf5", "repo": "salt", "path": "salt/client/mixins.py", "file_name": "mixins.py", "fun_name": "_proc_function_remote", "commit_message": "Implement ``__getstate__`` and ``__setstate__`` instead of using ``classmethod``\n\nSigned-off-by: Pedro Algarvio ", "code": "def _proc_function_remote(self, *, fun, low, user, tag, jid, daemonize=True):\n \n if daemonize and not salt.utils.platform.is_windows():\n # Shutdown the multiprocessing before daemonizing\n salt.log.setup.shutdown_multiprocessing_logging()\n\n salt.utils.process.daemonize()\n\n # Reconfigure multiprocessing logging after daemonizing\n salt.log.setup.setup_multiprocessing_logging()\n\n # pack a few things into low\n low[\"__jid__\"] = jid\n low[\"__user__\"] = user\n low[\"__tag__\"] = tag\n\n try:\n return self.cmd_sync(low)\n except salt.exceptions.EauthAuthenticationError as exc:\n log.error(exc)\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 186, "n_words": 53, "vocab_size": 47, "complexity": 4, "nloc": 12, "token_counts": 105, "n_ast_nodes": 175, "n_identifiers": 22, "random_cut": "def _proc_function_remote(self, *, fun, low, user, tag, jid, daemonize=True):\n \n if daemonize and not salt.utils.platform.is_windows():\n " }, { "id": 74364, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": 
"wagtail/core/tests/test_page_model.py", "file_name": "test_page_model.py", "fun_name": "test_unpublish_also_unpublishes_aliases", "commit_message": "Reformat with black", "code": "def test_unpublish_also_unpublishes_aliases(self):\n event_page = EventPage.objects.get(url_path=\"/home/events/christmas/\")\n alias = event_page.create_alias(update_slug=\"new-event-page\")\n alias_alias = alias.create_alias(update_slug=\"new-event-page-2\")\n\n self.assertTrue(event_page.live)\n self.assertTrue(alias.live)\n self.assertTrue(alias_alias.live)\n\n PageLogEntry.objects.all().delete()\n\n # Unpublish the event page\n event_page.unpublish()\n\n alias.refresh_from_db()\n alias_alias.refresh_from_db()\n self.assertFalse(event_page.live)\n self.assertFalse(alias.live)\n self.assertFalse(alias_alias.live)\n\n # Check log entries were created for the aliases\n self.assertTrue(\n PageLogEntry.objects.filter(page=alias, action=\"wagtail.unpublish\").exists()\n )\n self.assertTrue(\n PageLogEntry.objects.filter(\n page=alias_alias, action=\"wagtail.unpublish\"\n ).exists()\n )\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 225, "n_words": 45, "vocab_size": 39, "complexity": 1, "nloc": 22, "token_counts": 157, "n_ast_nodes": 268, "n_identifiers": 23, "random_cut": "def test_unpublish_also_unpublishes_aliases(self):\n event_page = EventPage.objects.get(url_path=\"/home/events/christmas/\")\n alias = event_page.create_alias(update_slug=\"new-event-page\")\n alias_alias = alias.create_alias(update_slug=\"new-event-page-2\")\n\n self.assertTrue(event_page.live)\n self.assertTrue(alias.live)\n self.assertTrue(alias_alias." }, { "id": 34160, "commit_id": "22454ae492eca4bb749fa6d770dffc91d17dab87", "repo": "transformers", "path": "tests/test_tokenization_realm.py", "file_name": "test_tokenization_realm.py", "fun_name": "test_basic_tokenizer_no_lower", "commit_message": "Add REALM (#13292)\n\n* REALM initial commit\r\n\r\n* Retriever OK (Update new_gelu).\r\n\r\n* Encoder prediction score OK\r\n\r\n* Encoder pretrained model OK\r\n\r\n* Update retriever comments\r\n\r\n* Update docs, tests, and imports\r\n\r\n* Prune unused models\r\n\r\n* Make embedder as a module `RealmEmbedder`\r\n\r\n* Add RealmRetrieverOutput\r\n\r\n* Update tokenization\r\n\r\n* Pass all tests in test_modeling_realm.py\r\n\r\n* Prune RealmModel\r\n\r\n* Update docs\r\n\r\n* Add training test.\r\n\r\n* Remove completed TODO\r\n\r\n* Style & Quality\r\n\r\n* Prune `RealmModel`\r\n\r\n* Fixup\r\n\r\n* Changes:\r\n1. Remove RealmTokenizerFast\r\n2. Update docstrings\r\n3. 
Add a method to RealmTokenizer to handle candidates tokenization.\r\n\r\n* Fix up\r\n\r\n* Style\r\n\r\n* Add tokenization tests\r\n\r\n* Update `from_pretrained` tests\r\n\r\n* Apply suggestions\r\n\r\n* Style & Quality\r\n\r\n* Copy BERT model\r\n\r\n* Fix comment to avoid docstring copying\r\n\r\n* Make RealmBertModel private\r\n\r\n* Fix bug\r\n\r\n* Style\r\n\r\n* Basic QA\r\n\r\n* Save\r\n\r\n* Complete reader logits\r\n\r\n* Add searcher\r\n\r\n* Complete searcher & reader\r\n\r\n* Move block records init to constructor\r\n\r\n* Fix training bug\r\n\r\n* Add some outputs to RealmReader\r\n\r\n* Add finetuned checkpoint variable names parsing\r\n\r\n* Fix bug\r\n\r\n* Update REALM config\r\n\r\n* Add RealmForOpenQA\r\n\r\n* Update convert_tfrecord logits\r\n\r\n* Fix bugs\r\n\r\n* Complete imports\r\n\r\n* Update docs\r\n\r\n* Update naming\r\n\r\n* Add brute-force searcher\r\n\r\n* Pass realm model tests\r\n\r\n* Style\r\n\r\n* Exclude RealmReader from common tests\r\n\r\n* Fix\r\n\r\n* Fix\r\n\r\n* convert docs\r\n\r\n* up\r\n\r\n* up\r\n\r\n* more make style\r\n\r\n* up\r\n\r\n* upload\r\n\r\n* up\r\n\r\n* Fix\r\n\r\n* Update src/transformers/__init__.py\r\n\r\n* adapt testing\r\n\r\n* change modeling code\r\n\r\n* fix test\r\n\r\n* up\r\n\r\n* up\r\n\r\n* up\r\n\r\n* correct more\r\n\r\n* make retriever work\r\n\r\n* update\r\n\r\n* make style\r\n\r\n* finish main structure\r\n\r\n* Resolve merge conflict\r\n\r\n* Make everything work\r\n\r\n* Style\r\n\r\n* Fixup\r\n\r\n* Fixup\r\n\r\n* Update training test\r\n\r\n* fix retriever\r\n\r\n* remove hardcoded path\r\n\r\n* Fix\r\n\r\n* Fix modeling test\r\n\r\n* Update model links\r\n\r\n* Initial retrieval test\r\n\r\n* Fix modeling test\r\n\r\n* Complete retrieval tests\r\n\r\n* Fix\r\n\r\n* style\r\n\r\n* Fix tests\r\n\r\n* Fix docstring example\r\n\r\n* Minor fix of retrieval test\r\n\r\n* Update license headers and docs\r\n\r\n* Apply suggestions from code review\r\n\r\n* Style\r\n\r\n* Apply suggestions from code review\r\n\r\n* Add an example to RealmEmbedder\r\n\r\n* Fix\r\n\r\nCo-authored-by: Patrick von Platen \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def test_basic_tokenizer_no_lower(self):\n tokenizer = BasicTokenizer(do_lower_case=False)\n\n self.assertListEqual(\n tokenizer.tokenize(\" \\tHeLLo!how \\n Are yoU? 
\"), [\"HeLLo\", \"!\", \"how\", \"Are\", \"yoU\", \"?\"]\n )\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 52, "n_words": 19, "vocab_size": 19, "complexity": 1, "nloc": 5, "token_counts": 38, "n_ast_nodes": 71, "n_identifiers": 7, "random_cut": "def test_basic_tokenizer_no_lower(self):\n tokenizer = BasicTokenizer" }, { "id": 81336, "commit_id": "fd671ecc9d49845f1e0fa09ec178d1d6518061fe", "repo": "awx", "path": "awx/main/utils/update_model.py", "file_name": "update_model.py", "fun_name": "update_model", "commit_message": "Give specific messages if job was killed due to SIGTERM or SIGKILL (#12435)\n\n* Reap jobs on dispatcher startup to increase clarity, replace existing reaping logic\r\n\r\n* Exit jobs if receiving SIGTERM signal\r\n\r\n* Fix unwanted reaping on shutdown, let subprocess close out\r\n\r\n* Add some sanity tests for signal module\r\n\r\n* Add a log for an unhandled dispatcher error\r\n\r\n* Refine wording of error messages\r\n\r\nCo-authored-by: Elijah DeLee ", "code": "def update_model(model, pk, _attempt=0, _max_attempts=5, select_for_update=False, **updates):\n \n try:\n with transaction.atomic():\n # Retrieve the model instance.\n if select_for_update:\n instance = model.objects.select_for_update().get(pk=pk)\n else:\n instance = model.objects.get(pk=pk)\n\n # Update the appropriate fields and save the model\n # instance, then return the new instance.\n if updates:\n update_fields = ['modified']\n for field, value in updates.items():\n setattr(instance, field, value)\n update_fields.append(field)\n if field == 'status':\n update_fields.append('failed')\n instance.save(update_fields=update_fields)\n return instance\n except (DatabaseError, InterfaceError) as e:\n # Log out the error to the debug logger.\n logger.debug('Database error updating %s, retrying in 5 seconds (retry #%d): %s', model._meta.object_name, _attempt + 1, e)\n\n # Attempt to retry the update, assuming we haven't already\n # tried too many times.\n if _attempt < _max_attempts:\n for i in range(5):\n time.sleep(1)\n if signal_callback():\n raise RuntimeError(f'Could not fetch {pk} because of receiving abort signal')\n return update_model(model, pk, _attempt=_attempt + 1, _max_attempts=_max_attempts, **updates)\n else:\n logger.error('Failed to update %s after %d retries.', model._meta.object_name, _attempt)\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 518, "n_words": 146, "vocab_size": 109, "complexity": 9, "nloc": 26, "token_counts": 212, "n_ast_nodes": 350, "n_identifiers": 33, "random_cut": "def update_model(model, pk, _attempt=0, _max_attempts=5, select_for_update=False, **updates):\n \n try:\n with transaction.atomic():\n # Retrieve the model instance.\n if select_for_update:\n instance = model.objects.select_for_update().get(pk=pk)\n else:\n instance = model.objects.get(pk=pk)\n\n # Update the appropriate fields and save the model\n # instance, then return the new instance.\n if updates:\n update_fields = ['modified']\n for field, value in updates.items():\n " }, { "id": 32423, "commit_id": "8e8384663d716d4b5a4f510070ff954fc0ba4a52", "repo": "transformers", "path": "src/transformers/models/convnext/modeling_tf_convnext.py", "file_name": "modeling_tf_convnext.py", "fun_name": "serving", "commit_message": "Update serving code to enable `saved_model=True` (#18153)\n\n* Add serving_output and 
serving methods to some vision models\r\n\r\n* Add serving outputs for DeiT\r\n\r\n* Don't convert hidden states - differing shapes\r\n\r\n* Make saveable\r\n\r\n* Fix up\r\n\r\n* Make swin saveable\r\n\r\n* Add in tests\r\n\r\n* Fix funnel tests (can't convert to tensor)\r\n\r\n* Fix numpy call\r\n\r\n* Tidy up a bit\r\n\r\n* Add in hidden states - resnet\r\n\r\n* Remove numpy\r\n\r\n* Fix failing tests - tensor shape and skipping tests\r\n\r\n* Remove duplicated function\r\n\r\n* PR comments - formatting and var names\r\n\r\n* PR comments\r\nAdd suggestions made by Joao Gante:\r\n* Use tf.shape instead of shape_list\r\n* Use @tooslow decorator on tests\r\n* Simplify some of the logic\r\n\r\n* PR comments\r\nAddress Yih-Dar Sheih comments - making tensor names consistent and make types float\r\n\r\n* Types consistent with docs; disable test on swin (slow)\r\n\r\n* CI trigger\r\n\r\n* Change input_features to float32\r\n\r\n* Add serving_output for segformer\r\n\r\n* Fixup\r\n\r\nCo-authored-by: Amy Roberts ", "code": "def serving(self, inputs):\n \n output = self.call(inputs)\n return self.serving_output(output)\n\n\nCONVNEXT_START_DOCSTRING = r\n\nCONVNEXT_INPUTS_DOCSTRING = r\n\n\n@add_start_docstrings(\n \"The bare ConvNext model outputting raw features without any specific head on top.\",\n CONVNEXT_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"The bare ConvNext model outputting raw features without any specific head on top.\",\n CONVNEXT_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 53, "n_words": 30, "vocab_size": 27, "complexity": 1, "nloc": 3, "token_counts": 23, "n_ast_nodes": 67, "n_identifiers": 9, "random_cut": "def serving(self, inputs):\n \n output = self.call(inputs)\n return self.serving_output(output)\n\n\nCONVNEXT_START_DOCS" }, { "id": 104780, "commit_id": "126e2ec73b8a3808a1bdf0db4d961627353145ba", "repo": "datasets", "path": "tests/test_load.py", "file_name": "test_load.py", "fun_name": "test_HubDatasetModuleFactoryWithoutScript_with_data_dir", "commit_message": "Fix splits in local packaged modules, local datasets without script and hub datasets without script (#4144)\n\n* infer patterns locally if data_dir is provided\r\n\r\n* pass data_dir as base path to resolve patterns from\r\n\r\n* align LocalDatasetModuleFactoryWithoutScript and HubDatasetModuleFactoryWithScript with new data_dir logic\r\n\r\n* modify test for packaged dataset to check for test split too, add tests for hub dataset and local dataset without script with data_dir\r\n\r\n* use data_dir in lLocalDataset as a relative path\r\n\r\n* modify tests for Local and Hub datasets (with relative data dirs)\r\n\r\n* add tests for data files with base_path\r\n\r\n* fix number of files in data files according to new tmp dir structure", "code": "def test_HubDatasetModuleFactoryWithoutScript_with_data_dir(self):\n data_dir = \"data2\"\n factory = HubDatasetModuleFactoryWithoutScript(\n SAMPLE_DATASET_IDENTIFIER3, data_dir=data_dir, download_config=self.download_config\n )\n module_factory_result = factory.get_module()\n assert importlib.import_module(module_factory_result.module_path) is not None\n assert module_factory_result.builder_kwargs[\"base_path\"].startswith(config.HF_ENDPOINT)\n assert (\n module_factory_result.builder_kwargs[\"data_files\"] is not None\n and len(module_factory_result.builder_kwargs[\"data_files\"][\"train\"]) == 1\n and 
len(module_factory_result.builder_kwargs[\"data_files\"][\"test\"]) == 1\n )\n assert all(\n data_dir in Path(data_file).parts\n for data_file in module_factory_result.builder_kwargs[\"data_files\"][\"train\"]\n + module_factory_result.builder_kwargs[\"data_files\"][\"test\"]\n )\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 195, "n_words": 49, "vocab_size": 34, "complexity": 4, "nloc": 18, "token_counts": 133, "n_ast_nodes": 220, "n_identifiers": 21, "random_cut": "def test_HubDatasetModuleFactoryWithoutScript_with_data_dir(self):\n data_dir = \"data2\"\n factory = HubDatasetModuleFactoryWithoutScript(\n SAMPLE_DATASET_IDENTIFIER3, data_dir=data_dir, download_config=self.download_config\n )\n module_factory_result = factory.get_module()\n assert importlib.import_module(module_factory_result.module_path) is not None\n assert module_factory_result.builder_kwargs[\"base_path\"].startswith(config.HF_ENDPOINT)\n assert (\n module_factory_result.builder_kwargs[\"data_files\"] is not None\n and len(module_factory_result.builder_kwargs[\"data_files\"][\"train\"]) == 1\n and len(module_factory_result.builder_kwargs[\"data_files\"][\"test\"]) == 1\n )\n assert all(\n data_dir in Path(data_file).parts\n for data_file in module_factor" }, { "id": 116348, "commit_id": "c1f75cd0334996baa3fb7c3029de652d5656ac65", "repo": "mindsdb", "path": "mindsdb/integrations/utilities/utils.py", "file_name": "utils.py", "fun_name": "get_model_name", "commit_message": "ml handler query", "code": "def get_model_name(handler, stmt):\n \n side = None\n models = handler.get_tables() # .data_frame['model_name'].values\n if type(stmt.from_table) == Join:\n model_name = stmt.from_table.right.parts[-1]\n side = 'right'\n if model_name not in models:\n model_name = stmt.from_table.left.parts[-1]\n side = 'left'\n alias = str(getattr(stmt.from_table, side).alias)\n else:\n model_name = stmt.from_table.parts[-1]\n alias = None # todo: fix this\n\n if model_name not in models:\n raise Exception(\"Error, not found. 
Please create this predictor first.\")\n\n return model_name, alias, side", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 158, "n_words": 65, "vocab_size": 40, "complexity": 4, "nloc": 16, "token_counts": 114, "n_ast_nodes": 189, "n_identifiers": 17, "random_cut": "def get_model_name(handler, stmt):\n \n side = None\n models = handler.get_tables() # .data_frame['model_name'].values\n if type(stmt.from_table) == Join:\n model_name = stmt.from_table.right.parts[-1]\n side = 'right'\n if model_name not in models:\n model_name = stmt.from_table.left.parts[-1]\n side = 'left'\n alias = str(getattr(stmt.from_table, side).alias)\n else:\n model_name = stmt.from_tab" }, { "id": 9941, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "jina/proto/jina_pb2_grpc.py", "file_name": "jina_pb2_grpc.py", "fun_name": "process_control", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for 
star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* 
fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* 
style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: 
update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "def process_control(self, request, context):\n \n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 40, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 31, "n_ast_nodes": 55, "n_identifiers": 10, "random_cut": "def 
process_control(self, request, context):\n \n context.set_code(grpc." }, { "id": 243528, "commit_id": "cc45886bc3821b60ec911f90dfd77381f7a25f1b", "repo": "Pillow", "path": "src/PIL/BmpImagePlugin.py", "file_name": "BmpImagePlugin.py", "fun_name": "_bitmap", "commit_message": "Revert unintentional change", "code": "def _bitmap(self, header=0, offset=0):\n \n read, seek = self.fp.read, self.fp.seek\n if header:\n seek(header)\n # read bmp header size @offset 14 (this is part of the header size)\n file_info = {\"header_size\": i32(read(4)), \"direction\": -1}\n\n # -------------------- If requested, read header at a specific position\n # read the rest of the bmp header, without its size\n header_data = ImageFile._safe_read(self.fp, file_info[\"header_size\"] - 4)\n\n # -------------------------------------------------- IBM OS/2 Bitmap v1\n # ----- This format has different offsets because of width/height types\n if file_info[\"header_size\"] == 12:\n file_info[\"width\"] = i16(header_data, 0)\n file_info[\"height\"] = i16(header_data, 2)\n file_info[\"planes\"] = i16(header_data, 4)\n file_info[\"bits\"] = i16(header_data, 6)\n file_info[\"compression\"] = self.RAW\n file_info[\"palette_padding\"] = 3\n\n # --------------------------------------------- Windows Bitmap v2 to v5\n # v3, OS/2 v2, v4, v5\n elif file_info[\"header_size\"] in (40, 64, 108, 124):\n file_info[\"y_flip\"] = header_data[7] == 0xFF\n file_info[\"direction\"] = 1 if file_info[\"y_flip\"] else -1\n file_info[\"width\"] = i32(header_data, 0)\n file_info[\"height\"] = (\n i32(header_data, 4)\n if not file_info[\"y_flip\"]\n else 2**32 - i32(header_data, 4)\n )\n file_info[\"planes\"] = i16(header_data, 8)\n file_info[\"bits\"] = i16(header_data, 10)\n file_info[\"compression\"] = i32(header_data, 12)\n # byte size of pixel data\n file_info[\"data_size\"] = i32(header_data, 16)\n file_info[\"pixels_per_meter\"] = (\n i32(header_data, 20),\n i32(header_data, 24),\n )\n file_info[\"colors\"] = i32(header_data, 28)\n file_info[\"palette_padding\"] = 4\n self.info[\"dpi\"] = tuple(x / 39.3701 for x in file_info[\"pixels_per_meter\"])\n if file_info[\"compression\"] == self.BITFIELDS:\n if len(header_data) >= 52:\n for idx, mask in enumerate(\n [\"r_mask\", \"g_mask\", \"b_mask\", \"a_mask\"]\n ):\n file_info[mask] = i32(header_data, 36 + idx * 4)\n else:\n # 40 byte headers only have the three components in the\n # bitfields masks, ref:\n # https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx\n # See also\n # https://github.com/python-pillow/Pillow/issues/1293\n # There is a 4th component in the RGBQuad, in the alpha\n # location, but it is listed as a reserved component,\n # and it is not generally an alpha channel\n file_info[\"a_mask\"] = 0x0\n for mask in [\"r_mask\", \"g_mask\", \"b_mask\"]:\n file_info[mask] = i32(read(4))\n file_info[\"rgb_mask\"] = (\n file_info[\"r_mask\"],\n file_info[\"g_mask\"],\n file_info[\"b_mask\"],\n )\n file_info[\"rgba_mask\"] = (\n file_info[\"r_mask\"],\n file_info[\"g_mask\"],\n file_info[\"b_mask\"],\n file_info[\"a_mask\"],\n )\n else:\n raise OSError(f\"Unsupported BMP header type ({file_info['header_size']})\")\n\n # ------------------ Special case : header is reported 40, which\n # ---------------------- is shorter than real size for bpp >= 16\n self._size = file_info[\"width\"], file_info[\"height\"]\n\n # ------- If color count was not found in the header, compute from bits\n file_info[\"colors\"] = (\n file_info[\"colors\"]\n if file_info.get(\"colors\", 
0)\n else (1 << file_info[\"bits\"])\n )\n if offset == 14 + file_info[\"header_size\"] and file_info[\"bits\"] <= 8:\n offset += 4 * file_info[\"colors\"]\n\n # ---------------------- Check bit depth for unusual unsupported values\n self.mode, raw_mode = BIT2MODE.get(file_info[\"bits\"], (None, None))\n if self.mode is None:\n raise OSError(f\"Unsupported BMP pixel depth ({file_info['bits']})\")\n\n # ---------------- Process BMP with Bitfields compression (not palette)\n decoder_name = \"raw\"\n if file_info[\"compression\"] == self.BITFIELDS:\n SUPPORTED = {\n 32: [\n (0xFF0000, 0xFF00, 0xFF, 0x0),\n (0xFF000000, 0xFF0000, 0xFF00, 0x0),\n (0xFF000000, 0xFF0000, 0xFF00, 0xFF),\n (0xFF, 0xFF00, 0xFF0000, 0xFF000000),\n (0xFF0000, 0xFF00, 0xFF, 0xFF000000),\n (0x0, 0x0, 0x0, 0x0),\n ],\n 24: [(0xFF0000, 0xFF00, 0xFF)],\n 16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)],\n }\n MASK_MODES = {\n (32, (0xFF0000, 0xFF00, 0xFF, 0x0)): \"BGRX\",\n (32, (0xFF000000, 0xFF0000, 0xFF00, 0x0)): \"XBGR\",\n (32, (0xFF000000, 0xFF0000, 0xFF00, 0xFF)): \"ABGR\",\n (32, (0xFF, 0xFF00, 0xFF0000, 0xFF000000)): \"RGBA\",\n (32, (0xFF0000, 0xFF00, 0xFF, 0xFF000000)): \"BGRA\",\n (32, (0x0, 0x0, 0x0, 0x0)): \"BGRA\",\n (24, (0xFF0000, 0xFF00, 0xFF)): \"BGR\",\n (16, (0xF800, 0x7E0, 0x1F)): \"BGR;16\",\n (16, (0x7C00, 0x3E0, 0x1F)): \"BGR;15\",\n }\n if file_info[\"bits\"] in SUPPORTED:\n if (\n file_info[\"bits\"] == 32\n and file_info[\"rgba_mask\"] in SUPPORTED[file_info[\"bits\"]]\n ):\n raw_mode = MASK_MODES[(file_info[\"bits\"], file_info[\"rgba_mask\"])]\n self.mode = \"RGBA\" if \"A\" in raw_mode else self.mode\n elif (\n file_info[\"bits\"] in (24, 16)\n and file_info[\"rgb_mask\"] in SUPPORTED[file_info[\"bits\"]]\n ):\n raw_mode = MASK_MODES[(file_info[\"bits\"], file_info[\"rgb_mask\"])]\n else:\n raise OSError(\"Unsupported BMP bitfields layout\")\n else:\n raise OSError(\"Unsupported BMP bitfields layout\")\n elif file_info[\"compression\"] == self.RAW:\n if file_info[\"bits\"] == 32 and header == 22: # 32-bit .cur offset\n raw_mode, self.mode = \"BGRA\", \"RGBA\"\n elif file_info[\"compression\"] == self.RLE8:\n decoder_name = \"bmp_rle8\"\n elif file_info[\"compression\"] == self.RLE4:\n decoder_name = \"bmp_rle4\"\n else:\n raise OSError(f\"Unsupported BMP compression ({file_info['compression']})\")\n\n # --------------- Once the header is processed, process the palette/LUT\n if self.mode == \"P\": # Paletted for 1, 4 and 8 bit images\n\n # ---------------------------------------------------- 1-bit images\n if not (0 < file_info[\"colors\"] <= 65536):\n raise OSError(f\"Unsupported BMP Palette size ({file_info['colors']})\")\n else:\n padding = file_info[\"palette_padding\"]\n palette = read(padding * file_info[\"colors\"])\n greyscale = True\n indices = (\n (0, 255)\n if file_info[\"colors\"] == 2\n else list(range(file_info[\"colors\"]))\n )\n\n # ----------------- Check if greyscale and ignore palette if so\n for ind, val in enumerate(indices):\n rgb = palette[ind * padding : ind * padding + 3]\n if rgb != o8(val) * 3:\n greyscale = False\n\n # ------- If all colors are grey, white or black, ditch palette\n if greyscale:\n self.mode = \"1\" if file_info[\"colors\"] == 2 else \"L\"\n raw_mode = self.mode\n else:\n self.mode = \"P\"\n self.palette = ImagePalette.raw(\n \"BGRX\" if padding == 4 else \"BGR\", palette\n )\n\n # ---------------------------- Finally set the tile data for the plugin\n self.info[\"compression\"] = file_info[\"compression\"]\n self.tile = [\n (\n decoder_name,\n (0, 
0, file_info[\"width\"], file_info[\"height\"]),\n offset or self.fp.tell(),\n (\n raw_mode,\n ((file_info[\"width\"] * file_info[\"bits\"] + 31) >> 3) & (~3),\n file_info[\"direction\"],\n ),\n )\n ]\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 3238, "n_words": 792, "vocab_size": 378, "complexity": 36, "nloc": 154, "token_counts": 1242, "n_ast_nodes": 2033, "n_identifiers": 47, "random_cut": "def _bitmap(self, header=0, offset=0):\n \n read, seek = self.fp.read, self.fp.seek\n if header:\n seek(header)\n # read bmp header size @offset 14 (this is part of the header size)\n file_info = {\"header_size\": i32(read(4)), \"direction\": -1}\n\n # -------------------- If requested, read header at a specific position\n # read the rest of the bmp header, without its size\n header_data = ImageFile._safe_read(self.fp, file_info[\"header_size\"] - 4)\n\n # -------------------------------------------------- IBM OS/2 Bitmap v1\n # ----- This format has different offsets because of width/height types\n if file_info[\"header_size\"] == 12:\n file_info[\"width\"] = i16(header_data, 0)\n file_info[\"height\"] = i16(header_data, 2)\n file_info[\"planes\"] = i16(header_data, 4)\n file_info[\"bits\"] = i16(header_data, 6)\n file_info[\"compression\"] = self.RAW\n file_info[\"palette_padding\"] = 3\n\n # --------------------------------------------- Windows Bitmap v2 to v5\n # v3, OS/2 v2, v4, v5\n elif file_info[\"header_size\"] in (40, 64, 108, 124):\n file_info[\"y_flip\"] = header_data[7] == 0xFF\n file_info[\"direction\"] = 1 if file_info[\"y_flip\"] else -1\n file_info[\"width\"] = i32(header_data, 0)\n file_info[\"height\"] = (\n i32(header_data, 4)\n if not file_info[\"y_flip\"]\n else 2**32 - i32(header_data, 4)\n )\n file_info[\"planes\"] = i16(header_data, 8)\n file_info[\"bits\"] = i16(header_data, 10)\n file_info[\"compression\"] = i32(header_data, 12)\n # byte size of pixel data\n file_info[\"data_size\"] = i32(header_data, 16)\n file_info[\"pixels_per_meter\"] = (\n i32(header_data, 20),\n i32(header_data, 24),\n )\n file_info[\"colors\"] = i32(header_data, 28)\n file_info[\"palette_padding\"] = 4\n self.info[\"dpi\"] = tuple(x / 39.3701 for x in file_info[\"pixels_per_meter\"])\n if file_info[\"compression\"] == self.BITFIELDS:\n if len(header_data) >= 52:\n for idx, mask in enumerate(\n [\"r_mask\", \"g_mask\", \"b_mask\", \"a_mask\"]\n ):\n file_info[mask] = i32(header_data, 36 + idx * 4)\n else:\n # 40 byte headers only have the three components in the\n # bitfields masks, ref:\n # https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx\n # See also\n # https://github.com/python-pillow/Pillow/issues/1293\n # There is a 4th component in the RGBQuad, in the alpha\n # location, but it is listed as a reserved component,\n # and it is not generally an alpha channel\n file_info[\"a_mask\"] = 0x0\n for mask in [\"r_mask\", \"g_mask\", \"b_mask\"]:\n file_info[mask] = i32(read(4))\n file_info[\"rgb_mask\"] = (\n file_info[\"r_mask\"],\n file_info[\"g_mask\"],\n file_info[\"b_mask\"],\n )\n file_info[\"rgba_mask\"] = (\n file_info[\"r_mask\"],\n file_info[\"g_mask\"],\n file_info[\"b_mask\"],\n file_info[\"a_mask\"],\n )\n else:\n raise OSError(f\"Unsupported BMP header type ({file_info['header_size']})\")\n\n # ------------------ Special case : header is reported 40, which\n # ---------------------- is shorter than real size for bpp >= 16\n self._size = 
file_info[\"width\"], file_info[\"height\"]\n\n # ------- If color count was not found in the header, compute from bits\n file_info[\"colors\"] = (\n file_info[\"colors\"]\n if file_info.get(\"colors\", 0)\n else (1 << file_info[\"bits\"])\n )\n if offset == 14 + file_info[\"header_size\"] and file_info[\"bits\"] <= 8:\n offset += 4 * file_info[\"colors\"]\n\n # ---------------------- Check bit depth for unusual unsupported values\n self.mode, raw_mode = BIT2MODE.get(file_info[\"bits\"], (None, None))\n if self.mode is None:\n raise OSError(f\"Unsupported BMP pixel depth ({file_info['bits']})\")\n\n # ---------------- Process BMP with Bitfields compression (not palette)\n decoder_name = \"raw\"\n if file_info[\"compression\"] == self.BITFIELDS:\n SUPPORTED = {\n 32: [\n (0xFF0000, 0xFF00, 0xFF, 0x0),\n (0xFF000000, 0xFF0000, 0xFF00, 0x0),\n (0xFF000000, 0xFF0000, 0xFF00, 0xFF),\n (0xFF, 0xFF00, 0xFF0000, 0xFF000000),\n (0xFF0000, 0xFF00, 0xFF, 0xFF000000),\n (0x0, 0x0, 0x0, 0x0),\n ],\n 24: [(0xFF0000, 0xFF00, 0xFF)],\n 16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)],\n }\n MA" }, { "id": 63525, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/requests/cookies.py", "file_name": "cookies.py", "fun_name": "cookiejar_from_dict", "commit_message": "upd; format", "code": "def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):\n \n if cookiejar is None:\n cookiejar = RequestsCookieJar()\n\n if cookie_dict is not None:\n names_from_jar = [cookie.name for cookie in cookiejar]\n for name in cookie_dict:\n if overwrite or (name not in names_from_jar):\n cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))\n\n return cookiejar\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 97, "n_words": 38, "vocab_size": 27, "complexity": 7, "nloc": 9, "token_counts": 72, "n_ast_nodes": 111, "n_identifiers": 10, "random_cut": "def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):\n \n if cookiejar is None:\n cookiejar = RequestsCookieJar()\n\n if cookie_dict is not None:\n names_from_jar = [cookie.name for cookie in cookiejar]\n for name in cookie_dict:\n if overwrite or (name not in names_from_jar):\n cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))\n\n return cookiejar\n\n" }, { "id": 212261, "commit_id": "b570c028db1ef739f86edeab1affd1f73467aace", "repo": "bokeh", "path": "tests/unit/bokeh/core/property/test_singletons.py", "file_name": "test_singletons.py", "fun_name": "test_Undefined", "commit_message": "Fix Comparing singleton primitives with equality checking (#12100)\n\n* Fix issue singleton-equality-checking found at https://codereview.doctor\r\n\r\n* Fix awkward C-esque usage\r\n\r\n* Fix awkward C-esque test (pt 2)\r\n\r\n* Fix awkward C-esque test (pt 3)\r\n\r\nCo-authored-by: Bryan Van de Ven ", "code": "def test_Undefined() -> None:\n assert (bcpu.Undefined == bcpu.Undefined) is True\n assert (bcpu.Undefined != bcpu.Undefined) is False\n assert (bcpu.Undefined is bcpu.Undefined) is True\n assert (bcpu.Undefined is not bcpu.Undefined) is False\n assert (copy(bcpu.Undefined) is bcpu.Undefined) is True\n assert (copy(bcpu.Undefined) is not bcpu.Undefined) is False\n", "url": "https://github.com/bokeh/bokeh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 59, "n_words": 42, "vocab_size": 14, 
"complexity": 1, "nloc": 7, "token_counts": 86, "n_ast_nodes": 131, "n_identifiers": 4, "random_cut": "def test_Undefined() -> None:\n assert (bcpu.Undefined == bcpu.Undefined) is True\n assert (bcpu.Undefined != bcpu.Undefined) is False\n assert (bcpu.Undefined is bcpu.Undefined) is True\n assert (bcpu.Undefined is not bcpu.Undefined) is False\n assert (copy(bcpu.Undefined) is bcpu.Undefined) is True\n assert (copy(bcpu.Undefined) is not bcpu.Undefined) is False\n" }, { "id": 54735, "commit_id": "78824a8b694fdd68082948f22a72883d44fb90a0", "repo": "prefect", "path": "tests/orion/models/test_flow_runs.py", "file_name": "test_flow_runs.py", "fun_name": "test_read_flow_runs_with_only_one_column", "commit_message": "Add name to flow run sorts", "code": "async def test_read_flow_runs_with_only_one_column(self, flow_runs, db, session):\n # clear the session to erase cached versions of these flow runs and\n # force all data to be reloaded\n session.expunge_all()\n\n result = await models.flow_runs.read_flow_runs(\n session=session, columns=[db.FlowRun.id]\n )\n\n assert {r.id for r in result} == {fr.id for fr in flow_runs}\n\n # name and state_type were not loaded and raise an error\n # because the async session is closed\n for r in result:\n with pytest.raises(sa.exc.MissingGreenlet):\n r.name\n with pytest.raises(sa.exc.MissingGreenlet):\n r.state_type\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 199, "n_words": 74, "vocab_size": 58, "complexity": 4, "nloc": 11, "token_counts": 94, "n_ast_nodes": 151, "n_identifiers": 21, "random_cut": "async def test_read_flow_runs_with_only_one_column(self, flow_runs, db, session):\n # clear the session to erase cached versions of these flow runs and\n # force all data to be reloaded\n session.expunge_all()\n\n result = await models.flow_runs.read_flow_runs(\n session=session, columns=[db.FlowRun.id]\n )\n\n assert {r.id for r in result} == {fr.id for fr in flow_runs}\n\n # name and state_type were not loaded and raise an error\n # because the async session is closed\n for r in result:\n with pytest.raises(sa.exc.MissingGreenlet):\n r.name\n with pytest.raises(sa.exc.MissingGreenlet):\n r.state_type\n" }, { "id": 33906, "commit_id": "2e9af294940083915ccb2740a7c8d5b154194f15", "repo": "transformers", "path": "tests/test_modeling_tf_clip.py", "file_name": "test_modeling_tf_clip.py", "fun_name": "test_inference", "commit_message": "[CLIP] Fix TF test (#15042)", "code": "def test_inference(self):\n model_name = \"openai/clip-vit-base-patch32\"\n model = TFCLIPModel.from_pretrained(model_name)\n processor = CLIPProcessor.from_pretrained(model_name)\n\n image = prepare_img()\n inputs = processor(\n text=[\"a photo of a cat\", \"a photo of a dog\"], images=image, padding=True, return_tensors=\"tf\"\n )\n\n outputs = model(**inputs, training=False)\n\n # verify the logits\n self.assertEqual(\n outputs.logits_per_image.shape,\n tf.TensorShape((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),\n )\n self.assertEqual(\n outputs.logits_per_text.shape,\n tf.TensorShape((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),\n )\n\n expected_logits = tf.constant([[24.5701, 19.3049]])\n\n tf.debugging.assert_near(outputs.logits_per_image, expected_logits, atol=1e-3)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 208, "n_words": 56, "vocab_size": 44, 
"complexity": 1, "nloc": 19, "token_counts": 172, "n_ast_nodes": 256, "n_identifiers": 30, "random_cut": "def test_inference(self):\n model_name = \"openai/clip-vit-base-patch32\"\n model = TFCLIPModel.from_pretrained(model_name)\n processor = CLIPProcessor.from_pretrained(model_name)\n\n image = prepare_img()\n inputs = processor(\n text=[\"a photo of a cat\", \"a photo of a dog\"], images=image, padding=True, return_tensors=\"tf\"\n )\n\n outputs = model(**inputs, training=False)\n\n # verify the logits\n self.assertEqual(\n outputs.logits_per_image.shape,\n tf.TensorShape((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),\n )\n self.assertEqual(\n outputs.logits_per_text.shape,\n tf.TensorShape((inputs" }, { "id": 243262, "commit_id": "3353ea80e1c873acdb11636cf3d387b8e59580c4", "repo": "Pillow", "path": "Tests/test_image_resize.py", "file_name": "test_image_resize.py", "fun_name": "test_reducing_gap_3", "commit_message": "Further parametrizations", "code": "def test_reducing_gap_3(self, gradients_image, box, epsilon):\n ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)\n im = gradients_image.resize(\n (52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=3.0\n )\n\n with pytest.raises(AssertionError):\n assert_image_equal(ref, im)\n\n assert_image_similar(ref, im, epsilon)\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 83, "n_words": 27, "vocab_size": 24, "complexity": 1, "nloc": 8, "token_counts": 83, "n_ast_nodes": 117, "n_identifiers": 17, "random_cut": "def test_reducing_gap_3(self, gradients_image, box, epsilon):\n ref = g" }, { "id": 72912, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/api/v2/tests/tests.py", "file_name": "tests.py", "fun_name": "test_valid_star_with_additional_field", "commit_message": "Reformat with black", "code": "def test_valid_star_with_additional_field(self):\n # Note: '*,test' is not allowed but '*,test(foo)' is\n parsed = parse_fields_parameter(\"*,test(foo)\")\n\n self.assertEqual(\n parsed,\n [\n (\"*\", False, None),\n (\n \"test\",\n False,\n [\n (\"foo\", False, None),\n ],\n ),\n ],\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 235, "n_words": 31, "vocab_size": 25, "complexity": 1, "nloc": 15, "token_counts": 47, "n_ast_nodes": 73, "n_identifiers": 5, "random_cut": "def test_valid_star_with_additional_field(self):\n # Note: '*,test' is not allowed but '*,test(foo)' is\n parsed = parse_fields_parameter(\"*,test(foo)\")\n\n self.assertEqual(\n parsed,\n [\n (\"*\", False, None),\n (\n \"test\",\n False,\n [\n (\"foo\", " }, { "id": 200032, "commit_id": "2ddc46704dffa81a5a2a8df4348bf98dff07ebd5", "repo": "sympy", "path": "sympy/physics/continuum_mechanics/truss.py", "file_name": "truss.py", "fun_name": "draw", "commit_message": "subs_dict added to the draw method", "code": "def draw(self, subs_dict=None):\n \n if not numpy:\n raise ImportError(\"To use this function numpy module is required\")\n\n x = Symbol('x')\n\n markers = []\n annotations = []\n rectangles = []\n\n node_markers = self._draw_nodes(subs_dict)\n markers += node_markers\n\n member_rectangles = self._draw_members()\n rectangles += member_rectangles\n\n support_markers = self._draw_supports()\n markers += support_markers\n\n load_annotations = self._draw_loads()\n annotations += 
load_annotations\n\n xmax = -INF\n xmin = INF\n ymax = -INF\n ymin = INF\n\n for node in list(self._node_coordinates):\n xmax = max(xmax, self._node_coordinates[node][0])\n xmin = min(xmin, self._node_coordinates[node][0])\n ymax = max(ymax, self._node_coordinates[node][1])\n ymin = min(ymin, self._node_coordinates[node][1])\n\n lim = max(xmax*1.1-xmin*0.8+1, ymax*1.1-ymin*0.8+1)\n\n if lim==xmax*1.1-xmin*0.8+1:\n sing_plot = plot(1, (x, 1, 1), markers=markers, show=False, annotations=annotations, xlim=(xmin-0.05*lim, xmax*1.1), ylim=(xmin-0.05*lim, xmax*1.1), axis=False, rectangles=rectangles)\n else:\n sing_plot = plot(1, (x, 1, 1), markers=markers, show=False, annotations=annotations, xlim=(ymin-0.05*lim, ymax*1.1), ylim=(ymin-0.05*lim, ymax*1.1), axis=False, rectangles=rectangles)\n\n return sing_plot\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 360, "n_words": 122, "vocab_size": 69, "complexity": 4, "nloc": 30, "token_counts": 359, "n_ast_nodes": 495, "n_identifiers": 35, "random_cut": "def draw(self, subs_dict=None):\n \n if not numpy:\n raise ImportError(\"To use this function numpy module is required\")\n\n x = Symbol('x')\n\n markers = []\n annotations = []\n rectangles = []\n\n node_markers = self._draw_nodes(subs_dict)\n markers += node_markers\n\n member_rectangles = self._draw_members()\n rectangles += member_rectangles\n\n support_markers = self._draw_supports()\n markers += support_markers\n\n load_annotations = self._draw_loads()\n annotations += load_annotations\n\n xmax = -INF\n xmin = INF\n ymax = -INF\n ymin = INF\n\n for node in list(self._node_coordinates):\n xmax = max(xmax, self._node_coordinates[node][0])\n xmin = min(xmin, self._node_coordinates[node][0])\n ymax = max(ymax, self._node_coordinates[node][1])\n ymin =" }, { "id": 261949, "commit_id": "c1119bc29159f48fd91d986984772b2bac9fc9cd", "repo": "TTS", "path": "TTS/tts/utils/text/phonemizers/base.py", "file_name": "base.py", "fun_name": "_phonemize_postprocess", "commit_message": "Implement BasePhonemizer", "code": "def _phonemize_postprocess(self, phonemized, punctuations) -> str:\n \n if self._keep_puncs:\n return self._punctuator.restore(phonemized, punctuations)[0]\n return phonemized[0]\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 45, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 8, "token_counts": 36, "n_ast_nodes": 55, "n_identifiers": 8, "random_cut": "def _phonemize_postprocess(self, phonemized, punctuations) -> str:\n \n if self._keep_puncs:\n return self._punctuator.restore(phonemized, punctuations)[0]\n return phonemized[0]\n" }, { "id": 44017, "commit_id": "717169987e76f332d7ce92bc361d2fff6966f6f0", "repo": "airflow", "path": "tests/www/views/test_views_acl.py", "file_name": "test_views_acl.py", "fun_name": "acl_app", "commit_message": "Remove code duplication in the test suite test_views_acl.py (#20887)", "code": "def acl_app(app):\n security_manager = app.appbuilder.sm\n for username, (role_name, kwargs) in USER_DATA.items():\n if not security_manager.find_user(username=username):\n role = security_manager.add_role(role_name)\n security_manager.add_user(\n role=role,\n username=username,\n **kwargs,\n )\n\n role_permissions = {\n 'dag_acl_tester': [\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),\n (permissions.ACTION_CAN_EDIT, 'DAG:example_bash_operator'),\n 
(permissions.ACTION_CAN_READ, 'DAG:example_bash_operator'),\n ],\n 'all_dag_role': [\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),\n ],\n 'User': [\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),\n ],\n 'dag_acl_read_only': [\n (permissions.ACTION_CAN_READ, 'DAG:example_bash_operator'),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),\n ],\n 'dag_acl_faker': [(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)],\n }\n\n for _role, _permissions in role_permissions.items():\n role = security_manager.find_role(_role)\n for _action, _perm in _permissions:\n perm = security_manager.get_permission(_action, _perm)\n security_manager.add_permission_to_role(role, perm)\n\n yield app\n\n for username, _ in USER_DATA.items():\n user = security_manager.find_user(username=username)\n if user:\n security_manager.del_register_user(user)\n\n\n@pytest.fixture(scope=\"module\")", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "@pytest.fixture(scope=\"module\")", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 458, "n_words": 98, "vocab_size": 59, "complexity": 7, "nloc": 43, "token_counts": 290, "n_ast_nodes": 449, "n_identifiers": 35, "random_cut": "def acl_app(app):\n security_manager = app.appbuilder.sm\n for username, (role_name, kwargs) in USER_DATA.items():\n if not security_manager.find_user(username=username):\n role = security_manager.add_role(role_name)\n security_manager.add_user(\n role=role,\n username=username,\n **kwargs,\n )\n\n role_permissions = {\n 'dag_acl_tester': [\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),\n (permissions.ACTION_CAN_EDIT, 'DAG:example_bash_operator'),\n (permissions.ACTION_CAN_READ, 'DAG:example_bash_operator'),\n ],\n 'all_dag_role': [\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),\n ],\n 'User': [\n (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),\n ],\n 'dag_acl_read_only': [\n (permissions.ACTION_CAN_READ, 'DAG:example_bash_operator'),\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),\n ],\n 'dag_acl_faker': [(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)],\n }\n\n for _role, _permissions in role_permissions.items():\n role = security_manager.find_role(_role)\n for _action, _perm in _permissions:\n perm = security_manager.get_" }, { "id": 68099, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/templates/pages/search_help.py", "file_name": "search_help.py", "fun_name": "prepare_api_results", "commit_message": "style: format code with black", "code": "def prepare_api_results(api, topics_data):\n\tif not topics_data:\n\t\ttopics_data = []\n\n\tresults = []\n\tfor topic in topics_data:\n\t\troute = api.base_url + \"/\" + (api.post_route + \"/\" if api.post_route else \"\")\n\t\tfor key in api.post_route_key_list.split(\",\"):\n\t\t\troute += 
str(topic[key])\n\n\t\tresults.append(\n\t\t\tfrappe._dict(\n\t\t\t\t{\n\t\t\t\t\t\"title\": topic[api.post_title_key],\n\t\t\t\t\t\"preview\": html2text(topic[api.post_description_key]),\n\t\t\t\t\t\"route\": route,\n\t\t\t\t}\n\t\t\t)\n\t\t)\n\treturn results[:5]\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 32, "n_words": 50, "vocab_size": 38, "complexity": 5, "nloc": 18, "token_counts": 109, "n_ast_nodes": 179, "n_identifiers": 18, "random_cut": "def prepare_api_results(api, topics_data):\n\tif not topics_data:\n\t\ttopics_data = []\n\n\tresults = []\n\tfor topic in topics_data:\n\t\troute = api.base_url + \"/\" + (api.post_route + \"/\" if api.post_route else \"\")\n\t\tfor key in api.post_route_key_list.split(\",\"):\n\t\t\troute += str(topic[key])\n\n\t\tresults.append(\n\t\t\tfrappe._dict(\n\t\t\t\t{\n\t\t\t\t\t\"title\": topic[api.post_title_key],\n\t\t\t\t\t\"preview\": html2text(topic[api.pos" }, { "id": 271298, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/functional_test.py", "file_name": "functional_test.py", "fun_name": "testBasicNetwork", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def testBasicNetwork(self):\n with tf.Graph().as_default():\n # minimum viable network\n x = input_layer_lib.Input(shape=(32,))\n dense = layers.Dense(2)\n y = dense(x)\n network = functional.Functional(x, y, name=\"dense_network\")\n\n # test basic attributes\n self.assertEqual(network.name, \"dense_network\")\n self.assertEqual(len(network.layers), 2) # InputLayer + Dense\n self.assertEqual(network.layers[1], dense)\n self._assertAllIs(network.weights, dense.weights)\n self._assertAllIs(\n network.trainable_weights, dense.trainable_weights\n )\n self._assertAllIs(\n network.non_trainable_weights, dense.non_trainable_weights\n )\n\n # test callability on Input\n x_2 = input_layer_lib.Input(shape=(32,))\n y_2 = network(x_2)\n self.assertEqual(y_2.shape.as_list(), [None, 2])\n\n # test callability on regular tensor\n x_2 = tf.compat.v1.placeholder(dtype=\"float32\", shape=(None, 32))\n y_2 = network(x_2)\n self.assertEqual(y_2.shape.as_list(), [None, 2])\n\n # test network `trainable` attribute\n network.trainable = False\n self._assertAllIs(network.weights, dense.weights)\n self.assertEqual(network.trainable_weights, [])\n self._assertAllIs(\n network.non_trainable_weights,\n dense.trainable_weights + dense.non_trainable_weights,\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 470, "n_words": 95, "vocab_size": 59, "complexity": 1, "nloc": 29, "token_counts": 257, "n_ast_nodes": 407, "n_identifiers": 31, "random_cut": "def testBasicNetwork(self):\n with tf.Graph().as_default():\n # minimum viable network\n x = input_layer_lib.Input(shape=(32,))\n dense = layers.Dense(2)\n y = dense(x)\n network = functional.Functional(x, y, name=\"dense_network\")\n\n # test basic attributes\n self.assertEqual(network.name, \"dense_network\")\n self.assertEqual(len(network.layers), 2) # InputLayer + Dense\n self.assertEqual(network.layers[1], dense)\n self._assertAllIs(network.weights, dense.weights)\n self._assertAllIs(\n network.trainable_weights, dense.trainable_weights\n )\n self._assertAllIs(\n network.non_trainable_weights, dense.non_trainable_weigh" }, { "id": 167498, "commit_id": 
"ed55bdf198590dd572f2e546c7b2afe7ae98ba74", "repo": "pandas", "path": "pandas/tests/reshape/test_from_dummies.py", "file_name": "test_from_dummies.py", "fun_name": "test_no_prefix_string_cats_basic_mixed_bool_values", "commit_message": "Initial draft: from_dummies (#41902)", "code": "def test_no_prefix_string_cats_basic_mixed_bool_values():\n dummies = DataFrame(\n {\"a\": [1, 0, 0, 1], \"b\": [False, True, False, False], \"c\": [0, 0, 1, 0]}\n )\n expected = DataFrame({\"\": [\"a\", \"b\", \"c\", \"a\"]})\n result = from_dummies(dummies)\n tm.assert_frame_equal(result, expected)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 54, "n_words": 33, "vocab_size": 29, "complexity": 1, "nloc": 7, "token_counts": 78, "n_ast_nodes": 124, "n_identifiers": 8, "random_cut": "def test_no_prefix_string_cats_basic_mixed_bool_values():\n " }, { "id": 248839, "commit_id": "96cf81e312407f0caba1b45ba9899906b1dcc098", "repo": "synapse", "path": "tests/rest/client/test_rooms.py", "file_name": "test_rooms.py", "fun_name": "test_any_room_override_defeats_config_override", "commit_message": "Use HTTPStatus constants in place of literals in tests. (#13297)", "code": "def test_any_room_override_defeats_config_override(self) -> None:\n # Given the server has config allowing normal users to post my event type\n # And I am a normal member of a room\n # But the room was created with special permissions\n extra_content: Dict[str, Any] = {\n \"power_level_content_override\": {\"events\": {}},\n }\n room_id = self.helper.create_room_as(\n \"@some_other_guy:red\", extra_content=extra_content\n )\n self.helper.join(room=room_id, user=self.user_id)\n\n # When I send a state event\n path = \"/rooms/{room_id}/state/custom.event/my_state_key\".format(\n room_id=urlparse.quote(room_id),\n )\n channel = self.make_request(\"PUT\", path, \"{}\")\n\n # Then I am not allowed\n self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result[\"body\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 210, "n_words": 80, "vocab_size": 63, "complexity": 1, "nloc": 13, "token_counts": 106, "n_ast_nodes": 177, "n_identifiers": 25, "random_cut": "def test_any_room_override_defeats_config_override(self) -> None:\n # Given the server has config allowing normal users to post my event type\n # And I am a normal member of a room\n # But the room was created with special permissions\n extra_content: Dict[str, Any] = {\n \"power_level_content_override\": {\"events\": {}},\n }\n room_id = self.helper" }, { "id": 63691, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/resolvelib/resolvers.py", "file_name": "resolvers.py", "fun_name": "_backtrack", "commit_message": "upd; format", "code": "def _backtrack(self):\n \n while len(self._states) >= 3:\n # Remove the state that triggered backtracking.\n del self._states[-1]\n\n # Retrieve the last candidate pin and known incompatibilities.\n broken_state = self._states.pop()\n name, candidate = broken_state.mapping.popitem()\n incompatibilities_from_broken = [\n (k, list(v.incompatibilities))\n for k, v in broken_state.criteria.items()\n ]\n\n # Also mark the newly known incompatibility.\n incompatibilities_from_broken.append((name, [candidate]))\n\n self._r.backtracking(candidate=candidate)\n\n # Create a 
new state from the last known-to-work one, and apply\n # the previously gathered incompatibility information.", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 246, "n_words": 70, "vocab_size": 55, "complexity": 4, "nloc": 17, "token_counts": 111, "n_ast_nodes": 152, "n_identifiers": 20, "random_cut": "def _backtrack(self):\n \n while len(self._states) >= 3:\n # Remove the state that triggered backtracking.\n del self._states[-1]\n\n # Retrieve the last candidate pin and known incompatibilities.\n broken_state = self._states.pop()\n name, candidate = broken_state.mapping.popitem()\n incompatibilities_from_broken = [\n (k, list(v.incompatibilities))\n " }, { "id": 268627, "commit_id": "da3a7618baa500899d11bb9a80863fdb1f80e3f1", "repo": "ansible", "path": "lib/ansible/cli/galaxy.py", "file_name": "galaxy.py", "fun_name": "execute_search", "commit_message": "give a warning when no roles match the search instead of returning rc 1 (#79195)\n\n* give a warning when no roles match the search instead of returning rc 1\r\n\r\n* porting guide note\r\n\r\n* Update docs/docsite/rst/porting_guides/porting_guide_core_2.15.rst\r\n\r\nCo-authored-by: Sviatoslav Sydorenko ", "code": "def execute_search(self):\n \n page_size = 1000\n search = None\n\n if context.CLIARGS['args']:\n search = '+'.join(context.CLIARGS['args'])\n\n if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:\n raise AnsibleError(\"Invalid query. At least one search term, platform, galaxy tag or author must be provided.\")\n\n response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],\n tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)\n\n if response['count'] == 0:\n display.warning(\"No roles match your search.\")\n return 0\n\n data = [u'']\n\n if response['count'] > page_size:\n data.append(u\"Found %d roles matching your search. Showing first %s.\" % (response['count'], page_size))\n else:\n data.append(u\"Found %d roles matching your search:\" % response['count'])\n\n max_len = []\n for role in response['results']:\n max_len.append(len(role['username'] + '.' 
+ role['name']))\n name_len = max(max_len)\n format_str = u\" %%-%ds %%s\" % name_len\n data.append(u'')\n data.append(format_str % (u\"Name\", u\"Description\"))\n data.append(format_str % (u\"----\", u\"-----------\"))\n for role in response['results']:\n data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))\n\n data = u'\\n'.join(data)\n self.pager(data)\n\n return 0\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 410, "n_words": 135, "vocab_size": 91, "complexity": 10, "nloc": 30, "token_counts": 298, "n_ast_nodes": 506, "n_identifiers": 25, "random_cut": "def execute_search(self):\n \n page_size = 1000\n search = None\n\n if" }, { "id": 303944, "commit_id": "1e9ede25ad253bc42bfd764435a9d37bd4fd3a80", "repo": "core", "path": "tests/components/fully_kiosk/conftest.py", "file_name": "conftest.py", "fun_name": "mock_setup_entry", "commit_message": "Add Fully Kiosk Browser integration with initial binary sensor platform (#76737)\n\nCo-authored-by: Franck Nijhof ", "code": "def mock_setup_entry() -> Generator[AsyncMock, None, None]:\n \n with patch(\n \"homeassistant.components.fully_kiosk.async_setup_entry\", return_value=True\n ) as mock_setup:\n yield mock_setup\n\n\n@pytest.fixture", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 38, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 6, "token_counts": 28, "n_ast_nodes": 57, "n_identifiers": 8, "random_cut": "def mock_setup_entry() -> Generator[AsyncMock, None, None]:\n \n with patch(\n \"homeassistant.components.fully_kiosk.async_setup_entry\", return_value=True\n ) as mock_setup:\n yield mock_setu" }, { "id": 30652, "commit_id": "71e602725b90f63f404109bae9f72cbdf755477b", "repo": "transformers", "path": "src/transformers/models/gpt_neox/modeling_gpt_neox.py", "file_name": "modeling_gpt_neox.py", "fun_name": "_attn", "commit_message": "[WIP] Adding GPT-NeoX-20B (#16659)\n\n* initial\r\n\r\n* first try\r\n\r\n* working 20B\r\n\r\n* 20B tokenizers\r\n\r\n* Docs\r\n\r\n* Import fixes for missing classes\r\n\r\n* Update docs, fixup\r\n\r\n* black formatting\r\n\r\n* isort\r\n\r\n* flake\r\n\r\n* dummy objects\r\n\r\n* documentation\r\n\r\n* Documentation yml\r\n\r\n* more docs\r\n\r\n* tweaks for tests\r\n\r\n* tokenization auto\r\n\r\n* fix neox tests\r\n\r\n* test\r\n\r\n* test\r\n\r\n* einsum\r\n\r\n* address PR feedback\r\n\r\n* Documentation\r\n\r\n* Update README.md\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/gpt_neox/__init__.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/gpt_neox/configuration_gpt_neox.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Remove undefined LaTeX syntax\r\n\r\n* Update to full url to avoid confusion about if that's supposed to refer to the Hub\r\n\r\n* fix auto\r\n\r\n* move tests\r\n\r\n* documentation fix\r\n\r\n* more doc fixes\r\n\r\n* test refactor\r\n\r\n* fix import\r\n\r\n* fix import\r\n\r\n* fix import\r\n\r\n* fix import\r\n\r\n* fix import\r\n\r\n* style fixes\r\n\r\n* More modeling fixes\r\n\r\nCo-authored-by: Jason 
Phang \r\nCo-authored-by: Stella Biderman \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def _attn(self, query, key, value, attention_mask=None, head_mask=None):\n # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size]\n # compute causal mask from causal mask buffer\n batch_size, num_attention_heads, query_length, attn_head_size = query.size()\n key_length = key.size(-2)\n\n causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()\n\n query = query.view(batch_size * num_attention_heads, query_length, attn_head_size)\n key = key.view(batch_size * num_attention_heads, key_length, attn_head_size)\n attn_scores = torch.einsum(\"bik,bjk->bij\", query, key) / self.norm_factor\n if torch.isnan(attn_scores).any():\n raise RuntimeError()\n attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length)\n\n attn_scores = torch.where(causal_mask, attn_scores, self.masked_bias.to(attn_scores.dtype))\n\n if attention_mask is not None:\n # Apply the attention mask\n attn_scores = attn_scores + attention_mask\n\n attn_weights = nn.functional.softmax(attn_scores, dim=-1)\n if torch.isnan(attn_weights).any():\n raise RuntimeError()\n attn_weights = attn_weights.to(value.dtype)\n\n # Mask heads if we want to\n if head_mask is not None:\n attn_weights = attn_weights * head_mask\n\n attn_output = torch.matmul(attn_weights, value)\n if torch.isnan(attn_output).any():\n raise RuntimeError()\n return attn_output, attn_weights\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 337, "n_words": 132, "vocab_size": 80, "complexity": 6, "nloc": 23, "token_counts": 252, "n_ast_nodes": 388, "n_identifiers": 35, "random_cut": "def _attn(self, query, key, value, attention_mask=None, head_mask=None):\n # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size]\n # compute causal mask from causal mask buffer\n batch_size, num_attention_heads, query_length, attn_head_size = query.size()\n key_length = key.size(-2)\n\n causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()\n\n query = query.view(batch_size * num_attention_heads, query_length, attn_head_size)\n key = key.view(batch_size * num_attention_heads, key_length, attn_head_size)\n attn_scores = torch.einsum(\"bik,bjk->bij\", query, key) / self.norm_factor\n if torch.isnan(attn_scores).any():\n raise RuntimeError()\n attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length)\n\n attn_scores = torch.where(causal_mask, attn_scores, se" }, { "id": 27295, "commit_id": "60b1c18f600a7667bf55dc6f3df035235d771b86", "repo": "saleor", "path": "saleor/graphql/order/mutations/order_mark_as_paid.py", "file_name": "order_mark_as_paid.py", "fun_name": "clean_billing_address", "commit_message": "Split order mutations files into file per mutation (#9747)", "code": "def clean_billing_address(cls, instance):\n if not instance.billing_address:\n raise ValidationError(\n \"Order billing address is required to mark order as paid.\",\n code=OrderErrorCode.BILLING_ADDRESS_NOT_SET,\n )\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 78, "n_words": 20, "vocab_size": 20, "complexity": 2, "nloc": 6, "token_counts": 25, "n_ast_nodes": 40, "n_identifiers": 8, "random_cut": "def clean_billing_address(cls, instance):\n if not 
instance.billing_address:\n raise ValidationError(\n \"Order billing address is required to mark order as paid.\",\n code=OrderErrorCode.BILLING_ADDRESS_NOT_SET,\n )\n" }, { "id": 337113, "commit_id": "e713346ad13b16c09cac6a7ebfd7757c7582818f", "repo": "diffusers", "path": "tests/test_pipelines.py", "file_name": "test_pipelines.py", "fun_name": "test_stable_diffusion_no_safety_checker", "commit_message": "Give more customizable options for safety checker (#815)\n\n* Give more customizable options for safety checker\r\n\r\n* Apply suggestions from code review\r\n\r\n* Update src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py\r\n\r\n* Finish\r\n\r\n* make style\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Pedro Cuenca \r\n\r\n* up\r\n\r\nCo-authored-by: Pedro Cuenca ", "code": "def test_stable_diffusion_no_safety_checker(self):\n pipe = StableDiffusionPipeline.from_pretrained(\n \"hf-internal-testing/tiny-stable-diffusion-lms-pipe\", safety_checker=None\n )\n assert isinstance(pipe, StableDiffusionPipeline)\n assert isinstance(pipe.scheduler, LMSDiscreteScheduler)\n assert pipe.safety_checker is None\n\n image = pipe(\"example prompt\", num_inference_steps=2).images[0]\n assert image is not None\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 87, "n_words": 28, "vocab_size": 21, "complexity": 1, "nloc": 9, "token_counts": 59, "n_ast_nodes": 92, "n_identifiers": 12, "random_cut": "def test_stable_diffusion_no_safety_checker(self):\n pipe = StableDiffusionPipeline.from_pretrained(\n \"hf-internal-testing/tiny-stable-diffusion-lms-pipe\", safety_checker=None\n )\n assert isinstance(pipe, StableDiffusionPipeline)\n assert isinstance(pipe.sched" }, { "id": 102077, "commit_id": "48c886b3dce3d3117ad16edaf35c8abd28dc51f5", "repo": "faceswap", "path": "lib/sysinfo.py", "file_name": "sysinfo.py", "fun_name": "_installed_conda", "commit_message": "Allow decoding errors", "code": "def _installed_conda(self):\n \n if not self._is_conda:\n return None\n with Popen(\"conda list\", shell=True, stdout=PIPE, stderr=PIPE) as conda:\n stdout, stderr = conda.communicate()\n if stderr:\n return \"Could not get package list\"\n installed = stdout.decode(self._encoding, errors=\"replace\").splitlines()\n return \"\\n\".join(installed)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 108, "n_words": 33, "vocab_size": 28, "complexity": 3, "nloc": 9, "token_counts": 73, "n_ast_nodes": 128, "n_identifiers": 16, "random_cut": "def _installed_conda(self):\n \n if not self._is_conda:\n return None\n with Popen(\"conda list\", shell=True, stdout=PIPE, stderr=PIPE) as conda:\n " }, { "id": 68847, "commit_id": "74a782d81d8f8c4a4d9214a9c06377e5e6e464dd", "repo": "erpnext", "path": "erpnext/hr/report/employee_exits/employee_exits.py", "file_name": "employee_exits.py", "fun_name": "get_data", "commit_message": "refactor: DB independent quoting and truthy/falsy values (#31358)\n\n* refactor: DB independent quoting and truthy/falsy values\r\n\r\n* style: reformat to black spec\r\n\r\n* fix: ifnull -> coalesce\r\n\r\n* fix: coalesce -> Coalesce\r\n\r\n* fix: revert pypika comparison\r\n\r\n* refactor: convert queries to QB\r\n\r\n* fix: incorrect value types for query\r\n\r\n`=` query makes no sense with list of values\r\n\r\n* fix: remove warehouse docstatus condition\r\n\r\n* fix: keep 
using base rate as rate\r\n\r\nCo-authored-by: Ankush Menat ", "code": "def get_data(filters):\n\temployee = frappe.qb.DocType(\"Employee\")\n\tinterview = frappe.qb.DocType(\"Exit Interview\")\n\tfnf = frappe.qb.DocType(\"Full and Final Statement\")\n\n\tquery = (\n\t\tfrappe.qb.from_(employee)\n\t\t.left_join(interview)\n\t\t.on(interview.employee == employee.name)\n\t\t.left_join(fnf)\n\t\t.on(fnf.employee == employee.name)\n\t\t.select(\n\t\t\temployee.name.as_(\"employee\"),\n\t\t\temployee.employee_name.as_(\"employee_name\"),\n\t\t\temployee.date_of_joining.as_(\"date_of_joining\"),\n\t\t\temployee.relieving_date.as_(\"relieving_date\"),\n\t\t\temployee.department.as_(\"department\"),\n\t\t\temployee.designation.as_(\"designation\"),\n\t\t\temployee.reports_to.as_(\"reports_to\"),\n\t\t\tinterview.name.as_(\"exit_interview\"),\n\t\t\tinterview.status.as_(\"interview_status\"),\n\t\t\tinterview.employee_status.as_(\"employee_status\"),\n\t\t\tinterview.reference_document_name.as_(\"questionnaire\"),\n\t\t\tfnf.name.as_(\"full_and_final_statement\"),\n\t\t)\n\t\t.distinct()\n\t\t.where(\n\t\t\t(fn.Coalesce(fn.Cast(employee.relieving_date, \"char\"), \"\") != \"\")\n\t\t\t& ((interview.name.isnull()) | ((interview.name.isnotnull()) & (interview.docstatus != 2)))\n\t\t\t& ((fnf.name.isnull()) | ((fnf.name.isnotnull()) & (fnf.docstatus != 2)))\n\t\t)\n\t\t.orderby(employee.relieving_date, order=Order.asc)\n\t)\n\n\tquery = get_conditions(filters, query, employee, interview, fnf)\n\tresult = query.run(as_dict=True)\n\n\treturn result\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 26, "n_whitespaces": 45, "n_words": 80, "vocab_size": 61, "complexity": 1, "nloc": 35, "token_counts": 323, "n_ast_nodes": 525, "n_identifiers": 40, "random_cut": "def get_data(filters):\n\temployee = frappe.qb.DocType(\"Employee\")\n\tinterview = frappe.qb.DocType(\"Exit Interview\")\n\tfnf = frappe.qb.DocType(\"Full and Final Statement\")\n\n\tquery = (\n\t\tfrappe.qb.from_(employee)\n\t\t.left_join(interview)\n\t\t.on(interview.employee == employee.name)\n\t\t.left_join(fnf)\n\t\t.on(fnf.employee == employee.name)\n\t\t.select(\n\t\t\temployee.name.as_(\"employee\"),\n\t\t\temployee.employee_name.as_(\"employee_name\"),\n\t\t\temployee.date_of_joining.as_(\"date_of_joining\"),\n\t\t\temployee.relieving_date.as_(\"relieving_date\"),\n\t\t\temployee.department.as_(\"department\"),\n\t\t\temployee.designation.as_(\"designation\"),\n\t\t\temployee.r" }, { "id": 143209, "commit_id": "75d08b06328d213656e7280639b35ccecdfc34d0", "repo": "ray", "path": "python/ray/tune/tests/test_searchers.py", "file_name": "test_searchers.py", "fun_name": "testOptunaReportTooOften", "commit_message": "[tune/structure] Refactor `suggest` into `search` package (#26074)\n\nThis PR renames the `suggest` package to `search` and alters the layout slightly.\r\n\r\nIn the new package, the higher-level abstractions are on the top level and the search algorithms have their own subdirectories.\r\n\r\nIn a future refactor, we can turn algorithms such as PBT into actual `SearchAlgorithm` classes and move them into the `search` package. \r\n\r\nThe main reason to keep algorithms and searchers in the same directory is to avoid user confusion - for a user, `Bayesopt` is as much a search algorithm as e.g. 
`PBT`, so it doesn't make sense to split them up.", "code": "def testOptunaReportTooOften(self):\n from ray.tune.search.optuna import OptunaSearch\n from optuna.samplers import RandomSampler\n\n searcher = OptunaSearch(\n sampler=RandomSampler(seed=1234),\n space=OptunaSearch.convert_search_space(self.config),\n metric=\"metric\",\n mode=\"max\",\n )\n searcher.suggest(\"trial_1\")\n searcher.on_trial_result(\"trial_1\", {\"training_iteration\": 1, \"metric\": 1})\n searcher.on_trial_complete(\"trial_1\", {\"training_iteration\": 2, \"metric\": 1})\n\n # Report after complete should not fail\n searcher.on_trial_result(\"trial_1\", {\"training_iteration\": 3, \"metric\": 1})\n\n searcher.on_trial_complete(\"trial_1\", {\"training_iteration\": 4, \"metric\": 1})\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 159, "n_words": 46, "vocab_size": 33, "complexity": 1, "nloc": 14, "token_counts": 124, "n_ast_nodes": 212, "n_identifiers": 20, "random_cut": "def testOptunaReportTooOften(self):\n from ray.tune.search.optuna import OptunaSearch\n from optuna.samplers import RandomSampler\n\n searcher = OptunaSearch(\n sampler=RandomSampler(seed=1234),\n space=OptunaSearch.convert_search_space(self.config),\n metric=\"metric\",\n mode=\"max\",\n )\n searcher.suggest(\"trial_1\")\n searcher.on_trial_result(\"trial_1\", {\"training_iteration\": 1, \"metric\": 1})\n searcher.on_trial_complete(\"trial_1\", {\"training_iteration\": 2, \"metric\": 1})\n\n # Report after complete should not fail\n searcher.on_trial_result(\"trial_1\", {\"training_iteration\": 3, \"metric\": 1})\n\n searcher.on_trial_compl" }, { "id": 188619, "commit_id": "df51c82cfd34c308fb1065f311501d71e808ba21", "repo": "jumpserver", "path": "apps/rbac/tree.py", "file_name": "tree.py", "fun_name": "_sort_action", "commit_message": "perf: 优化Migration,删掉原来的 content type (#7835)\n\nCo-authored-by: ibuler \r\nCo-authored-by: Jiangjie.Bai <32935519+BaiJiangJie@users.noreply.github.com>", "code": "def _sort_action(node):\n if node.isParent:\n return ['zz', 0]\n\n action_resource = node.title.split('.')[-1]\n action, resource = action_resource.split('_', 2)\n action_value_mapper = {\n 'view': 2,\n 'add': 4,\n 'change': 6,\n 'delete': 8\n }\n v = action_value_mapper.get(action, 10)\n return [resource, v]\n\n", "url": "https://github.com/jumpserver/jumpserver.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 89, "n_words": 34, "vocab_size": 30, "complexity": 2, "nloc": 13, "token_counts": 77, "n_ast_nodes": 128, "n_identifiers": 11, "random_cut": "def _sort_action(node):\n if node.isParent:\n re" }, { "id": 185670, "commit_id": "7de6ceea9c13b9b1b2555fe99456468a99709c06", "repo": "textual", "path": "src/textual/app.py", "file_name": "app.py", "fun_name": "flush", "commit_message": "writer thread", "code": "def flush(self) -> None:\n event = threading.Event()\n self._queue.put(event)\n event.wait()\n self._file.flush()\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 37, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 34, "n_ast_nodes": 58, "n_identifiers": 9, "random_cut": "def flush(self) -> None:\n event = threa" }, { "id": 77007, "commit_id": "7ad87c7d93a78067ddc71300675139ab355691c2", "repo": "wagtail", "path": "wagtail/documents/tests/test_admin_views.py", "file_name": 
"test_admin_views.py", "fun_name": "get", "commit_message": "Ensure 'next' param on image / doc listings always links back to index page, not results view\n\nFixes #8291. When results listings are generated as partial AJAX responses through the listing_results review, the 'next' parameter on those results should point back to the main 'index' view so that on return from the edit view, the user gets back a full page rather than a partial response.", "code": "def get(self, params={}):\n return self.client.get(reverse(\"wagtaildocs:listing_results\"), params)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 24, "n_ast_nodes": 39, "n_identifiers": 5, "random_cut": "def get(self, params={}):\n return self.client.get(reverse(\"wagtaildocs:listi" }, { "id": 184447, "commit_id": "ac24e77ecfff2be073a289096a9f3738dfe5c42c", "repo": "textual", "path": "src/textual/message_pump.py", "file_name": "message_pump.py", "fun_name": "close_messages", "commit_message": "fix screenshots", "code": "async def close_messages(self) -> None:\n \n if self._closed or self._closing:\n return\n self._closing = True\n stop_timers = list(self._timers)\n for timer in stop_timers:\n await timer.stop()\n self._timers.clear()\n await self._message_queue.put(MessagePriority(None))\n\n if self._task is not None and asyncio.current_task() != self._task:\n # Ensure everything is closed before returning\n await self._task\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 143, "n_words": 43, "vocab_size": 37, "complexity": 6, "nloc": 12, "token_counts": 83, "n_ast_nodes": 140, "n_identifiers": 16, "random_cut": "async def close_messages(self) -> None:\n \n if self._closed or self._closing:\n return\n " }, { "id": 101536, "commit_id": "dc18c74eea0c7837a820d27628cb12b0824fa30e", "repo": "faceswap", "path": "lib/gui/utils.py", "file_name": "utils.py", "fun_name": "_dir", "commit_message": "Bugfix: Preview for extract in batch mode", "code": "def _dir(self) -> str:\n \n logger.debug(\"Popping Dir browser\")\n return filedialog.askdirectory(**self._kwargs)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 24, "n_ast_nodes": 44, "n_identifiers": 8, "random_cut": "def _dir(self) -> str:\n \n logger.debug(\"Popp" }, { "id": 178267, "commit_id": "92314e4a9c431c407533e4a064481acf3c5983ab", "repo": "label-studio", "path": "label_studio/core/storage.py", "file_name": "storage.py", "fun_name": "url", "commit_message": "fix: DEV-3911: Move persistent storages to OS (#3377)\n\n* fix: DEV-3911: Move persistent storages to OS\r\n\r\n* Fix\r\n\r\n* Add deps\r\n\r\n* Back header\r\n\r\n* Move DownloadStorageData handler\r\n\r\n* Update all urls json\r\n\r\n* Fix import\r\n\r\n* add nginx config\r\n\r\n* Fix GSC storage\r\n\r\nCo-authored-by: Sergei Ivashchenko \r\nCo-authored-by: Sergey Zhuk ", "code": "def url(self, name, storage_url=False, *args, **kwargs):\n if flag_set('ff_back_dev_2915_storage_nginx_proxy_26092022_short'):\n if storage_url is True:\n return super().url(name, *args, **kwargs)\n return f'{settings.HOSTNAME}/storage-data/uploaded/?filepath={name}'\n else:\n return super().url(name, 
*args, **kwargs)\n\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 84, "n_words": 23, "vocab_size": 16, "complexity": 3, "nloc": 7, "token_counts": 63, "n_ast_nodes": 110, "n_identifiers": 10, "random_cut": "def url(self, name, storage_url=False, *args, **kwargs):\n if flag_set('ff_back_dev_2915_storage_nginx_proxy_26092022_short'):\n if storage_url is True:\n return super().url(name, *args, **kwargs)\n return f'{settings.HOSTNAME}/storage-data/uploaded/?fi" }, { "id": 109852, "commit_id": "a94dc42f6e02e8feca9892218551d169d04eaeb8", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_legend.py", "file_name": "test_legend.py", "fun_name": "test_legend_pathcollection_labelcolor_linecolor_cmap", "commit_message": "Correct get_text() call to get_color()", "code": "def test_legend_pathcollection_labelcolor_linecolor_cmap():\n # test the labelcolor for labelcolor='linecolor' on PathCollection\n # with a colormap\n fig, ax = plt.subplots()\n ax.scatter(np.arange(10), np.arange(10), c=np.arange(10), label='#1')\n\n leg = ax.legend(labelcolor='linecolor')\n text, = leg.get_texts()\n assert mpl.colors.same_color(text.get_color(), 'black')\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 51, "n_words": 31, "vocab_size": 28, "complexity": 1, "nloc": 6, "token_counts": 77, "n_ast_nodes": 130, "n_identifiers": 19, "random_cut": "def test_legend_pathcollection_labelcolor_linecolor_cmap():\n # test the labelcolor for " }, { "id": 151426, "commit_id": "92a1d58df8687bfeda5ed0f57a8736152430fce4", "repo": "freqtrade", "path": "tests/exchange/test_exchange.py", "file_name": "test_exchange.py", "fun_name": "test_refresh_latest_ohlcv", "commit_message": "Evict cache if we didn't get new candles for X hours", "code": "def test_refresh_latest_ohlcv(mocker, default_conf, caplog, candle_type) -> None:\n ohlcv = [\n [\n (arrow.utcnow().shift(minutes=-5).int_timestamp) * 1000, # unix timestamp ms\n 1, # open\n 2, # high\n 3, # low\n 4, # close\n 5, # volume (in quote currency)\n ],\n [\n arrow.utcnow().int_timestamp * 1000, # unix timestamp ms\n 3, # open\n 1, # high\n 4, # low\n 6, # close\n 5, # volume (in quote currency)\n ]\n ]\n\n caplog.set_level(logging.DEBUG)\n exchange = get_patched_exchange(mocker, default_conf)\n exchange._api_async.fetch_ohlcv = get_mock_coro(ohlcv)\n\n pairs = [('IOTA/ETH', '5m', candle_type), ('XRP/ETH', '5m', candle_type)]\n # empty dicts\n assert not exchange._klines\n res = exchange.refresh_latest_ohlcv(pairs, cache=False)\n # No caching\n assert not exchange._klines\n\n assert len(res) == len(pairs)\n assert exchange._api_async.fetch_ohlcv.call_count == 2\n exchange._api_async.fetch_ohlcv.reset_mock()\n\n exchange.required_candle_call_count = 2\n res = exchange.refresh_latest_ohlcv(pairs)\n assert len(res) == len(pairs)\n\n assert log_has(f'Refreshing candle (OHLCV) data for {len(pairs)} pairs', caplog)\n assert exchange._klines\n assert exchange._api_async.fetch_ohlcv.call_count == 4\n exchange._api_async.fetch_ohlcv.reset_mock()\n for pair in pairs:\n assert isinstance(exchange.klines(pair), DataFrame)\n assert len(exchange.klines(pair)) > 0\n\n # klines function should return a different object on each call\n # if copy is \"True\"\n assert exchange.klines(pair) is not exchange.klines(pair)\n assert exchange.klines(pair) is not exchange.klines(pair, copy=True)\n 
assert exchange.klines(pair, copy=True) is not exchange.klines(pair, copy=True)\n assert exchange.klines(pair, copy=False) is exchange.klines(pair, copy=False)\n\n # test caching\n res = exchange.refresh_latest_ohlcv(\n [('IOTA/ETH', '5m', candle_type), ('XRP/ETH', '5m', candle_type)])\n assert len(res) == len(pairs)\n\n assert exchange._api_async.fetch_ohlcv.call_count == 0\n assert log_has(f\"Using cached candle (OHLCV) data for {pairs[0][0]}, \"\n f\"{pairs[0][1]}, {candle_type} ...\",\n caplog)\n caplog.clear()\n # Reset refresh times - must do 2 call per pair as cache is expired\n exchange._pairs_last_refresh_time = {}\n res = exchange.refresh_latest_ohlcv(\n [('IOTA/ETH', '5m', candle_type), ('XRP/ETH', '5m', candle_type)])\n assert len(res) == len(pairs)\n\n assert exchange._api_async.fetch_ohlcv.call_count == 4\n\n # cache - but disabled caching\n exchange._api_async.fetch_ohlcv.reset_mock()\n exchange.required_candle_call_count = 1\n\n pairlist = [\n ('IOTA/ETH', '5m', candle_type),\n ('XRP/ETH', '5m', candle_type),\n ('XRP/ETH', '1d', candle_type)]\n res = exchange.refresh_latest_ohlcv(pairlist, cache=False)\n assert len(res) == 3\n assert exchange._api_async.fetch_ohlcv.call_count == 3\n\n # Test the same again, should NOT return from cache!\n exchange._api_async.fetch_ohlcv.reset_mock()\n res = exchange.refresh_latest_ohlcv(pairlist, cache=False)\n assert len(res) == 3\n assert exchange._api_async.fetch_ohlcv.call_count == 3\n exchange._api_async.fetch_ohlcv.reset_mock()\n caplog.clear()\n\n # Call with invalid timeframe\n res = exchange.refresh_latest_ohlcv([('IOTA/ETH', '3m', candle_type)], cache=False)\n if candle_type != CandleType.MARK:\n assert not res\n assert len(res) == 0\n assert log_has_re(r'Cannot download \\(IOTA\\/ETH, 3m\\).*', caplog)\n else:\n assert len(res) == 1\n\n\n@pytest.mark.parametrize('candle_type', [CandleType.FUTURES, CandleType.MARK, CandleType.SPOT])", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize('candle_type', [CandleType.FUTURES, CandleType.MARK, CandleType.SPOT])", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 826, "n_words": 348, "vocab_size": 156, "complexity": 3, "nloc": 78, "token_counts": 615, "n_ast_nodes": 1047, "n_identifiers": 45, "random_cut": "def test_refresh_latest_ohlcv(mocker, default_conf, caplog, candle_type) -> None:\n ohlcv = [\n [\n (arrow.utcnow().shift(minutes=-5).int_timestamp) * 1000, # unix timestamp ms\n 1, # open\n 2, # high\n 3, # low\n 4, # close\n 5, # volume (in quote currency)\n ],\n [\n arrow.utcnow().int_timestamp * 1000, # unix timestamp ms\n 3, # open\n 1, # high\n 4, # low\n 6, # close\n 5, # volume (in quote currency)\n ]\n ]\n\n caplog.set_level(logging.DEBUG)\n exchange = get_patched_exchange(mocker, default_conf)\n exchange._api_async.fetch_ohlcv = get_mock_coro(ohlcv)\n\n pairs = [('IOTA/ETH', '5m', candle_type), ('XRP/ETH', '5m', candle_type)]\n # empty dicts\n assert not exchange._klines\n res = exchange.refresh_latest_ohlcv(pairs, cache=False)\n # No caching\n assert not exchange._klines\n\n assert len(res) == len(pairs)\n assert exchange._api_async.fetch_ohlcv.call_count == 2\n exchange._api_async.fetch_ohlcv.reset_mock()\n\n exchange.required_candle_call_count = 2\n res = exchange.refresh_latest_ohlcv(pairs)\n assert len(res) == len(pairs)\n\n assert log_has(f'Refreshing candle (OHLCV) data for {len(pairs)} pairs', caplog)\n assert exchange._klines\n assert exchange._api_async.fetch_ohlcv.call_count == 4\n 
exchange._api_async.fetch_ohlcv.reset_mock()\n for pair in pairs:\n assert isinstance(exchange.klines(pair), DataFrame)\n assert len(exchange.klines(pair)) > 0\n\n # klines function should return a different object on each call\n # if copy is \"True\"\n assert exchange.klines(pair) is not exchange.klines(pair)\n assert exchange.klines(pair) is not exchange.klines(pair, copy=True)\n assert exchange.klines(pair, copy=True) is not exchange.klines(pair, copy=True)\n assert exchange.klines(pair, copy=False) is exchange.klines(pair, copy=False)\n\n # test caching\n res = exchange.refresh_latest_ohlcv(\n [('IOTA/ETH', '5m', candle_type), ('XRP/ETH', '5m', candle_type)])\n assert len(res) == len(pairs)\n\n assert exchange._api_async.fetch_ohlcv.call_count == 0\n assert log_has(f\"Using cached candle (OHLCV) data for {pairs[0][0]}, \"\n f\"{pairs[0][1]}, {candle_type} ...\",\n caplog)\n caplog.clear()\n # Reset refresh times - must do 2 call per pair as cache is expired\n exchange._pairs_last_refresh_time = {}\n res = exchange.refresh_latest_ohlcv(\n [('IOTA/ETH', '5m', candle_type), ('XRP/ETH', '5m', candle_type)])\n assert len(res) == len(pairs)\n\n assert exchange._api_async.fetch_ohlcv.call_count == 4\n\n # cache - but disabled caching\n exchange._api_async.fetch_ohlcv.reset_mock()\n exchange.required_candle_call_count = 1\n\n pairlist = [\n ('IOTA/ETH', '5m', candle_type)," }, { "id": 206591, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/datastructures.py", "file_name": "datastructures.py", "fun_name": "__deepcopy__", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def __deepcopy__(self, memo):\n result = self.__class__()\n memo[id(self)] = result\n for key, value in dict.items(self):\n dict.__setitem__(\n result, copy.deepcopy(key, memo), copy.deepcopy(value, memo)\n )\n return result\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 87, "n_words": 23, "vocab_size": 20, "complexity": 2, "nloc": 8, "token_counts": 61, "n_ast_nodes": 92, "n_identifiers": 13, "random_cut": "def __deepcopy__(self, memo):\n result = self.__class__()\n memo[id(self)] = result\n for key, value in d" }, { "id": 144465, "commit_id": "f264cf800ae80a2327de625dfc2db3613259f8e4", "repo": "ray", "path": "python/ray/data/tests/test_dataset.py", "file_name": "test_dataset.py", "fun_name": "test_groupby_arrow_max", "commit_message": "[Datasets] Support ignoring NaNs in aggregations. (#20787)\n\nAdds support for ignoring NaNs in aggregations. NaNs will now be ignored by default, and the user can pass in `ds.mean(\"A\", ignore_nulls=False)` if they would rather have the NaN be propagated to the output. Specifically, we'd have the following null-handling semantics:\r\n1. Mix of values and nulls - `ignore_nulls`=True: Ignore the nulls, return aggregation of values\r\n2. Mix of values and nulls - `ignore_nulls`=False: Return `None`\r\n3. All nulls: Return `None`\r\n4. 
Empty dataset: Return `None`\r\n\r\nThis all null and empty dataset handling matches the semantics of NumPy and Pandas.", "code": "def test_groupby_arrow_max(ray_start_regular_shared, num_parts):\n # Test built-in max aggregation\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_arrow_max with: {seed}\")\n random.seed(seed)\n xs = list(range(100))\n random.shuffle(xs)\n agg_ds = (\n ray.data.from_items([{\"A\": (x % 3), \"B\": x} for x in xs])\n .repartition(num_parts)\n .groupby(\"A\")\n .max(\"B\")\n )\n assert agg_ds.count() == 3\n assert [row.as_pydict() for row in agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"max(B)\": 99},\n {\"A\": 1, \"max(B)\": 97},\n {\"A\": 2, \"max(B)\": 98},\n ]\n\n # Test built-in max aggregation with nans\n nan_grouped_ds = (\n ray.data.from_items(\n [{\"A\": (x % 3), \"B\": x} for x in xs] + [{\"A\": 0, \"B\": None}]\n )\n .repartition(num_parts)\n .groupby(\"A\")\n )\n nan_agg_ds = nan_grouped_ds.max(\"B\")\n assert nan_agg_ds.count() == 3\n assert [row.as_pydict() for row in nan_agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"max(B)\": 99},\n {\"A\": 1, \"max(B)\": 97},\n {\"A\": 2, \"max(B)\": 98},\n ]\n # Test ignore_nulls=False\n nan_agg_ds = nan_grouped_ds.max(\"B\", ignore_nulls=False)\n assert nan_agg_ds.count() == 3\n assert [row.as_pydict() for row in nan_agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"max(B)\": None},\n {\"A\": 1, \"max(B)\": 97},\n {\"A\": 2, \"max(B)\": 98},\n ]\n # Test all nans\n nan_agg_ds = (\n ray.data.from_items([{\"A\": (x % 3), \"B\": None} for x in xs])\n .repartition(num_parts)\n .groupby(\"A\")\n .max(\"B\")\n )\n assert nan_agg_ds.count() == 3\n assert [row.as_pydict() for row in nan_agg_ds.sort(\"A\").iter_rows()] == [\n {\"A\": 0, \"max(B)\": None},\n {\"A\": 1, \"max(B)\": None},\n {\"A\": 2, \"max(B)\": None},\n ]\n\n # Test built-in global max aggregation\n assert (\n ray.data.from_items([{\"A\": x} for x in xs]).repartition(num_parts).max(\"A\")\n == 99\n )\n\n # Test empty dataset\n assert (\n ray.data.range_arrow(10).filter(lambda r: r[\"value\"] > 10).max(\"value\") is None\n )\n\n # Test built-in global max aggregation with nans\n nan_ds = ray.data.from_items([{\"A\": x} for x in xs] + [{\"A\": None}]).repartition(\n num_parts\n )\n assert nan_ds.max(\"A\") == 99\n # Test ignore_nulls=False\n assert nan_ds.max(\"A\", ignore_nulls=False) is None\n # Test all nans\n nan_ds = ray.data.from_items([{\"A\": None}] * len(xs)).repartition(num_parts)\n assert nan_ds.max(\"A\") is None\n\n\n@pytest.mark.parametrize(\"num_parts\", [1, 30])", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"num_parts\", [1, 30])", "n_ast_errors": 1, "ast_levels": 20, "n_whitespaces": 618, "n_words": 281, "vocab_size": 93, "complexity": 10, "nloc": 65, "token_counts": 607, "n_ast_nodes": 1055, "n_identifiers": 36, "random_cut": "def test_groupby_arrow_max(ray_start_regular_shared, num_parts):\n # Test built-in max aggregation\n seed = int(time.time())\n print(f\"Seeding RNG for test_groupby_arrow_max with: {seed}\")\n random.seed(seed)\n xs = list(rang" }, { "id": 122226, "commit_id": "32ef3ba37b31cdb51bbef6663265090bca468f4c", "repo": "jax", "path": "tests/lax_numpy_reducers_test.py", "file_name": "lax_numpy_reducers_test.py", "fun_name": "_shape_and_dtypes", "commit_message": "jnp.average: support tuple axis", "code": "def _shape_and_dtypes(shapes, dtypes):\n for shape in shapes:\n for dtype in _valid_dtypes_for_shape(shape, dtypes):\n yield 
(shape, dtype)\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 23, "n_words": 15, "vocab_size": 12, "complexity": 3, "nloc": 4, "token_counts": 28, "n_ast_nodes": 41, "n_identifiers": 6, "random_cut": "def _shape_and_dtypes(shapes, dtypes):\n for shape in " }, { "id": 19982, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_internal/utils/hashes.py", "file_name": "hashes.py", "fun_name": "digest_count", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def digest_count(self) -> int:\n return sum(len(digests) for digests in self._allowed.values())\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 16, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 2, "token_counts": 25, "n_ast_nodes": 40, "n_identifiers": 8, "random_cut": "def digest_count(self) -> int:\n" }, { "id": 188697, "commit_id": "87c6eec6190c9c5d12f816b6cc60f92725ebc7be", "repo": "jumpserver", "path": "apps/rbac/models/rolebinding.py", "file_name": "rolebinding.py", "fun_name": "clean", "commit_message": "perf: 优化 role bingding,优化 is_superuser", "code": "def clean(self):\n kwargs = dict(role=self.role, user=self.user, scope=self.scope)\n exists = self.__class__.objects.filter(**kwargs).exists()\n if exists:\n msg = \"Duplicate for key 'role_user' of system role binding, {}_{}\".format(\n self.role.id, self.user.id\n )\n raise ValidationError(msg)\n", "url": "https://github.com/jumpserver/jumpserver.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 96, "n_words": 28, "vocab_size": 26, "complexity": 2, "nloc": 8, "token_counts": 70, "n_ast_nodes": 111, "n_identifiers": 15, "random_cut": "def clean(self):\n kwargs = dict(role=self.role, user=self.user, scope=self.scope)\n exists = self.__class__.objects.filter(**kwargs).exists()\n if exists:\n msg = \"Duplicate for key 'role_user' of system role binding, {}_{}\".format(\n self.role.id, self.user.id\n " }, { "id": 314009, "commit_id": "68135e57af05af38fd9a55992cc9435230999ef0", "repo": "core", "path": "homeassistant/components/sensibo/switch.py", "file_name": "switch.py", "fun_name": "extra_state_attributes", "commit_message": "Split timer service for Sensibo (#73684)", "code": "def extra_state_attributes(self) -> Mapping[str, Any]:\n \n return self.entity_description.extra_fn(self.device_data)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 39, "n_identifiers": 8, "random_cut": "def extra_state_attributes(self) -> Mapping[str, Any]:\n \n return self.entity_description.extra_fn(self.device_data)\n" }, { "id": 168277, 
"commit_id": "2f8d0a36703e81e4dca52ca9fe4f58c910c1b304", "repo": "pandas", "path": "pandas/util/_exceptions.py", "file_name": "_exceptions.py", "fun_name": "find_stack_level", "commit_message": "PERF cache find_stack_level (#48023)\n\ncache stacklevel", "code": "def find_stack_level(frame) -> int:\n \n\n import pandas as pd\n\n pkg_dir = os.path.dirname(pd.__file__)\n test_dir = os.path.join(pkg_dir, \"tests\")\n\n n = 1\n while frame:\n fname = inspect.getfile(frame)\n if fname.startswith(pkg_dir) and not fname.startswith(test_dir):\n frame = frame.f_back\n n += 1\n else:\n break\n return n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 113, "n_words": 38, "vocab_size": 31, "complexity": 4, "nloc": 22, "token_counts": 79, "n_ast_nodes": 133, "n_identifiers": 18, "random_cut": "def find_stack_level(frame) -> int:\n \n\n import pandas as pd\n\n pkg_dir = os.path.dirname(pd.__file__)\n test_dir = os.path.join(pkg_dir, \"tests\")\n\n n = 1\n while frame:\n fname = inspect.getfile(frame)\n if fname.startswith(pkg_dir) and not fname.startswith(test_dir):\n frame = frame.f_back\n n += 1\n els" }, { "id": 92014, "commit_id": "1c949dfcf87384cdf976710ebf8fe3c536e26807", "repo": "sentry", "path": "src/sentry/tasks/process_buffer.py", "file_name": "process_buffer.py", "fun_name": "process_pending", "commit_message": "feat(locks): Add locks count metrics tagged with name (#36002)", "code": "def process_pending(partition=None):\n \n from sentry import buffer\n from sentry.app import locks\n\n if partition is None:\n lock_key = \"buffer:process_pending\"\n else:\n lock_key = \"buffer:process_pending:%d\" % partition\n\n lock = locks.get(lock_key, duration=60, name=\"process_pending\")\n\n try:\n with lock.acquire():\n buffer.process_pending(partition=partition)\n except UnableToAcquireLock as error:\n logger.warning(\"process_pending.fail\", extra={\"error\": error, \"partition\": partition})\n\n\n@instrumented_task(name=\"sentry.tasks.process_buffer.process_incr\")", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@instrumented_task(name=\"sentry.tasks.process_buffer.process_incr\")", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 104, "n_words": 42, "vocab_size": 36, "complexity": 3, "nloc": 13, "token_counts": 89, "n_ast_nodes": 172, "n_identifiers": 18, "random_cut": "def process_pending(partition=None):\n \n from sentry import buffer\n from sentry.app import locks\n\n if partition is None:\n lock_key = \"buffer:process_pending\"\n else:\n " }, { "id": 6517, "commit_id": "4440ccbc92cbf03520dc9e204179ad342f5fb2e9", "repo": "ludwig", "path": "ludwig/features/text_feature.py", "file_name": "text_feature.py", "fun_name": "populate_defaults", "commit_message": "Remove the concept of a \"level\" for text features preprocessing. 
(#1859)\n\nCo-authored-by: Daniel Treiman ", "code": "def populate_defaults(input_feature):\n set_default_values(input_feature, {TIED: None, \"encoder\": \"parallel_cnn\"})\n\n encoder_class = get_encoder_cls(input_feature[\"type\"], input_feature[\"encoder\"])\n\n if hasattr(encoder_class, \"default_params\"):\n set_default_values(input_feature, encoder_class.default_params)\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 47, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 5, "token_counts": 49, "n_ast_nodes": 83, "n_identifiers": 8, "random_cut": "def populate_defaults(input_feature):\n set_default_values(input_feature, {TIED: None, \"encoder\": \"parallel_cnn\"})\n\n encoder_class = get_encoder_cls(input_feature[\"type\"], input_feature[\"encoder\"])\n\n if hasattr(encoder_class, \"default_params\"):\n set_default_values(input_feature, encoder_class.default_params)" }, { "id": 186442, "commit_id": "a0dbe1e85035f12e194d91148836d830871ec554", "repo": "certbot", "path": "certbot-apache/tests/configurator_test.py", "file_name": "configurator_test.py", "fun_name": "test_constant", "commit_message": "Improve assertions in certbot-apache tests. (#9131)\n\n* Improve assertions in certbot-apache tests.\r\n\r\nReplacements inspired by flake8-assertive.\r\n\r\n* Fix test failures\r\n\r\n* assertEqual is not for None :D\r\n\r\n* Pass all tests :)", "code": "def test_constant(self):\n self.assertIn(\"debian_apache_2_4/multiple_vhosts/apache\", self.config.options.server_root)\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 10, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 19, "n_ast_nodes": 32, "n_identifiers": 6, "random_cut": "def test_constant(self):\n self.assertIn(\"debian_apache_2_4/multiple_vhosts/apache\", self.config.options.server_root" }, { "id": 200281, "commit_id": "6d2bbf80752549276a968fd4af78231c569d55c5", "repo": "sympy", "path": "sympy/testing/runtests.py", "file_name": "runtests.py", "fun_name": "_report_failure", "commit_message": "runtests.py: Undo auto-formatting, re-add changes to blacklist for scipy, numpy", "code": "def _report_failure(self, out, test, example, got):\n \n s = self._checker.output_difference(example, got, self.optionflags)\n s = s.encode('raw_unicode_escape').decode('utf8', 'ignore')\n out(self._failure_header(test, example) + s)\n\n\nif IS_WINDOWS:\n DocTestRunner.report_failure = _report_failure # type: ignore\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 42, "n_words": 27, "vocab_size": 24, "complexity": 1, "nloc": 4, "token_counts": 58, "n_ast_nodes": 107, "n_identifiers": 16, "random_cut": "def _report_failure(self, out, test, example, got):\n \n s = self._checker.output_difference(example, got, self.optionflags)\n s = s.encode('raw_unicode_escape').decode('utf8', 'ignore')\n out(self._failure_header(test, example) + s)\n\n\n" }, { "id": 205176, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/sqlite3/_functions.py", "file_name": "_functions.py", "fun_name": "_sqlite_time_trunc", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _sqlite_time_trunc(lookup_type, dt, tzname, conn_tzname):\n if dt is None:\n return None\n dt_parsed = 
_sqlite_datetime_parse(dt, tzname, conn_tzname)\n if dt_parsed is None:\n try:\n dt = typecast_time(dt)\n except (ValueError, TypeError):\n return None\n else:\n dt = dt_parsed\n if lookup_type == \"hour\":\n return f\"{dt.hour:02d}:00:00\"\n elif lookup_type == \"minute\":\n return f\"{dt.hour:02d}:{dt.minute:02d}:00\"\n elif lookup_type == \"second\":\n return f\"{dt.hour:02d}:{dt.minute:02d}:{dt.second:02d}\"\n raise ValueError(f\"Unsupported lookup type: {lookup_type!r}\")\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 150, "n_words": 56, "vocab_size": 35, "complexity": 7, "nloc": 18, "token_counts": 85, "n_ast_nodes": 198, "n_identifiers": 13, "random_cut": "def _sqlite_time_trunc(lookup_type, dt, tzname, conn_tzname):\n if dt is None:\n return None\n dt_parsed = _sqlite_datetime_parse(dt, tzname, conn_tzname)\n if dt_parsed is None:\n try:\n dt = typecast_time(dt)\n except (ValueError, TypeError):\n return None\n else:\n dt = dt_parsed\n if lookup_type == \"hour\":\n return f\"{dt.hour:02d}:00:00\"\n elif lookup_type == \"minute\":\n return f\"{dt.hour:02d}:{dt.minute:02d}:00\"\n " }, { "id": 147521, "commit_id": "cb1919b8d011c877a9690e3d09dd5de79b87cdd8", "repo": "ray", "path": "python/ray/serve/tests/test_model_wrappers.py", "file_name": "test_model_wrappers.py", "fun_name": "test_model_wrappers_in_pipeline", "commit_message": "[Doc][Serve] Add minimal docs for model wrappers and http adapters (#23536)", "code": "def test_model_wrappers_in_pipeline(serve_instance):\n _, path = tempfile.mkstemp()\n with open(path, \"w\") as f:\n json.dump(2, f)\n\n predictor_cls = \"ray.serve.tests.test_model_wrappers.AdderPredictor\"\n checkpoint_cls = \"ray.serve.tests.test_model_wrappers.AdderCheckpoint\"\n\n with InputNode() as dag_input:\n m1 = ModelWrapperDeployment.bind(\n predictor_cls=predictor_cls, # TODO: can't be the raw class right now?\n checkpoint={ # TODO: can't be the raw object right now?\n \"checkpoint_cls\": checkpoint_cls,\n \"uri\": path,\n },\n )\n dag = m1.predict.bind(dag_input)\n deployments = build(Ingress.bind(dag))\n for d in deployments:\n d.deploy()\n\n resp = requests.post(\"http://127.0.0.1:8000/ingress\", json={\"array\": [40]})\n print(resp.text)\n resp.raise_for_status()\n return resp.json() == {\"value\": [42], \"batch_size\": 1}\n\n\n# NOTE(simon): Make sure this is the last test because the REST API will start\n# controller and http proxy in another namespace.", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 230, "n_words": 100, "vocab_size": 79, "complexity": 2, "nloc": 22, "token_counts": 147, "n_ast_nodes": 258, "n_identifiers": 31, "random_cut": "def test_model_wrappers_in_pipeline(serve_instance):\n _, path = tempfile.mkstemp()\n with open(path, \"w\") as f:\n json.dump(2, f)\n\n predictor_cls = \"ray.serve.tests.test_model_wrappers.AdderPredictor\"\n checkpoint_cls = \"ray.serve.tests.test_model_wrappers.AdderCheckpoint\"\n\n with InputNode() as dag_input:\n m1 = ModelWrapperDeployment.bind(\n predictor_cls=predictor_cls, # TODO: can't be the raw class right now?\n checkpoint={ # TODO: can't be the raw object right now?\n \"checkpoint_cls\": checkpoint_cls,\n \"uri\": path,\n },\n )\n dag = m1.predict.bind(dag_input)\n deployments = build(Ingress.bind(dag))\n fo" }, { "id": 207391, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": 
"tests/admin_scripts/tests.py", "file_name": "tests.py", "fun_name": "test_suppress_base_options_command_defaults", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_suppress_base_options_command_defaults(self):\n args = [\"suppress_base_options_command\"]\n out, err = self.run_manage(args)\n self.assertNoOutput(err)\n self.assertOutput(\n out,\n \"EXECUTE:SuppressBaseOptionsCommand options=[('file', None), \"\n \"('force_color', False), ('no_color', False), \"\n \"('pythonpath', None), ('settings', None), \"\n \"('traceback', False), ('verbosity', 1)]\",\n )\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 120, "n_words": 31, "vocab_size": 23, "complexity": 1, "nloc": 11, "token_counts": 38, "n_ast_nodes": 70, "n_identifiers": 8, "random_cut": "def test_suppress_base_options_command_defaults(self):\n args = [\"suppress_base_options_command\"]\n " }, { "id": 19155, "commit_id": "4c58179509e6f6047789efb0a95c2b0e20cb6c8f", "repo": "mlflow", "path": "mlflow/models/evaluation/base.py", "file_name": "base.py", "fun_name": "_start_run_or_reuse_active_run", "commit_message": "Improve evaluation api (#5256)\n\n* init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap limitation on value type\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix format\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu ", "code": "def _start_run_or_reuse_active_run():\n \n active_run = mlflow.active_run()\n if not active_run:\n # Note `mlflow.start_run` throws if `run_id` is not found.\n with mlflow.start_run() as run:\n yield run.info.run_id\n else:\n yield active_run.info.run_id\n\n", "url": "https://github.com/mlflow/mlflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 70, "n_words": 26, "vocab_size": 23, "complexity": 2, "nloc": 7, "token_counts": 39, "n_ast_nodes": 74, "n_identifiers": 7, "random_cut": "def _start_run_or_reuse_active_run():\n \n active_run = mlflow.active_run()\n if not active_run:\n # Note `mlflow.start_run` throws if `run_id` is not found.\n with mlflow.start_run() a" }, { "id": 94406, "commit_id": "5db82e580ae2668fb4b468dc7160a06c1be6500f", "repo": "sentry", "path": "tests/sentry/utils/suspect_resolutions/test_get_suspect_resolutions.py", "file_name": "test_get_suspect_resolutions.py", "fun_name": "test_suspect_resolutions_evaluation_analytics_event", "commit_message": "feat(suspect-resolutions): Add feature flag and celery task (#37572)", "code": "def test_suspect_resolutions_evaluation_analytics_event(self, record):\n organization = self.create_organization()\n project = self.create_project(organization=organization)\n resolved_issue = Group.objects.create(status=GroupStatus.RESOLVED, project=project)\n resolution_type = Activity.objects.create(\n project=project, group=resolved_issue, type=ActivityType.SET_RESOLVED_IN_RELEASE.value\n )\n 
get_suspect_resolutions(resolved_issue.id)\n\n notification_record = [\n r for r in record.call_args_list if r[0][0] == \"suspect_resolution.evaluation\"\n ]\n\n assert notification_record == [\n mock.call(\n \"suspect_resolution.evaluation\",\n resolved_group_id=resolved_issue.id,\n candidate_group_id=0,\n resolved_group_resolution_type=resolution_type.type,\n pearson_r_coefficient=0.5,\n pearson_r_start_time=datetime(2022, 1, 2),\n pearson_r_end_time=datetime(2022, 1, 1),\n pearson_r_resolution_time=datetime(2022, 1, 3),\n is_commit_correlated=True,\n resolved_issue_release_ids=[1, 2],\n candidate_issue_release_ids=[3, 4],\n )\n ]\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 338, "n_words": 60, "vocab_size": 48, "complexity": 3, "nloc": 26, "token_counts": 180, "n_ast_nodes": 256, "n_identifiers": 39, "random_cut": "def test_suspect_resolutions_evaluation_analytics_event(self, record):\n organization = self.create_organization()\n project = self.create_project(organization=organization)\n resolved_issue = Group.objects.create(status=GroupStatus.RESOLVED, project=project)\n resolution_type = Activity.objects.create(\n project=project, group=resolved_issue, type=ActivityType.SET_RESOLVED_IN_RELEASE.value\n )\n get_suspect_resolutions(resolved_issue.id)\n\n notification_record = [\n r for r in record.call_args_list if r[0][0] == \"suspect_resolution.evaluation\"\n ]\n\n assert notification_record == [\n mock.call(\n \"suspect_resolution.evaluation\",\n resolved_group_id=resolved_issue.id,\n candidate_group_id=0,\n resolved_group_resolution_type=resolution_type.type,\n pearson_r_coefficient=0.5,\n pearson_r_start_time=datetime(2022, 1, 2),\n pearson_r_end_time=datetime(2022, 1, 1),\n pearson_r_resolution_time=datetime(2022, 1, 3),\n is_commit_correlated=True,\n resolved_issue_release_ids=[1, 2],\n candidate_issue_release_ids=[3, 4],\n " }, { "id": 25420, "commit_id": "34b9569800a38af41a27ed893b12567757ef6c89", "repo": "PaddleOCR", "path": "ppocr/modeling/heads/rec_nrtr_head.py", "file_name": "rec_nrtr_head.py", "fun_name": "forward_test", "commit_message": "Update rec_nrtr_head.py (#8564)", "code": "def forward_test(self, src):\n\n bs = paddle.shape(src)[0]\n if self.encoder is not None:\n src = self.positional_encoding(src)\n for encoder_layer in self.encoder:\n src = encoder_layer(src)\n memory = src # B N C\n else:\n memory = src\n dec_seq = paddle.full((bs, 1), 2, dtype=paddle.int64)\n dec_prob = paddle.full((bs, 1), 1., dtype=paddle.float32)\n for len_dec_seq in range(1, paddle.to_tensor(self.max_len)):\n dec_seq_embed = self.embedding(dec_seq)\n dec_seq_embed = self.positional_encoding(dec_seq_embed)\n tgt_mask = self.generate_square_subsequent_mask(\n paddle.shape(dec_seq_embed)[1])\n tgt = dec_seq_embed\n for decoder_layer in self.decoder:\n tgt = decoder_layer(tgt, memory, self_mask=tgt_mask)\n dec_output = tgt\n dec_output = dec_output[:, -1, :]\n word_prob = F.softmax(self.tgt_word_prj(dec_output), axis=-1)\n preds_idx = paddle.argmax(word_prob, axis=-1)\n if paddle.equal_all(\n preds_idx,\n paddle.full(\n paddle.shape(preds_idx), 3, dtype='int64')):\n break\n preds_prob = paddle.max(word_prob, axis=-1)\n dec_seq = paddle.concat(\n [dec_seq, paddle.reshape(preds_idx, [-1, 1])], axis=1)\n dec_prob = paddle.concat(\n [dec_prob, paddle.reshape(preds_prob, [-1, 1])], axis=1)\n return [dec_seq, dec_prob]\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", 
"ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 505, "n_words": 118, "vocab_size": 75, "complexity": 6, "nloc": 34, "token_counts": 314, "n_ast_nodes": 472, "n_identifiers": 41, "random_cut": "def forward_test(self, src):\n\n bs = paddle.shape(src)[0]\n if self.encoder is not None:\n src = self.positional_encoding(src)\n for encoder_layer in self.encoder:\n src = encoder_layer(src)\n memory = src # B N C\n else:\n memory = src\n dec_seq = paddle.full((bs, 1), 2, dtype=paddle.int64)\n dec_prob = paddle.full((bs, 1), 1., dtype=paddle.float32)\n for len_dec_seq in range(1, paddle.to_tensor(self.max_len)):\n dec_seq_embed = self.embedding(dec_seq)\n dec_seq_embed = self.positional_encoding(dec_seq_embed)\n tgt_mask = self.generate_square_subsequent_mask(\n paddle.shape(dec_seq_embed)[1])\n tgt = dec_seq_embed\n for de" }, { "id": 46076, "commit_id": "7bd8b2d7f3bca39a919cf0aeef91da1c476d792d", "repo": "airflow", "path": "tests/providers/alibaba/cloud/log/test_oss_task_handler.py", "file_name": "test_oss_task_handler.py", "fun_name": "test_oss_write_into_remote_existing_file_via_append", "commit_message": "Add oss_task_handler into alibaba-provider and enable remote logging to OSS (#21785)", "code": "def test_oss_write_into_remote_existing_file_via_append(self, mock_service, mock_oss_log_exists):\n # Given\n mock_oss_log_exists.return_value = True\n mock_service.return_value.head_key.return_value.content_length = 1\n\n # When\n self.oss_task_handler.oss_write(MOCK_CONTENT, '1.log', append=True)\n\n # Then\n assert mock_service.call_count == 2\n mock_service.return_value.head_key.assert_called_once_with(MOCK_BUCKET_NAME, 'airflow/logs/1.log')\n mock_oss_log_exists.assert_called_once_with('airflow/logs/1.log')\n mock_service.return_value.append_string.assert_called_once_with(\n MOCK_BUCKET_NAME, MOCK_CONTENT, 'airflow/logs/1.log', 1\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 119, "n_words": 32, "vocab_size": 28, "complexity": 1, "nloc": 10, "token_counts": 79, "n_ast_nodes": 128, "n_identifiers": 15, "random_cut": "def test_oss_write_into_remote_existing_file_via_append(self, mock_service, mock_oss_log_exists):\n # Given" }, { "id": 91123, "commit_id": "9e12287b84b21f79f8381249ed883f134c292a93", "repo": "sentry", "path": "tests/sentry/eventstore/snuba/test_backend.py", "file_name": "test_backend.py", "fun_name": "test_get_event_beyond_retention", "commit_message": "fix(eventstore): Silently handle events beyond retention (#35542)", "code": "def test_get_event_beyond_retention(self):\n event = self.store_event(\n data={\n \"event_id\": \"d\" * 32,\n \"type\": \"default\",\n \"platform\": \"python\",\n \"fingerprint\": [\"group2\"],\n \"timestamp\": iso_format(before_now(days=14)),\n \"tags\": {\"foo\": \"1\"},\n },\n project_id=self.project2.id,\n )\n\n with mock.patch(\"sentry.quotas.get_event_retention\") as get_event_retention:\n get_event_retention.return_value = 7\n event = self.eventstore.get_event_by_id(self.project2.id, \"d\" * 32)\n assert event is None\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 217, "n_words": 41, "vocab_size": 35, "complexity": 1, "nloc": 16, "token_counts": 102, "n_ast_nodes": 182, "n_identifiers": 17, "random_cut": "def test_get_event_beyond_retention(self):\n event = self.store_event(\n data={\n \"event_id\": \"d\" * 32,\n \"type" }, { 
"id": 107121, "commit_id": "a97de5885d49486d51fbaae77955af7324abaa96", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_axes.py", "file_name": "test_axes.py", "fun_name": "test_bxp_bad_capwidths", "commit_message": "Custom cap widths in box and whisker plots in bxp() and boxplot()", "code": "def test_bxp_bad_capwidths():\n with pytest.raises(ValueError):\n _bxp_test_helper(bxp_kwargs=dict(capwidths=[1]))\n\n\n@image_comparison(['boxplot', 'boxplot'], tol=1.28, style='default')", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "@image_comparison(['boxplot', 'boxplot'], tol=1.28, style='default')", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 17, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 25, "n_ast_nodes": 74, "n_identifiers": 11, "random_cut": "def test_bxp_bad_capwidths():\n with pytest.raises(ValueError):\n _bxp_test_helper(bxp_kwargs=dict(capwidths=[1]))\n\n\n@image_comparison(['boxplot', 'boxplot'], tol=1.28, style='default')" }, { "id": 60962, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/pyproject.py", "file_name": "pyproject.py", "fun_name": "_is_list_of_str", "commit_message": "upd; format", "code": "def _is_list_of_str(obj):\n # type: (Any) -> bool\n return (\n isinstance(obj, list) and\n all(isinstance(item, str) for item in obj)\n )\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 41, "n_words": 19, "vocab_size": 19, "complexity": 3, "nloc": 5, "token_counts": 28, "n_ast_nodes": 43, "n_identifiers": 7, "random_cut": "def _is_list_of_str(obj):\n # type: (Any) -> bool\n return (\n isinstance(obj, list) and\n all(isinstance(item, str) for item in obj)\n )\n\n" }, { "id": 180788, "commit_id": "99833d506ef88f9452e516ef78db98edae8798f6", "repo": "gradio", "path": "demo/blocks_flashcards/run.py", "file_name": "run.py", "fun_name": "flip_card", "commit_message": "Refactoring Layout: Adding column widths, forms, and more. 
(#2097)\n\n* changes\r\n\r\n* changes\r\n\r\n* fix\r\n\r\n* change\r\n\r\n* change\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* change\r\n\r\n* remove test config outputs\r\n\r\n* fix wflow\r\n\r\n* attempt root user\r\n\r\n* attempt root user\r\n\r\n* attempt root user\r\n\r\n* attempt root user\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* change\r\n\r\n* changes\r\n\r\n* change\r\n\r\n* Update gradio/layouts.py\r\n\r\nCo-authored-by: Abubakar Abid \r\n\r\n* changes\r\n\r\nCo-authored-by: Abubakar Abid ", "code": "def flip_card(card):\n return card[1], gr.Column.update(visible=True)\n\n flip_btn.click(flip_card, [selected_card], [back, answer_col])\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 18, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 55, "n_identifiers": 11, "random_cut": "def flip_card(card):\n return card[1], gr.Column.update(visible=True)\n\n flip_btn.click(fl" }, { "id": 214492, "commit_id": "78a4f3aae54d8134a89ea868662eec933bd2dea6", "repo": "flair", "path": "flair/models/relation_extractor_model.py", "file_name": "relation_extractor_model.py", "fun_name": "_get_state_dict", "commit_message": "Fix relation extractor", "code": "def _get_state_dict(self):\n model_state = {\n **super()._get_state_dict(),\n \"embeddings\": self.embeddings,\n \"label_dictionary\": self.label_dictionary,\n \"label_type\": self.label_type,\n \"entity_label_type\": self.entity_label_type,\n \"weight_dict\": self.weight_dict,\n \"pooling_operation\": self.pooling_operation,\n \"entity_pair_filters\": self.entity_pair_filters,\n }\n return model_state\n", "url": "https://github.com/flairNLP/flair.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 131, "n_words": 23, "vocab_size": 22, "complexity": 1, "nloc": 15, "token_counts": 80, "n_ast_nodes": 104, "n_identifiers": 11, "random_cut": "def _get_state_dict(self):\n model_state = {\n **super()._get_state_dict(),\n \"embeddings\": self.embeddings,\n \"label_dictionary\": self.label_dictionary,\n \"label_type\": self.label_type,\n \"entity_label_type\": self.entity_label_type,\n \"weight_dict\": self.weight_dict,\n \"pooling_operation\": self.pooling_operation,\n \"entity_pair_filters\": self.entity_pair_filters," }, { "id": 219219, "commit_id": "39c9303aacc44c84767e40856be0c952ede47281", "repo": "XX-Net", "path": "code/default/launcher/tests/ingegrate_testing.py", "file_name": "ingegrate_testing.py", "fun_name": "kill_python", "commit_message": "try auto testing.", "code": "def kill_python(self):\n xlog.info(\"start kill python\")\n if sys.platform == \"win32\":\n # This will kill this script as well.\n os.system(\"taskkill /im /F python.exe\")\n else:\n os.system(\"pkill -9 -f 'start.py'\")\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 79, "n_words": 26, "vocab_size": 25, "complexity": 2, "nloc": 6, "token_counts": 32, "n_ast_nodes": 63, "n_identifiers": 8, "random_cut": "def kill_python(self):\n xlog.info(\"start kill python\")\n" }, { "id": 202074, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/cache/tests.py", "file_name": "tests.py", "fun_name": "test_incr_version", "commit_message": "Refs #33476 -- Reformatted code with Black.", 
"code": "def test_incr_version(self):\n \"Dummy cache versions can't be incremented\"\n cache.set(\"answer\", 42)\n with self.assertRaises(ValueError):\n cache.incr_version(\"answer\")\n with self.assertRaises(ValueError):\n cache.incr_version(\"does_not_exist\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 65, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 7, "token_counts": 42, "n_ast_nodes": 81, "n_identifiers": 7, "random_cut": "def test_incr_version(self):\n \"Dummy cache versions can't be incremented\"\n " }, { "id": 202312, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/contenttypes_tests/test_fields.py", "file_name": "test_fields.py", "fun_name": "test_get_content_type_no_arguments", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_get_content_type_no_arguments(self):\n with self.assertRaisesMessage(\n Exception, \"Impossible arguments to GFK.get_content_type!\"\n ):\n Answer.question.get_content_type()\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 46, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 22, "n_ast_nodes": 40, "n_identifiers": 7, "random_cut": "def test_get_content_type_no_arguments(self):\n with self.assertRaisesMessage(\n Exception, \"Imposs" }, { "id": 194429, "commit_id": "1830123ba3edf7290b7c6cb1c6f406ccf1d0e5d4", "repo": "kivy", "path": "kivy/core/window/__init__.py", "file_name": "__init__.py", "fun_name": "unregister_event_manager", "commit_message": "Feature: EventManagerBase (#7658)\n\n* Added EventManagerBase class and event_managers attribute to WindowBase class.\r\n* Added on_motion event to Widget class.\r\n* Updated post_dispatch_input in EventLoopBase to skip non-touch events.\r\n* Using type ids in MouseMotionEventProvider.\r\n* Added on_motion method to Widget subclasses.\r\n* Updated Widget.on_motion method to dispatch to filtered widgets if 'pos' is not in me.profile.\r\n* Changed motion_filter property in Widget to store key to list values.\r\n* Updated Widget.on_motion to not dispatch event to children if widget is disabled.\r\n* Widget: Using flags to control dispatching in on_motion method.\r\n* Widget: Don't dispatch on_motion to children if only self is registered.\r\n* Widget: Removed collision on disabled check from on_motion method.\r\n* Widget: Added docstrings for motion_filter and related methods.\r\n* EventManager: Moved motion event flags to eventmanager/__init__.py module.\r\n* ScreenManager: Overrode the on_motion method.\r\n* WindowBase: Using attributes event_managers and event_managers_dict.\r\n* WindowBase: Added doc for register_event_manager and unregister_event_manager methods.\r\n* Widget: Improved default dispatch to stop after the last registered widgets.\r\n* EventManagerBase: Added initial docs class and module.\r\n* Widget: Added experimental warnings to motion_filter property and to on_motion and (un)register_for_motion_event methods.\r\n* WindowBase: Added docs for event_managers and event_managers_dict attributes.\r\n* MotionEvent: Added type_id and flags to push_attrs list.\r\n* EventManagerBase: Added versionadded tag on all flags.\r\n* EventManagerBase: Use dispatch modes instead of flags.", "code": "def unregister_event_manager(self, manager):\n \n self.event_managers.remove(manager)\n for type_id in 
manager.type_ids:\n self.event_managers_dict[type_id].remove(manager)\n manager.stop()\n manager.window = None\n", "url": "https://github.com/kivy/kivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 59, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 6, "token_counts": 44, "n_ast_nodes": 72, "n_identifiers": 10, "random_cut": "def unregister_event_manager(self, manager):\n \n self.event_managers.remove(manager)\n for type_id in manager.type_ids:\n self.event_managers_dict[type_id].remove(manager)\n manager.st" }, { "id": 310412, "commit_id": "b2811cff515a87685673aea2319037c415b067a7", "repo": "core", "path": "tests/components/flunearyou/conftest.py", "file_name": "conftest.py", "fun_name": "data_cdc_fixture", "commit_message": "Clean up Flu Near You tests (#64575)\n\n* Clean up Flu Near You tests\r\n\r\n* Docstring\r\n\r\n* More fixtures\r\n\r\n* Revert \"More fixtures\"\r\n\r\nThis reverts commit 30f079b6266ef6cb14417ca895da1ae937c87abe.", "code": "def data_cdc_fixture():\n \n return json.loads(load_fixture(\"cdc_data.json\", \"flunearyou\"))\n\n\n@pytest.fixture(name=\"setup_flunearyou\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture(name=\"setup_flunearyou\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 11, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 51, "n_identifiers": 7, "random_cut": "def data_cdc_fixture():\n \n return jso" }, { "id": 306934, "commit_id": "56c4e0391dd4696ee52b20cf2660da8c9cac480b", "repo": "core", "path": "homeassistant/components/hdmi_cec/media_player.py", "file_name": "media_player.py", "fun_name": "media_pause", "commit_message": "Use new media player enums [e-h] (#78049)", "code": "def media_pause(self) -> None:\n \n self.send_keypress(KEY_PAUSE)\n self._state = MediaPlayerState.PAUSED\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 21, "n_ast_nodes": 37, "n_identifiers": 7, "random_cut": "def media_pause(self) -> None:\n \n self.send_keyp" }, { "id": 125475, "commit_id": "0c139914bbb3e3557f13738b5f3f9fe8d2d428b4", "repo": "ray", "path": "python/ray/air/tests/test_data_batch_conversion.py", "file_name": "test_data_batch_conversion.py", "fun_name": "test_numpy_object_pandas", "commit_message": "[Datasets] Automatically cast tensor columns when building Pandas blocks. (#26684)\n\nThis PR tries to automatically cast tensor columns to our TensorArray extension type when building Pandas blocks, logging a warning and falling back to the opaque object-typed column if the cast fails. This should allow users to remain mostly tensor extension agnostic.\r\n\r\nTensorArray now eagerly validates the underlying tensor data, raising an error if e.g. the underlying ndarrays have heterogeneous shapes; previously, TensorArray wouldn't validate this on construction and would instead let failures happen downstream. 
This means that our internal TensorArray use needs to follow a try-except pattern, falling back to a plain NumPy object column.", "code": "def test_numpy_object_pandas():\n input_data = np.array([[1, 2, 3], [1]], dtype=object)\n expected_output = pd.DataFrame({TENSOR_COLUMN_NAME: input_data})\n actual_output = convert_batch_type_to_pandas(input_data)\n assert expected_output.equals(actual_output)\n\n np.testing.assert_array_equal(\n convert_pandas_to_batch_type(actual_output, type=DataType.NUMPY), input_data\n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 47, "n_words": 23, "vocab_size": 20, "complexity": 1, "nloc": 8, "token_counts": 72, "n_ast_nodes": 109, "n_identifiers": 19, "random_cut": "def test_numpy_object_pandas():\n input_data = np.array([[1, 2, 3], [1]], dtype=object)\n expected_output = pd.DataFrame({TENSOR_COLUMN_NAME: input_data})\n actual_output = convert_batch_type_to_pandas(input_data)\n assert expected_output.equals(actual_output)\n\n np.testing.assert_array_equal(" }, { "id": 248306, "commit_id": "1fe202a1a3343fad77da270ffe0923a46f1944dd", "repo": "synapse", "path": "synapse/storage/engines/sqlite.py", "file_name": "sqlite.py", "fun_name": "supports_returning", "commit_message": "Tidy up and type-hint the database engine modules (#12734)\n\nCo-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com>", "code": "def supports_returning(self) -> bool:\n \n return sqlite3.sqlite_version_info >= (3, 35, 0)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 24, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 32, "n_identifiers": 5, "random_cut": "def supports_returning(self) -> bool:\n \n return sqlite3.sqlite_version_info >= (3, 35, 0)\n" }, { "id": 198238, "commit_id": "1ceeaf7635d2a633fe1a4295bed4fbebebcb8402", "repo": "sympy", "path": "sympy/core/tests/test_power.py", "file_name": "test_power.py", "fun_name": "test_issue_7638", "commit_message": "fix(core): fix evaluation of sqrt((-1+I)**2)", "code": "def test_issue_7638():\n f = pi/log(sqrt(2))\n assert ((1 + I)**(I*f/2))**0.3 == (1 + I)**(0.15*I*f)\n # if 1/3 -> 1.0/3 this should fail since it cannot be shown that the\n # sign will be +/-1; for the previous \"small arg\" case, it didn't matter\n # that this could not be proved\n assert (1 + I)**(4*I*f) == ((1 + I)**(12*I*f))**Rational(1, 3)\n\n assert (((1 + I)**(I*(1 + 7*f)))**Rational(1, 3)).exp == Rational(1, 3)\n r = symbols('r', real=True)\n assert sqrt(r**2) == abs(r)\n assert cbrt(r**3) != r\n assert sqrt(Pow(2*I, 5*S.Half)) != (2*I)**Rational(5, 4)\n p = symbols('p', positive=True)\n assert cbrt(p**2) == p**Rational(2, 3)\n assert NS(((0.2 + 0.7*I)**(0.7 + 1.0*I))**(0.5 - 0.1*I), 1) == '0.4 + 0.2*I'\n assert sqrt(1/(1 + I)) == sqrt(1 - I)/sqrt(2) # or 1/sqrt(1 + I)\n e = 1/(1 - sqrt(2))\n assert sqrt(e) == I/sqrt(-1 + sqrt(2))\n assert e**Rational(-1, 2) == -I*sqrt(-1 + sqrt(2))\n assert sqrt((cos(1)**2 + sin(1)**2 - 1)**(3 + I)).exp in [S.Half,\n Rational(3, 2) + I/2]\n assert sqrt(r**Rational(4, 3)) != r**Rational(2, 3)\n assert sqrt((p + I)**Rational(4, 3)) == (p + I)**Rational(2, 3)\n\n for q in 1+I, 1-I:\n assert sqrt(q**2) == q\n for q in -1+I, -1-I:\n assert sqrt(q**2) == -q\n\n assert sqrt((p + r*I)**2) != p + r*I\n e = (1 + I/5)\n assert sqrt(e**5) == e**(5*S.Half)\n assert sqrt(e**6) == e**3\n 
assert sqrt((1 + I*r)**6) != (1 + I*r)**3\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 375, "n_words": 216, "vocab_size": 119, "complexity": 3, "nloc": 29, "token_counts": 552, "n_ast_nodes": 826, "n_identifiers": 23, "random_cut": "def test_issue_7638():\n f = pi/log(sqrt(2))\n assert ((1 + I)**(I*f/2))**0.3 == (1 + I)**(0.15*I*f)\n # if 1/" }, { "id": 290548, "commit_id": "ee910bd0e41391e00ccd521fe7d605e494d33046", "repo": "core", "path": "tests/components/stream/test_worker.py", "file_name": "test_worker.py", "fun_name": "test_get_image", "commit_message": "Refactor camera stream settings (#81663)", "code": "async def test_get_image(hass, h264_video, filename):\n \n await async_setup_component(hass, \"stream\", {\"stream\": {}})\n\n # Since libjpeg-turbo is not installed on the CI runner, we use a mock\n with patch(\n \"homeassistant.components.camera.img_util.TurboJPEGSingleton\"\n ) as mock_turbo_jpeg_singleton:\n mock_turbo_jpeg_singleton.instance.return_value = mock_turbo_jpeg()\n stream = create_stream(hass, h264_video, {}, dynamic_stream_settings())\n\n with patch.object(hass.config, \"is_allowed_path\", return_value=True):\n make_recording = hass.async_create_task(stream.async_record(filename))\n await make_recording\n assert stream._keyframe_converter._image is None\n\n assert await stream.async_get_image() == EMPTY_8_6_JPEG\n\n await stream.stop()\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 121, "n_words": 59, "vocab_size": 49, "complexity": 1, "nloc": 13, "token_counts": 110, "n_ast_nodes": 189, "n_identifiers": 23, "random_cut": "async def test_get_image(hass, h264_video, filename):\n \n await async_setup_component(hass, \"stream\", {\"stream\": {}})" }, { "id": 334842, "commit_id": "12b10cbe0986409e2b87e891248d299b071d0383", "repo": "diffusers", "path": "src/diffusers/pipelines/pipeline_ddim.py", "file_name": "pipeline_ddim.py", "fun_name": "__call__", "commit_message": "finish refactor", "code": "def __call__(self, batch_size=1, generator=None, torch_device=None, eta=0.0, num_inference_steps=50):\n # eta corresponds to η in paper and should be between [0, 1]\n if torch_device is None:\n torch_device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n num_trained_timesteps = self.noise_scheduler.timesteps\n inference_step_times = range(0, num_trained_timesteps, num_trained_timesteps // num_inference_steps)\n\n self.unet.to(torch_device)\n\n # Sample gaussian noise to begin loop\n image = torch.randn(\n (batch_size, self.unet.in_channels, self.unet.resolution, self.unet.resolution),\n generator=generator,\n )\n image = image.to(torch_device)\n\n # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf\n # Ideally, read DDIM paper in-detail understanding\n\n # Notation ( -> \n # - pred_noise_t -> e_theta(x_t, t)\n # - pred_original_image -> f_theta(x_t, t) or x_0\n # - std_dev_t -> sigma_t\n # - eta -> η\n # - pred_image_direction -> \"direction pointingc to x_t\"\n # - pred_prev_image -> \"x_t-1\"\n for t in tqdm.tqdm(reversed(range(num_inference_steps)), total=num_inference_steps):\n # 1. predict noise residual\n with torch.no_grad():\n residual = self.unet(image, inference_step_times[t])\n\n # 2. predict previous mean of image x_t-1\n pred_prev_image = self.noise_scheduler.step(residual, image, t, num_inference_steps, eta)\n\n # 3. 
optionally sample variance\n variance = 0\n if eta > 0:\n noise = torch.randn(image.shape, generator=generator).to(image.device)\n variance = self.noise_scheduler.get_variance(t, num_inference_steps).sqrt() * eta * noise\n\n # 4. set current image to prev_image: x_t -> x_t-1\n image = pred_prev_image + variance\n\n return image\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 511, "n_words": 195, "vocab_size": 123, "complexity": 5, "nloc": 21, "token_counts": 225, "n_ast_nodes": 355, "n_identifiers": 35, "random_cut": "def __call__(self, batch_size=1, generator=None, torch_device=None, eta=0.0, num_inference_steps=50):\n # eta corresponds to η in paper and should be between [0, 1]\n if torch_device is None:\n torch_device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n num_trained_timesteps = self.noise_scheduler.timesteps\n inference_step_times = range(0, num_trained_timesteps, num_trained_timesteps // num_inference_steps)\n\n self.unet.to(torch_device)\n\n # Sample gaussian noise to begin loop\n image = torch.randn(\n (batch_size, self.unet.in_channels, self.unet.resolution, self.unet.resolution),\n generator=generator,\n )\n image = image.to(torch_device)\n\n # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf\n # Ideally, read DDIM paper in-detail understanding\n\n # Notation ( -> \n # - pred_noise_t -> e_theta(x_t, t)\n # - pred_original_image -> f_theta(x_t, t) or x_0\n # - std_dev_t -> sigma_t\n # - eta -> η\n # - pred_image_" }, { "id": 137298, "commit_id": "794cfd9725b4dc113aa50e60428367b15e921514", "repo": "ray", "path": "rllib/evaluation/tests/test_rollout_worker.py", "file_name": "test_rollout_worker.py", "fun_name": "test_action_normalization", "commit_message": "[RLlib] `AlgorithmConfig.overrides()` to replace `multiagent->policies->config` and `evaluation_config` dicts. 
(#30879)", "code": "def test_action_normalization(self):\n from ray.rllib.examples.env.random_env import RandomEnv\n\n action_space = gym.spaces.Box(0.0001, 0.0002, (5,))\n\n # Normalize: True (unsquash between Policy's action_space.low/high).\n ev = RolloutWorker(\n env_creator=lambda _: RandomEnv(\n config=dict(\n action_space=action_space,\n max_episode_len=10,\n p_done=0.0,\n check_action_bounds=True,\n )\n ),\n config=AlgorithmConfig()\n .multi_agent(\n policies={\n \"default_policy\": PolicySpec(\n policy_class=RandomPolicy,\n config={\"ignore_action_bounds\": True},\n )\n }\n )\n .rollouts(num_rollout_workers=0, batch_mode=\"complete_episodes\")\n .environment(\n action_space=action_space, normalize_actions=True, clip_actions=False\n ),\n )\n sample = convert_ma_batch_to_sample_batch(ev.sample())\n # Check, whether the action bounds have been breached (expected).\n # We still arrived here b/c we unsquashed according to the Env's action\n # space.\n self.assertGreater(np.max(sample[\"actions\"]), action_space.high[0])\n self.assertLess(np.min(sample[\"actions\"]), action_space.low[0])\n ev.stop()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 24, "n_whitespaces": 489, "n_words": 83, "vocab_size": 71, "complexity": 1, "nloc": 30, "token_counts": 189, "n_ast_nodes": 284, "n_identifiers": 43, "random_cut": "def test_action_normalization(self):\n from ray.rllib.examples.env.random_env import RandomEnv\n\n action_space = gym.spaces.Box(0.0001, 0.0002, (5,))\n\n # Normalize: True (unsquash between Policy's action_space.low/high).\n ev = RolloutWorker(\n env_creator=lambda _: RandomEnv(\n config=dict(\n action_space=action_space,\n max_episode_len=10,\n p_done=0.0,\n check_action_bounds=True,\n )\n ),\n config=AlgorithmConfig()\n .multi_agent(\n policies={\n \"default_policy\": PolicySpec(\n policy_class=RandomPolicy,\n config={\"ignore_action_bounds\": True},\n )\n }\n )\n .rollouts(num_rollout_workers=0, batch_mode=\"complete_episodes\")\n .environment(\n action_space=action_space, normalize_actions=True, clip_actions=False\n ),\n )\n sample = convert_ma_batch_to_sample_batch(ev.sample())\n # Check, whether the action" }, { "id": 44312, "commit_id": "640c0b67631c5f2c8ee866b0726fa7a8a452cd3c", "repo": "airflow", "path": "tests/providers/google/cloud/operators/test_vertex_ai.py", "file_name": "test_vertex_ai.py", "fun_name": "test_execute", "commit_message": "Create CustomJob and Datasets operators for Vertex AI service (#20077)", "code": "def test_execute(self, mock_hook, to_dict_mock):\n op = CreateDatasetOperator(\n task_id=TASK_ID,\n gcp_conn_id=GCP_CONN_ID,\n delegate_to=DELEGATE_TO,\n impersonation_chain=IMPERSONATION_CHAIN,\n region=GCP_LOCATION,\n project_id=GCP_PROJECT,\n dataset=TEST_DATASET,\n retry=RETRY,\n timeout=TIMEOUT,\n metadata=METADATA,\n )\n op.execute(context={'ti': mock.MagicMock()})\n mock_hook.assert_called_once_with(\n gcp_conn_id=GCP_CONN_ID, delegate_to=DELEGATE_TO, impersonation_chain=IMPERSONATION_CHAIN\n )\n mock_hook.return_value.create_dataset.assert_called_once_with(\n region=GCP_LOCATION,\n project_id=GCP_PROJECT,\n dataset=TEST_DATASET,\n retry=RETRY,\n timeout=TIMEOUT,\n metadata=METADATA,\n )\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 268, "n_words": 33, "vocab_size": 23, "complexity": 1, "nloc": 25, "token_counts": 119, "n_ast_nodes": 168, "n_identifiers": 33, 
"random_cut": "def test_execute(self, mock_hook, to_dict_mock):\n op = CreateDatasetOperator(\n task_id=TASK_ID,\n gcp_conn_id=GCP_CONN_ID,\n delegate_to=DELEGATE_TO,\n impersonation_chain=IMPERSONATION_CHAIN,\n region=GCP_LOCATION,\n project_id=GCP_PROJECT,\n dataset=TEST_DATASET,\n " }, { "id": 91679, "commit_id": "7f60db924ea37f34e0cfe6856777239e2a2ffe13", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_data.py", "file_name": "test_organization_metric_data.py", "fun_name": "test_abnormal_user_sessions", "commit_message": "feat(metrics): make indexer more configurable (#35604)\n\nThis makes the sentry_metrics indexer more configurable in the following ways, to enable indexing on the ingest-performance-metrics topic:\r\n\r\n- configurable input Kafka topic\r\n- configurable output Kafka topic\r\n- configurable model from which to pull index results\r\n- tags for internal metrics to distinguish between the two modes operationally", "code": "def test_abnormal_user_sessions(self):\n user_ts = time.time()\n self._send_buckets(\n [\n {\n \"org_id\": self.organization.id,\n \"project_id\": self.project.id,\n \"metric_id\": self.session_user_metric,\n \"timestamp\": user_ts,\n \"tags\": {\n self.session_status_tag: _indexer_record(self.organization.id, \"abnormal\")\n },\n \"type\": \"s\",\n \"value\": [1, 2, 4],\n \"retention_days\": 90,\n },\n {\n \"org_id\": self.organization.id,\n \"project_id\": self.project.id,\n \"metric_id\": self.session_user_metric,\n \"timestamp\": user_ts,\n \"tags\": {},\n \"type\": \"s\",\n \"value\": [1, 2, 4, 7, 9],\n \"retention_days\": 90,\n },\n ],\n entity=\"metrics_sets\",\n )\n response = self.get_success_response(\n self.organization.slug,\n field=[\"session.abnormal_user\"],\n statsPeriod=\"6m\",\n interval=\"1m\",\n )\n group = response.data[\"groups\"][0]\n assert group[\"totals\"] == {\"session.abnormal_user\": 3}\n assert group[\"series\"] == {\"session.abnormal_user\": [0, 0, 0, 0, 0, 3]}\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 620, "n_words": 82, "vocab_size": 53, "complexity": 1, "nloc": 38, "token_counts": 218, "n_ast_nodes": 354, "n_identifiers": 20, "random_cut": "def test_abnormal_user_sessions(self):\n user_ts = time.time()\n self._send_buckets(\n [\n {\n \"org_id\": self.organization.id,\n \"project_id\": self.project.id,\n \"metric_id\": self.session_user_metric,\n \"timestamp\": user_ts,\n \"tags\": {\n self.session_status_tag: _indexer_record(self.organization.id, \"abnormal\")\n },\n \"type\": \"s\",\n \"value\": [1," }, { "id": 126525, "commit_id": "5b6a58ed2850d52b8e279d9553a910b7b1de1b42", "repo": "ray", "path": "rllib/offline/estimators/tests/test_ope.py", "file_name": "test_ope.py", "fun_name": "test_dm_mixed_policy_random_data", "commit_message": "[RLlib] Add OPE Learning Tests (#27154)", "code": "def test_dm_mixed_policy_random_data(self):\n print(\"Test DirectMethod on mixed policy on random dataset\")\n check_estimate(\n estimator_cls=DirectMethod,\n gamma=self.gamma,\n q_model_config=self.q_model_config,\n policy=self.mixed_policy,\n batch=self.random_batch,\n mean_ret=self.mixed_reward,\n std_ret=self.mixed_std,\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 116, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 11, "token_counts": 52, "n_ast_nodes": 77, "n_identifiers": 16, 
"random_cut": "def test_dm_mixed_policy_random_data(self):\n print(\"Test DirectMethod on mixed policy on random dataset\")\n check_estimate(\n estimator_cls=DirectMethod,\n gamma=self.gamma,\n q_model_config=self.q_model_config,\n policy=self.mixed_policy,\n batch=self.random_batch,\n " }, { "id": 137649, "commit_id": "e76ccee69aaa7583be1a9d81cf7b2aa72cf25647", "repo": "ray", "path": "python/ray/tests/spark/test_utils.py", "file_name": "test_utils.py", "fun_name": "test_get_spark_task_assigned_physical_gpus", "commit_message": "Ray on spark implementation (#28771)\n\nREP: ray-project/enhancements#14", "code": "def test_get_spark_task_assigned_physical_gpus():\n with patch.dict(os.environ, {}, clear=True):\n assert get_spark_task_assigned_physical_gpus([2, 5]) == [2, 5]\n\n with patch.dict(os.environ, {\"CUDA_VISIBLE_DEVICES\": \"2,3,6\"}, clear=True):\n assert get_spark_task_assigned_physical_gpus([0, 1]) == [2, 3]\n assert get_spark_task_assigned_physical_gpus([0, 2]) == [2, 6]\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 55, "n_words": 29, "vocab_size": 19, "complexity": 1, "nloc": 6, "token_counts": 86, "n_ast_nodes": 133, "n_identifiers": 7, "random_cut": "def test_get_spark_task_assigned_physical_gpus():\n with patch.dict(os.environ, {}, clear=True):\n assert get_spark_tas" }, { "id": 269982, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/callbacks_test.py", "file_name": "callbacks_test.py", "fun_name": "fitModelAndAssertKerasModelWritten", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def fitModelAndAssertKerasModelWritten(self, model):\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(\n self.logdir, write_graph=True, profile_batch=0\n )\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=3,\n validation_data=(x, y),\n callbacks=[tb_cbk],\n )\n summary_file = list_summaries(self.logdir)\n self.assertEqual(\n summary_file.tensors,\n {\n _ObservedSummary(logdir=self.train_dir, tag=\"keras\"),\n },\n )\n if not model.run_eagerly:\n # There should be one train graph\n self.assertLen(summary_file.graph_defs, 1)\n for graph_def in summary_file.graph_defs:\n graph_def_str = str(graph_def)\n\n # All the model layers should appear in the graphs\n for layer in model.layers:\n if \"input\" not in layer.name:\n self.assertIn(layer.name, graph_def_str)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 385, "n_words": 78, "vocab_size": 61, "complexity": 5, "nloc": 27, "token_counts": 174, "n_ast_nodes": 259, "n_identifiers": 35, "random_cut": "def fitModelAndAssertKerasModelWritten(self, model):\n x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))\n tb_cbk = keras.callbacks.TensorBoard(\n self.logdir, write_graph=True, profile_batch=0\n )\n model.fit(\n x,\n y,\n batch_size=2,\n epochs=3,\n validation_data=(x, y),\n callbacks=[tb_cbk],\n )\n summary_file = list_summaries(self.logdir)\n self.assertEqual(\n summary_file.tensors,\n {\n _ObservedSummary(logdir=self.train_dir, tag=\"keras\"),\n },\n )\n if not model.run_eagerly:\n # Th" }, { "id": 192654, "commit_id": "1ac6e8b91b980b052324f77828a5ef4a6715dd66", "repo": "vision", "path": "torchvision/prototype/datasets/_builtin/oxford_iiit_pet.py", "file_name": "oxford_iiit_pet.py", "fun_name": "_resources", 
"commit_message": "Refactor and simplify prototype datasets (#5778)\n\n* refactor prototype datasets to inherit from IterDataPipe (#5448)\r\n\r\n* refactor prototype datasets to inherit from IterDataPipe\r\n\r\n* depend on new architecture\r\n\r\n* fix missing file detection\r\n\r\n* remove unrelated file\r\n\r\n* reinstante decorator for mock registering\r\n\r\n* options -> config\r\n\r\n* remove passing of info to mock data functions\r\n\r\n* refactor categories file generation\r\n\r\n* fix imagenet\r\n\r\n* fix prototype datasets data loading tests (#5711)\r\n\r\n* reenable serialization test\r\n\r\n* cleanup\r\n\r\n* fix dill test\r\n\r\n* trigger CI\r\n\r\n* patch DILL_AVAILABLE for pickle serialization\r\n\r\n* revert CI changes\r\n\r\n* remove dill test and traversable test\r\n\r\n* add data loader test\r\n\r\n* parametrize over only_datapipe\r\n\r\n* draw one sample rather than exhaust data loader\r\n\r\n* cleanup\r\n\r\n* trigger CI\r\n\r\n* migrate VOC prototype dataset (#5743)\r\n\r\n* migrate VOC prototype dataset\r\n\r\n* cleanup\r\n\r\n* revert unrelated mock data changes\r\n\r\n* remove categories annotations\r\n\r\n* move properties to constructor\r\n\r\n* readd homepage\r\n\r\n* migrate CIFAR prototype datasets (#5751)\r\n\r\n* migrate country211 prototype dataset (#5753)\r\n\r\n* migrate CLEVR prototype datsaet (#5752)\r\n\r\n* migrate coco prototype (#5473)\r\n\r\n* migrate coco prototype\r\n\r\n* revert unrelated change\r\n\r\n* add kwargs to super constructor call\r\n\r\n* remove unneeded changes\r\n\r\n* fix docstring position\r\n\r\n* make kwargs explicit\r\n\r\n* add dependencies to docstring\r\n\r\n* fix missing dependency message\r\n\r\n* Migrate PCAM prototype dataset (#5745)\r\n\r\n* Port PCAM\r\n\r\n* skip_integrity_check\r\n\r\n* Update torchvision/prototype/datasets/_builtin/pcam.py\r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* Address comments\r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* Migrate DTD prototype dataset (#5757)\r\n\r\n* Migrate DTD prototype dataset\r\n\r\n* Docstring\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Philip Meier \r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* Migrate GTSRB prototype dataset (#5746)\r\n\r\n* Migrate GTSRB prototype dataset\r\n\r\n* ufmt\r\n\r\n* Address comments\r\n\r\n* Apparently mypy doesn't know that __len__ returns ints. 
How cute.\r\n\r\n* why is the CI not triggered??\r\n\r\n* Update torchvision/prototype/datasets/_builtin/gtsrb.py\r\n\r\nCo-authored-by: Philip Meier \r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* migrate CelebA prototype dataset (#5750)\r\n\r\n* migrate CelebA prototype dataset\r\n\r\n* inline split_id\r\n\r\n* Migrate Food101 prototype dataset (#5758)\r\n\r\n* Migrate Food101 dataset\r\n\r\n* Added length\r\n\r\n* Update torchvision/prototype/datasets/_builtin/food101.py\r\n\r\nCo-authored-by: Philip Meier \r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* Migrate Fer2013 prototype dataset (#5759)\r\n\r\n* Migrate Fer2013 prototype dataset\r\n\r\n* Update torchvision/prototype/datasets/_builtin/fer2013.py\r\n\r\nCo-authored-by: Philip Meier \r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* Migrate EuroSAT prototype dataset (#5760)\r\n\r\n* Migrate Semeion prototype dataset (#5761)\r\n\r\n* migrate caltech prototype datasets (#5749)\r\n\r\n* migrate caltech prototype datasets\r\n\r\n* resolve third party dependencies\r\n\r\n* Migrate Oxford Pets prototype dataset (#5764)\r\n\r\n* Migrate Oxford Pets prototype dataset\r\n\r\n* Update torchvision/prototype/datasets/_builtin/oxford_iiit_pet.py\r\n\r\nCo-authored-by: Philip Meier \r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* migrate mnist prototype datasets (#5480)\r\n\r\n* migrate MNIST prototype datasets\r\n\r\n* Update torchvision/prototype/datasets/_builtin/mnist.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Migrate Stanford Cars prototype dataset (#5767)\r\n\r\n* Migrate Stanford Cars prototype dataset\r\n\r\n* Address comments\r\n\r\n* fix category file generation (#5770)\r\n\r\n* fix category file generation\r\n\r\n* revert unrelated change\r\n\r\n* revert unrelated change\r\n\r\n* migrate cub200 prototype dataset (#5765)\r\n\r\n* migrate cub200 prototype dataset\r\n\r\n* address comments\r\n\r\n* fix category-file-generation\r\n\r\n* Migrate USPS prototype dataset (#5771)\r\n\r\n* migrate SBD prototype dataset (#5772)\r\n\r\n* migrate SBD prototype dataset\r\n\r\n* reuse categories\r\n\r\n* Migrate SVHN prototype dataset (#5769)\r\n\r\n* add test to enforce __len__ is working on prototype datasets (#5742)\r\n\r\n* reactivate special dataset tests\r\n\r\n* add missing annotation\r\n\r\n* Cleanup prototype dataset implementation (#5774)\r\n\r\n* Remove Dataset2 class\r\n\r\n* Move read_categories_file out of DatasetInfo\r\n\r\n* Remove FrozenBunch and FrozenMapping\r\n\r\n* Remove test_prototype_datasets_api.py and move missing dep test somewhere else\r\n\r\n* ufmt\r\n\r\n* Let read_categories_file accept names instead of paths\r\n\r\n* Mypy\r\n\r\n* flake8\r\n\r\n* fix category file reading\r\n\r\nCo-authored-by: Philip Meier \r\n\r\n* update prototype dataset README (#5777)\r\n\r\n* update prototype dataset README\r\n\r\n* fix header level\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\nCo-authored-by: Nicolas Hug ", "code": "def _resources(self) -> List[OnlineResource]:\n images = HttpResource(\n \"https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz\",\n sha256=\"67195c5e1c01f1ab5f9b6a5d22b8c27a580d896ece458917e61d459337fa318d\",\n preprocess=\"decompress\",\n )\n anns = HttpResource(\n \"https://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz\",\n sha256=\"52425fb6de5c424942b7626b428656fcbd798db970a937df61750c0f1d358e91\",\n preprocess=\"decompress\",\n )\n return [images, anns]\n", "url": 
"https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 121, "n_words": 21, "vocab_size": 17, "complexity": 1, "nloc": 12, "token_counts": 46, "n_ast_nodes": 78, "n_identifiers": 9, "random_cut": "def _resources(self) -> List[OnlineResource]:\n images = HttpResource(\n \"https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz\",\n sha256=\"67" }, { "id": 66660, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v12_0/set_cwip_and_delete_asset_settings.py", "file_name": "set_cwip_and_delete_asset_settings.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\t\n\n\tif frappe.db.exists(\"DocType\", \"Asset Settings\"):\n\t\tfrappe.reload_doctype(\"Asset Category\")\n\t\tcwip_value = frappe.db.get_single_value(\"Asset Settings\", \"disable_cwip_accounting\")\n\n\t\tfrappe.db.sql(, cint(cwip_value))\n\n\t\tfrappe.db.sql()\n\t\tfrappe.delete_doc_if_exists(\"DocType\", \"Asset Settings\")\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 12, "n_words": 19, "vocab_size": 18, "complexity": 2, "nloc": 7, "token_counts": 64, "n_ast_nodes": 121, "n_identifiers": 10, "random_cut": "def execute():\n\t\n\n\tif frappe.db.exists(\"DocType\", \"Asset Settings\"):\n\t\tfrappe.reload_doctype(\"Asset Category\")\n\t\tcwip_value = frappe.db.get_single_value(\"Asset Settings\", \"disable_cwip_accounting\")\n\n\t\tfrappe.db.sql(, cint(cwip_value))\n\n\t\tfrappe.db.sql()\n\t\tfr" }, { "id": 261954, "commit_id": "2fb1f705031d4a9602e5853232d28b53cde89a5f", "repo": "TTS", "path": "TTS/tts/utils/text/symbols.py", "file_name": "symbols.py", "fun_name": "parse_symbols", "commit_message": "Implement BaseCharacters, IPAPhonemes, Graphemes", "code": "def parse_symbols():\n return {\n \"pad\": _pad,\n \"eos\": _eos,\n \"bos\": _bos,\n \"characters\": _characters,\n \"punctuations\": _punctuations,\n \"phonemes\": _phonemes,\n }\n\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 64, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 9, "token_counts": 31, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def parse_symbols():\n return {\n \"pad\": _pad,\n \"eos\": _eos,\n \"bos\": _bos,\n \"characters\": _char" }, { "id": 208580, "commit_id": "1a9d9554bcee466394990535e190d55008904df8", "repo": "ipython", "path": "IPython/core/tests/test_magic.py", "file_name": "test_magic.py", "fun_name": "test_file_double_quote", "commit_message": "Format code", "code": "def test_file_double_quote():\n \n ip = get_ipython()\n with TemporaryDirectory() as td:\n fname = os.path.join(td, '\"file1\"')\n ip.run_cell_magic(\n \"writefile\",\n fname,\n \"\\n\".join(\n [\n \"line1\",\n \"line2\",\n ]\n ),\n )\n s = Path(fname).read_text(encoding=\"utf-8\")\n assert \"line1\\n\" in s\n assert \"line2\" in s\n\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 197, "n_words": 34, "vocab_size": 28, "complexity": 1, "nloc": 17, "token_counts": 71, "n_ast_nodes": 134, "n_identifiers": 14, "random_cut": "def test_file_double_quote():\n \n ip = get_ipython()\n with TemporaryDirectory() as td:\n fname = os.path.join(td, '\"file1\"')\n ip.run_cell_magic(\n \"writefile\",\n fname,\n 
\"\\n\".join(\n [\n \"line1\",\n \"line2\",\n ]\n ),\n )\n s = Path(fname).read_text(encoding=\"utf-8\")\n assert \"line1\\n\" in s\n assert \"line2\" in s\n\n" }, { "id": 171193, "commit_id": "f3c46cd0899d5e11e0602798d9390c90e51e9ba7", "repo": "pandas", "path": "pandas/tests/scalar/timestamp/test_timestamp.py", "file_name": "test_timestamp.py", "fun_name": "test_sub_timedeltalike_mismatched_reso", "commit_message": "API: make Timestamp/Timedelta _as_unit public as_unit (#48819)\n\n* API: make Timestamp/Timedelta _as_unit public as_unit\r\n\r\n* update test\r\n\r\n* update test\r\n\r\n* update tests\r\n\r\n* fix pyi typo\r\n\r\n* fixup\r\n\r\n* fixup", "code": "def test_sub_timedeltalike_mismatched_reso(self, ts_tz):\n # case with non-lossy rounding\n ts = ts_tz\n\n # choose a unit for `other` that doesn't match ts_tz's;\n # this construction ensures we get cases with other._creso < ts._creso\n # and cases with other._creso > ts._creso\n unit = {\n NpyDatetimeUnit.NPY_FR_us.value: \"ms\",\n NpyDatetimeUnit.NPY_FR_ms.value: \"s\",\n NpyDatetimeUnit.NPY_FR_s.value: \"us\",\n }[ts._creso]\n other = Timedelta(0).as_unit(unit)\n assert other._creso != ts._creso\n\n result = ts + other\n assert isinstance(result, Timestamp)\n assert result == ts\n assert result._creso == max(ts._creso, other._creso)\n\n result = other + ts\n assert isinstance(result, Timestamp)\n assert result == ts\n assert result._creso == max(ts._creso, other._creso)\n\n if ts._creso < other._creso:\n # Case where rounding is lossy\n other2 = other + Timedelta._from_value_and_reso(1, other._creso)\n exp = ts.as_unit(other.unit) + other2\n res = ts + other2\n assert res == exp\n assert res._creso == max(ts._creso, other._creso)\n res = other2 + ts\n assert res == exp\n assert res._creso == max(ts._creso, other._creso)\n else:\n ts2 = ts + Timedelta._from_value_and_reso(1, ts._creso)\n exp = ts2 + other.as_unit(ts2.unit)\n\n res = ts2 + other\n assert res == exp\n assert res._creso == max(ts._creso, other._creso)\n res = other + ts2\n assert res == exp\n assert res._creso == max(ts._creso, other._creso)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 533, "n_words": 179, "vocab_size": 67, "complexity": 2, "nloc": 35, "token_counts": 283, "n_ast_nodes": 438, "n_identifiers": 23, "random_cut": "def test_sub_timedeltalike_mismatched_reso(self, ts_tz):\n # case with non-lossy rounding\n ts = ts_tz\n\n # choose a unit for `other` that doesn't match ts_tz's;\n # this construction ensures we get cases with other._creso < ts._creso\n # and cases with other._creso > ts._creso\n unit = {\n NpyDatetimeUnit.NPY_FR_us.value: \"ms\",\n NpyDatetimeUnit.NPY_FR_ms.value: \"s\",\n NpyDatetimeUnit.NPY_FR_s.value: \"us\",\n }[ts._creso]\n other = Timedelta(0).as_unit(unit)\n assert other._creso != ts._creso\n\n result = ts + other\n assert isinstance(result, Timestamp)\n assert result == ts\n assert result._creso == max(ts._creso, other._creso)\n\n result = other + ts\n assert isinstance(result, Timestamp)\n assert result == ts\n assert result._creso == max(ts._creso, other._creso)\n\n if ts._creso < other._creso:\n # Case where rounding is lossy\n other2 = other + Timedelta._from_value_and_reso(1, other._creso)\n exp = ts.as_unit(other.unit) + other2\n res = ts + other2\n assert res == exp\n assert res._creso == max(ts._creso, other._creso)\n res = other2 + ts\n assert res == exp\n assert res._creso == max(ts._creso, other._creso)\n else:\n ts2 = 
ts + Timedelta._from_value_and_reso(1, ts._creso)\n exp = ts2 + other.as_unit(ts2.unit)\n\n res = ts2 + other\n assert res == exp\n assert res._creso == max(ts._creso, other._creso)\n res = ot" }, { "id": 179572, "commit_id": "188757c1abd64a69a27ee926e30a890fcd87bc7b", "repo": "gradio", "path": "gradio/routes.py", "file_name": "routes.py", "fun_name": "file", "commit_message": "added backend support, see demo xray_blocks", "code": "def file(path):\n if (\n app.launchable.encrypt\n and isinstance(app.launchable.examples, str)\n and path.startswith(app.launchable.examples)\n ):\n with open(safe_join(app.cwd, path), \"rb\") as encrypted_file:\n encrypted_data = encrypted_file.read()\n file_data = encryptor.decrypt(app.launchable.encryption_key, encrypted_data)\n return FileResponse(\n io.BytesIO(file_data), attachment_filename=os.path.basename(path)\n )\n else:\n return FileResponse(safe_join(app.cwd, path))\n\n\n@app.get(\"/api\", response_class=HTMLResponse) # Needed for Spaces\n@app.get(\"/api/\", response_class=HTMLResponse)", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "@app.get(\"/api\", response_class=HTMLResponse) # Needed for Spaces\n@app.get(\"/api/\", response_class=HTMLResponse)", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 126, "n_words": 41, "vocab_size": 37, "complexity": 4, "nloc": 14, "token_counts": 109, "n_ast_nodes": 211, "n_identifiers": 28, "random_cut": "def file(path):\n if (\n app.launchable.encrypt\n and isinstance(app.launchable.examples, str)\n and path" }, { "id": 7066, "commit_id": "e65f74e87e8e29922f4e9f9d839978ffb2c5b029", "repo": "ludwig", "path": "ludwig/models/predictor.py", "file_name": "predictor.py", "fun_name": "_accumulate_preds", "commit_message": "Adds mechanism for calibrating probabilities for category and binary features (#1949)\n\n* Started adding files for calibration implementation.\r\n\r\n* Adds option to return logits and labels in predictor.\r\n\r\n* Pre-commit fixes\r\n\r\n* First pass temperature scaling working.\r\n\r\n* Fixes calibration for categorical feature.\r\n\r\n* Separate calibrated logits from logits.\r\n\r\n* Adds option to revert temperature scaling.\r\n\r\n* Refactoring, move binary prediction logic into calibration class.\r\n\r\n* Reverted accidental commit to simple_model_training.py\r\n\r\n* Adds checks and comments.\r\n\r\n* Fixes matrix scaling, convert pandas series to numpy arrays.\r\n\r\n* Fixes number of classes for categorical features.\r\n\r\n* Adds structured calibration result, unit tests.\r\n\r\n* Make create_calibration_module not abstract, default implementation returns None.\r\n\r\n* Relax precision requirement for calibration test.\r\n\r\n* Save weights after calibration, so calibration results are included in save file.\r\n\r\n* Implemented dirichlet scaling with l2 off-diagonal regularization.\r\n\r\n* Adds masked_select off_diagonal method.\r\n\r\n* Change back to matrix scaling.\r\n\r\n* Updates test expectations to reflect learning rate settings.\r\n\r\n* Tuned default regularization weight.\r\n\r\n* Comments.\r\n\r\n* Set random seed, testing to see if that makes a difference.\r\n\r\n* Remove checks for exact NLL, ECE values post calibration.\r\n\r\n* Restored LOGITS to EXCLUDE_PRED_SET, added another option to return logits in batch_predict.\r\n\r\n* Factor calibration method out of Trainer into Calibrator\r\n\r\n* Removed horovod argument from calibrator.\r\n\r\n* Return batch_size if eval_batch_size not specified.\r\n\r\n* Fix calibration_module 
docstring.\r\n\r\n* Updates comment, adds fallback method of calibrating on training set if no validation set available.\r\n\r\n* Adds calibration registry, replaces if statements for instantiating calibration.\r\n\r\n* Raise ValueError if unsupported calibration method specified.\r\n\r\n* Remove calibrate method from Trainer\r\n\r\n* f string\r\n\r\n* Use backend to create predictor for calibration.\r\n\r\n* Moves saving out of calibrator\r\n\r\n* Fix comment.\r\n\r\n* Adds ray test of calibration.\r\n\r\n* Implements collect_logits in ray predictor.\r\n\r\n* First pass implementation of collect_labels.\r\n\r\n* Implements collect_logits and collect_labels in ray backend.\r\n\r\n* Merge predictions and labels in ray backend\r\n\r\n* Reverts collect_labels, get labels from dataset in calibrate.\r\n\r\n* Allow overriding EXCLUDE_PRED_SET when getting preds.\r\n\r\n* Changes 'calibration' config option to binary.\r\n\r\n* Test both binary and category output features in ray test.\r\n\r\n* Comments/\r\n\r\n* Adds type hints.\r\n\r\nCo-authored-by: Daniel Treiman ", "code": "def _accumulate_preds(self, preds, predictions, exclude_pred_set=EXCLUDE_PRED_SET):\n # accumulate predictions from batch for each output feature\n for of_name, of_preds in preds.items():\n for pred_name, pred_values in of_preds.items():\n if pred_name not in exclude_pred_set:\n key = f\"{of_name}_{pred_name}\"\n predictions[key].append(pred_values)\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 110, "n_words": 33, "vocab_size": 29, "complexity": 4, "nloc": 6, "token_counts": 54, "n_ast_nodes": 91, "n_identifiers": 13, "random_cut": "def _accumulate_preds(self, preds, predictions, exclude_pred_set=EXCLUDE_PRED_SET):\n # accumulate predictions from batch for each output feature\n for of_name, of_preds in preds.items():\n for pred_name, pred_v" }, { "id": 64842, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/cheque_print_template/cheque_print_template.py", "file_name": "cheque_print_template.py", "fun_name": "create_or_update_cheque_print_format", "commit_message": "style: format code with black", "code": "def create_or_update_cheque_print_format(template_name):\n\tif not frappe.db.exists(\"Print Format\", template_name):\n\t\tcheque_print = frappe.new_doc(\"Print Format\")\n\t\tcheque_print.update(\n\t\t\t{\n\t\t\t\t\"doc_type\": \"Payment Entry\",\n\t\t\t\t\"standard\": \"No\",\n\t\t\t\t\"custom_format\": 1,\n\t\t\t\t\"print_format_type\": \"Jinja\",\n\t\t\t\t\"name\": template_name,\n\t\t\t}\n\t\t)\n\telse:\n\t\tcheque_print = frappe.get_doc(\"Print Format\", template_name)\n\n\tdoc = frappe.get_doc(\"Cheque Print Template\", template_name)\n\n\tcheque_print.html = % {\n\t\t\"starting_position_from_top_edge\": doc.starting_position_from_top_edge\n\t\tif doc.cheque_size == \"A4\"\n\t\telse 0.0,\n\t\t\"cheque_width\": doc.cheque_width,\n\t\t\"cheque_height\": doc.cheque_height,\n\t\t\"acc_pay_dist_from_top_edge\": doc.acc_pay_dist_from_top_edge,\n\t\t\"acc_pay_dist_from_left_edge\": doc.acc_pay_dist_from_left_edge,\n\t\t\"message_to_show\": doc.message_to_show if doc.message_to_show else _(\"Account Pay Only\"),\n\t\t\"date_dist_from_top_edge\": doc.date_dist_from_top_edge,\n\t\t\"date_dist_from_left_edge\": doc.date_dist_from_left_edge,\n\t\t\"acc_no_dist_from_top_edge\": doc.acc_no_dist_from_top_edge,\n\t\t\"acc_no_dist_from_left_edge\": 
doc.acc_no_dist_from_left_edge,\n\t\t\"payer_name_from_top_edge\": doc.payer_name_from_top_edge,\n\t\t\"payer_name_from_left_edge\": doc.payer_name_from_left_edge,\n\t\t\"amt_in_words_from_top_edge\": doc.amt_in_words_from_top_edge,\n\t\t\"amt_in_words_from_left_edge\": doc.amt_in_words_from_left_edge,\n\t\t\"amt_in_word_width\": doc.amt_in_word_width,\n\t\t\"amt_in_words_line_spacing\": doc.amt_in_words_line_spacing,\n\t\t\"amt_in_figures_from_top_edge\": doc.amt_in_figures_from_top_edge,\n\t\t\"amt_in_figures_from_left_edge\": doc.amt_in_figures_from_left_edge,\n\t\t\"signatory_from_top_edge\": doc.signatory_from_top_edge,\n\t\t\"signatory_from_left_edge\": doc.signatory_from_left_edge,\n\t}\n\n\tcheque_print.save(ignore_permissions=True)\n\n\tfrappe.db.set_value(\"Cheque Print Template\", template_name, \"has_print_format\", 1)\n\n\treturn cheque_print\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 63, "n_words": 104, "vocab_size": 88, "complexity": 4, "nloc": 85, "token_counts": 246, "n_ast_nodes": 419, "n_identifiers": 36, "random_cut": "def create_or_update_cheque_print_format(template_name):\n\tif not frappe.db.exists(\"Print Format\", template_name):\n\t\tcheque_print = frappe.new_doc(\"Print Format\")\n\t\tcheque_print.update(\n\t\t\t{\n\t\t\t\t\"doc_type\": \"Payment Entry\",\n\t\t\t\t\"standard\": \"No\",\n\t\t\t\t\"custom_format\": 1,\n\t\t\t\t\"print_format_type\": \"Jinja\",\n\t\t\t\t\"name\": template_name,\n\t\t\t}\n\t\t)\n\telse:\n\t\tcheque_print = frappe.get_doc(\"Print Format\", template_name)\n\n\tdoc = frappe.get_doc(\"Cheque Print Template\", template_name)\n\n\tcheque_print.html = % {\n\t\t\"starting_position_from_top_edge\": doc.starting_position_from_top_edge\n\t\tif doc.cheque_size == \"A4\"\n\t\telse 0.0,\n\t\t\"cheque_width\": doc.cheque_width,\n\t\t\"cheque_height\": doc.cheque_height,\n\t\t\"acc_pay_dist_from_top_edge\": doc.acc_pay_dist_from_top_edge,\n\t\t\"acc_pay_dist_from_left_edge\": doc.acc_pay_dist_from_left_edge,\n\t\t\"message_to_show\": doc.message_to_show if doc.message_to_show else _(\"Account Pay Only\"),\n\t\t\"date_dist_from_top_edge\": doc.date_dist_from_top_edge,\n\t\t\"date_dist_from_left_edge\": doc.date_dist_from_left_edge,\n\t\t\"acc_no_dist_from_top_edge\": doc.acc_no_dist_from_top_edge,\n\t\t\"acc_no_dist_from_left_edge\": doc.acc_no_dist_from_left_edge,\n\t\t\"payer_name_from_top_edge\": doc.payer_name_from_top_edge,\n\t\t\"payer_name_from_left_edge\": doc.payer_name_from_left_edge,\n\t\t\"amt_in_words_from_top_edge\": doc.amt_in_words_from_top_edge,\n\t" }, { "id": 197337, "commit_id": "65be461082dda54c8748922f9c29a19af1279fe1", "repo": "sympy", "path": "sympy/physics/quantum/shor.py", "file_name": "shor.py", "fun_name": "shor", "commit_message": "Remove abbreviations in documentation", "code": "def shor(N):\n \n a = random.randrange(N - 2) + 2\n if igcd(N, a) != 1:\n return igcd(N, a)\n r = period_find(a, N)\n if r % 2 == 1:\n shor(N)\n answer = (igcd(a**(r/2) - 1, N), igcd(a**(r/2) + 1, N))\n return answer\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 75, "n_words": 40, "vocab_size": 27, "complexity": 3, "nloc": 9, "token_counts": 89, "n_ast_nodes": 138, "n_identifiers": 9, "random_cut": "def shor(N):\n \n a = random.randrange(N - 2) + 2\n if igc" }, { "id": 276738, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": 
"keras", "path": "keras/utils/data_utils.py", "file_name": "data_utils.py", "fun_name": "_hash_file", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _hash_file(fpath, algorithm=\"sha256\", chunk_size=65535):\n \n if isinstance(algorithm, str):\n hasher = _resolve_hasher(algorithm)\n else:\n hasher = algorithm\n\n with open(fpath, \"rb\") as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b\"\"):\n hasher.update(chunk)\n\n return hasher.hexdigest()\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 75, "n_words": 28, "vocab_size": 26, "complexity": 3, "nloc": 9, "token_counts": 73, "n_ast_nodes": 123, "n_identifiers": 15, "random_cut": "def _hash_file(fpath, algorithm=\"sha256\", chunk_size=65535):\n \n if isinstance(algorithm, str):\n hasher = _resolve_hasher(algorithm)\n else:\n hasher = algorithm\n\n with open(fpath, \"rb\") as fpath_file:\n for chunk in iter(lambda: fpath_file.read(chunk_size), b\"\"):\n hasher.u" }, { "id": 46930, "commit_id": "9769a65c20f6028d640061efacbc5bfeb5ebaf3d", "repo": "airflow", "path": "tests/jobs/test_backfill_job.py", "file_name": "test_backfill_job.py", "fun_name": "test_backfill_max_limit_check", "commit_message": "Fixed backfill interference with scheduler (#22701)\n\nCo-authored-by: Dmirty Suvorov ", "code": "def test_backfill_max_limit_check(self, dag_maker):\n dag_id = 'test_backfill_max_limit_check'\n run_id = 'test_dag_run'\n start_date = DEFAULT_DATE - datetime.timedelta(hours=1)\n end_date = DEFAULT_DATE\n\n dag_run_created_cond = threading.Condition()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 54, "n_words": 20, "vocab_size": 15, "complexity": 2, "nloc": 28, "token_counts": 176, "n_ast_nodes": 61, "n_identifiers": 14, "random_cut": "def test_backfill_max_limit_check(self, dag_maker):\n dag_id = 'test_backfill_max_limit_check'\n run_id = 'test_dag_run'\n start_date = DEFAULT_DATE - datetime.timedelta(hours=1)\n end_date = DEFAULT_DATE\n\n dag_run_created_cond = threading.Condition()\n" }, { "id": 45438, "commit_id": "bb26f96665567325a7fbb810249820e7dac0322a", "repo": "airflow", "path": "airflow/www/utils.py", "file_name": "utils.py", "fun_name": "get_instance_with_map", "commit_message": "Make Grid and and Graph view work with task mapping (#21740)\n\n* Expand mapped tasks in the Scheduler\r\n\r\nTechnically this is done inside\r\nDagRun.task_instance_scheduling_decisions, but the only place that is\r\ncurrently called is the Scheduler\r\n\r\nThe way we are getting `upstream_ti` to pass to expand_mapped_task is\r\nall sorts of wrong and will need fixing, I think the interface for that\r\nmethod is wrong and the mapped task should be responsible for finding\r\nthe right upstream TI itself.\r\n\r\n* make UI and tree work with mapped tasks\r\n\r\n* add graph tooltip and map count\r\n\r\n* simplify node label redraw logic\r\n\r\n* add utils.js and map_index to /taskInstances\r\n\r\n* use TaskInstanceState instead of strings\r\n\r\n* move map_index on /taskinstance to separate PR\r\n\r\n* check to use Task or Tasks\r\n\r\n* remove `no_status` and use TaskInstanceState\r\n\r\nCo-authored-by: Ash Berlin-Taylor ", "code": "def get_instance_with_map(task_instance, session):\n if task_instance.map_index == -1:\n return alchemy_to_dict(task_instance)\n mapped_instances = 
get_mapped_instances(task_instance, session)\n return get_mapped_summary(task_instance, mapped_instances)\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 5, "token_counts": 35, "n_ast_nodes": 54, "n_identifiers": 8, "random_cut": "def get_instance_with_map(task_instance, session):\n if task_instance.map_index == -1:\n return alchemy_to_dict" }, { "id": 27823, "commit_id": "2a5e6795271fcec84228f86267f3127c9925a888", "repo": "saleor", "path": "saleor/graphql/discount/mutations/voucher_create.py", "file_name": "voucher_create.py", "fun_name": "success_response", "commit_message": "Reorganise discount mutations (#10037)\n\n* reorganise discount mutations\r\n\r\n* remove commented imports\r\n\r\n* fixes after review\r\n\r\n* drop NodeIatalogueInfo", "code": "def success_response(cls, instance):\n instance = ChannelContext(node=instance, channel_slug=None)\n return super().success_response(instance)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 22, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 28, "n_ast_nodes": 44, "n_identifiers": 7, "random_cut": "def success_response(cls, instance):\n instance = ChannelContext(node=insta" }, { "id": 192861, "commit_id": "b430ba684fb0d689427eaa44ba0b2c363e64f285", "repo": "vision", "path": "test/test_prototype_datasets_utils.py", "file_name": "test_prototype_datasets_utils.py", "fun_name": "test_load_folder", "commit_message": "simplify OnlineResource.load (#5990)\n\n* simplify OnlineResource.load\r\n\r\n* [PoC] merge mock data preparation and loading\r\n\r\n* Revert \"cache mock data based on config\"\r\n\r\nThis reverts commit 5ed6eedef74865e0baa746a375d5ec1f0ab1bde7.\r\n\r\n* Revert \"[PoC] merge mock data preparation and loading\"\r\n\r\nThis reverts commit d62747962f9ed6a7b0b80849e7c971efabb5d3da.\r\n\r\n* remove preprocess returning a new path in favor of querying twice\r\n\r\n* address test comments\r\n\r\n* clarify comment\r\n\r\n* mypy\r\n\r\n* use builtin decompress utility", "code": "def test_load_folder(self, tmp_path):\n folder, files = self._make_folder(tmp_path)\n\n resource = self.DummyResource(file_name=folder.name)\n\n dp = resource.load(tmp_path)\n assert isinstance(dp, FileOpener)\n assert {path: buffer.read().decode() for path, buffer in dp} == files\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 60, "n_words": 26, "vocab_size": 22, "complexity": 2, "nloc": 6, "token_counts": 66, "n_ast_nodes": 103, "n_identifiers": 18, "random_cut": "def test_load_folder(self, tmp_path):\n folder, files = self._make_folder(tmp_path)\n\n resource = self.DummyResource(file_name=folder.name)\n\n " }, { "id": 65261, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/gross_and_net_profit_report/gross_and_net_profit_report.py", "file_name": "gross_and_net_profit_report.py", "fun_name": "adjust_account", "commit_message": "style: format code with black", "code": "def adjust_account(data, period_list, consolidated=False):\n\tleaf_nodes = [item for item in data if item[\"is_group\"] == 0]\n\ttotals = {}\n\tfor node in leaf_nodes:\n\t\tset_total(node, node[\"total\"], data, totals)\n\tfor d in data:\n\t\tfor period in 
period_list:\n\t\t\tkey = period if consolidated else period.key\n\t\t\td[key] = totals[d[\"account\"]]\n\t\t\td[\"total\"] = totals[d[\"account\"]]\n\treturn data\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 38, "n_words": 49, "vocab_size": 35, "complexity": 7, "nloc": 11, "token_counts": 94, "n_ast_nodes": 144, "n_identifiers": 12, "random_cut": "def adjust_account(data, period_list, consolidated=False):\n\tleaf_nodes = [item for item in data " }, { "id": 92947, "commit_id": "cd803d173c72b64d06c0687170bf9a945d0b503c", "repo": "sentry", "path": "tests/sentry/search/events/test_builder.py", "file_name": "test_builder.py", "fun_name": "test_aggregate_query_with_multiple_entities_without_orderby", "commit_message": "fix(snuba): Add appropriate `UseCaseKey` for indexer [TET-146] (#36308)\n\n* fix(snuba): Add appropriate `UseCaseKey` for indexer\r\n\r\nUpdate indexer invocation call to have the appropriate\r\n`UseCaseKey` depending on use case.\r\n\r\nIn `src/sentry/sentry_metrics/indexer/base.py::StringIndexer`\r\nwhen using `resolve` and `reverse_resolve` callers should not\r\nrely on the default use_case_id.\r\n\r\nImportant changes:\r\n- Add required parameter `use_case_id: UseCaseKey` to `get_series` from `src/sentry/snuba/metrics/datasource.py#L612`;\r\n- Add required parameter to `get_metrics` in `src/sentry/snuba/metrics/datasource.py`\r\n- Add required parameter to `get_tags` in `src/sentry/snuba/metrics/datasource.py`\r\n- Add required parameter to `get_tag_values` in `src/sentry/snuba/metrics/datasource.py`", "code": "def test_aggregate_query_with_multiple_entities_without_orderby(self):\n self.store_metric(\n 200,\n tags={\"transaction\": \"baz_transaction\"},\n timestamp=self.start + datetime.timedelta(minutes=5),\n )\n self.store_metric(\n 1,\n metric=\"user\",\n tags={\"transaction\": \"bar_transaction\"},\n timestamp=self.start + datetime.timedelta(minutes=5),\n )\n self.store_metric(\n 1,\n metric=\"user\",\n tags={\"transaction\": \"baz_transaction\"},\n timestamp=self.start + datetime.timedelta(minutes=5),\n )\n self.store_metric(\n 2,\n metric=\"user\",\n tags={\"transaction\": \"baz_transaction\"},\n timestamp=self.start + datetime.timedelta(minutes=5),\n )\n # This will query both sets & distribution cause of selected columns\n query = MetricsQueryBuilder(\n self.params,\n # Filter by count_unique since the default primary is distributions without an orderby\n \"count_unique(user):>1\",\n dataset=Dataset.PerformanceMetrics,\n selected_columns=[\n \"transaction\",\n \"project\",\n \"p95(transaction.duration)\",\n \"count_unique(user)\",\n ],\n allow_metric_aggregates=True,\n use_aggregate_conditions=True,\n )\n result = query.run_query(\"test_query\")\n assert len(result[\"data\"]) == 1\n assert result[\"data\"][0] == {\n \"transaction\": indexer.resolve(\n self.organization.id,\n \"baz_transaction\",\n UseCaseKey.PERFORMANCE,\n ),\n \"project\": self.project.slug,\n \"p95_transaction_duration\": 200,\n \"count_unique_user\": 2,\n }\n self.assertCountEqual(\n result[\"meta\"],\n [\n {\"name\": \"transaction\", \"type\": \"UInt64\"},\n {\"name\": \"project\", \"type\": \"String\"},\n {\"name\": \"p95_transaction_duration\", \"type\": \"Float64\"},\n {\"name\": \"count_unique_user\", \"type\": \"UInt64\"},\n ],\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 746, 
"n_words": 122, "vocab_size": 80, "complexity": 1, "nloc": 58, "token_counts": 293, "n_ast_nodes": 496, "n_identifiers": 31, "random_cut": "def test_aggregate_query_with_multiple_entities_without_orderby(self):\n self.store_metric(\n 200,\n tags={\"transaction\": \"baz_transaction\"},\n timestamp=self.start + datetime.timedelta(minutes=5),\n )\n self.store_metric(\n 1,\n metric=\"user\",\n tags={\"transaction\": \"bar_transaction\"},\n timestamp=self.start + datetime.timedelta(minutes=5),\n )\n self.store_metric(\n 1,\n metric=\"user\",\n tags={\"transaction\": \"baz_transaction\"},\n timestamp=self.start + datetime.timedelta(minutes=5),\n )\n self.store_metric(\n 2,\n metric=\"user\",\n tags={\"transaction\": \"baz_transaction\"},\n timestamp=self.start + datetime.timedelta(minutes=5),\n )\n # This will query both sets & distribution cause of selected columns\n query = MetricsQueryBuilder(\n self.params,\n # Filter by count_unique since the default primary is distributions without an orderby\n \"count_unique(user):>1\",\n dataset=Dataset.PerformanceMetrics,\n selected_columns=[\n \"transaction\",\n \"project\",\n \"p95(transaction.duration)\",\n \"count_unique(user)\",\n ],\n allow_metric_aggregates=True,\n use_aggregate_conditions=True,\n )\n res" }, { "id": 91721, "commit_id": "7f60db924ea37f34e0cfe6856777239e2a2ffe13", "repo": "sentry", "path": "tests/sentry/sentry_metrics/test_postgres_indexer.py", "file_name": "test_postgres_indexer.py", "fun_name": "test_already_created_plus_written_results", "commit_message": "feat(metrics): make indexer more configurable (#35604)\n\nThis makes the sentry_metrics indexer more configurable in the following ways, to enable indexing on the ingest-performance-metrics topic:\r\n\r\n- configurable input Kafka topic\r\n- configurable output Kafka topic\r\n- configurable model from which to pull index results\r\n- tags for internal metrics to distinguish between the two modes operationally", "code": "def test_already_created_plus_written_results(self) -> None:\n \n org_id = 1234\n v0 = StringIndexer.objects.create(organization_id=org_id, string=\"v1.2.0\")\n v1 = StringIndexer.objects.create(organization_id=org_id, string=\"v1.2.1\")\n v2 = StringIndexer.objects.create(organization_id=org_id, string=\"v1.2.2\")\n\n expected_mapping = {\"v1.2.0\": v0.id, \"v1.2.1\": v1.id, \"v1.2.2\": v2.id}\n\n results = self.indexer.bulk_record(\n use_case_id=self.use_case_id, org_strings={org_id: {\"v1.2.0\", \"v1.2.1\", \"v1.2.2\"}}\n )\n assert len(results[org_id]) == len(expected_mapping) == 3\n\n for string, id in results[org_id].items():\n assert expected_mapping[string] == id\n\n results = self.indexer.bulk_record(\n use_case_id=self.use_case_id,\n org_strings={org_id: {\"v1.2.0\", \"v1.2.1\", \"v1.2.2\", \"v1.2.3\"}},\n )\n\n v3 = StringIndexer.objects.get(organization_id=org_id, string=\"v1.2.3\")\n expected_mapping[\"v1.2.3\"] = v3.id\n\n assert len(results[org_id]) == len(expected_mapping) == 4\n\n for string, id in results[org_id].items():\n assert expected_mapping[string] == id\n\n fetch_meta = results.get_fetch_metadata()\n assert_fetch_type_for_tag_string_set(\n fetch_meta, FetchType.CACHE_HIT, {\"v1.2.0\", \"v1.2.1\", \"v1.2.2\"}\n )\n assert_fetch_type_for_tag_string_set(fetch_meta, FetchType.FIRST_SEEN, {\"v1.2.3\"})\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 302, "n_words": 96, "vocab_size": 57, "complexity": 3, "nloc": 30, "token_counts": 270, 
"n_ast_nodes": 436, "n_identifiers": 28, "random_cut": "def test_already_created_plus_written_results(self) -> None:\n \n org_id = 1234\n v0 = StringIndexer.objects.create(organization_id=org_id, string=\"v1.2.0\")\n v1 = StringIndexer.objects.create(organization_id=org_id, string=\"v1.2.1\")\n v2 = StringIndexer.objects.create(organization_id=org_id, string=\"v1.2.2\")\n\n expected_mapping = {\"v1.2.0\": v0.id, \"v1.2.1\": v1.id, \"v1.2.2\": v2.id}\n\n results = self.indexer.bulk_record(\n use_case_id=self.use_case_id, org_strings={org_id: {\"v1.2.0\", \"v1.2.1\", \"v1.2.2\"}}\n )\n assert len(results[org_id]) == len(expected_mapping) == 3\n\n for string, id in results[org_id].items():\n assert expected_mapping[str" }, { "id": 168180, "commit_id": "46c615d43bd197fb4defdf6231929b58c0e50288", "repo": "pandas", "path": "pandas/tests/arrays/categorical/test_indexing.py", "file_name": "test_indexing.py", "fun_name": "test_categories_assignments", "commit_message": "DEPR: inplace keyword for Categorical.set_ordered, setting .categories directly (#47834)\n\n* DEPR: inplcae keyword for Categorical.set_ordered, setting .categories directly\r\n\r\n* update docs\r\n\r\n* typo fixup\r\n\r\n* suppress warning\r\n\r\nCo-authored-by: Jeff Reback ", "code": "def test_categories_assignments(self):\n cat = Categorical([\"a\", \"b\", \"c\", \"a\"])\n exp = np.array([1, 2, 3, 1], dtype=np.int64)\n with tm.assert_produces_warning(FutureWarning, match=\"Use rename_categories\"):\n cat.categories = [1, 2, 3]\n tm.assert_numpy_array_equal(cat.__array__(), exp)\n tm.assert_index_equal(cat.categories, Index([1, 2, 3]))\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 75, "n_words": 30, "vocab_size": 26, "complexity": 1, "nloc": 7, "token_counts": 95, "n_ast_nodes": 149, "n_identifiers": 18, "random_cut": "def test_categories_assignments(self):\n cat = Categorical([\"a\", \"b\", \"c\", \"a\"])\n exp = np.array([1, 2, 3, 1], dtype=np.int64)\n with tm.assert_produces_warning(FutureW" }, { "id": 298363, "commit_id": "e45d4d53dd98ac23f138eed57d39bd46be8048fd", "repo": "core", "path": "tests/components/alexa/test_smart_home.py", "file_name": "test_smart_home.py", "fun_name": "test_input_boolean", "commit_message": "Correct time stamp format in Alexa responses (#70267)", "code": "async def test_input_boolean(hass):\n \n device = (\"input_boolean.test\", \"off\", {\"friendly_name\": \"Test input boolean\"})\n appliance = await discovery_test(device, hass)\n\n assert appliance[\"endpointId\"] == \"input_boolean#test\"\n assert appliance[\"displayCategories\"][0] == \"OTHER\"\n assert appliance[\"friendlyName\"] == \"Test input boolean\"\n assert_endpoint_capabilities(\n appliance, \"Alexa.PowerController\", \"Alexa.EndpointHealth\", \"Alexa\"\n )\n\n await assert_power_controller_works(\n \"input_boolean#test\",\n \"input_boolean.turn_on\",\n \"input_boolean.turn_off\",\n hass,\n \"2022-04-19T07:53:05Z\",\n )\n\n\n@freeze_time(\"2022-04-19 07:53:05\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@freeze_time(\"2022-04-19 07:53:05\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 117, "n_words": 46, "vocab_size": 37, "complexity": 1, "nloc": 16, "token_counts": 76, "n_ast_nodes": 156, "n_identifiers": 8, "random_cut": "async def test_input_boolean(hass):\n \n device = (\"input_boolean.test\", \"off\", {\"friendly_name\": \"Test input boolean\"})" }, { "id": 245306, 
"commit_id": "fa77be290460e84ce7da975831cb7e687a419177", "repo": "mmdetection", "path": "tests/test_models/test_task_modules/test_prior_generators/test_anchor_generator.py", "file_name": "test_anchor_generator.py", "fun_name": "test_strides", "commit_message": "Refactor package", "code": "def test_strides():\n from mmdet.models.task_modules.prior_generators import AnchorGenerator\n\n # Square strides\n self = AnchorGenerator([10], [1.], [1.], [10])\n anchors = self.grid_anchors([(2, 2)], device='cpu')\n\n expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],\n [-5., 5., 5., 15.], [5., 5., 15., 15.]])\n\n assert torch.equal(anchors[0], expected_anchors)\n\n # Different strides in x and y direction\n self = AnchorGenerator([(10, 20)], [1.], [1.], [10])\n anchors = self.grid_anchors([(2, 2)], device='cpu')\n\n expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],\n [-5., 15., 5., 25.], [5., 15., 15., 25.]])\n\n assert torch.equal(anchors[0], expected_anchors)\n\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 186, "n_words": 82, "vocab_size": 41, "complexity": 1, "nloc": 12, "token_counts": 258, "n_ast_nodes": 306, "n_identifiers": 14, "random_cut": "def test_strides():\n from mmdet.models.task_modules.prior_generators import AnchorGenerator\n\n # Square strides\n self = AnchorGenerator([10], [1.], [1.], [10])\n anchors = self.grid_anchors([(2, 2)], device='cpu')\n\n expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],\n [-5., 5., 5., 15.], [5., 5., 15., 15.]])\n\n assert torch.equal(anchors[0], expected_anc" }, { "id": 13223, "commit_id": "82960f105149c478e4fc88e8b4fef8bbe2454429", "repo": "jina", "path": "tests/integration/external_deployment/test_external_deployment.py", "file_name": "test_external_deployment.py", "fun_name": "foo", "commit_message": "feat: distributed replicas across different hosts (#5217)", "code": "def foo(self, docs, *args, **kwargs):\n for doc in docs:\n doc.tags['name'] = self.runtime_args.name\n doc.tags['uuid'] = self._id\n\n\n@pytest.mark.parametrize('num_shards', [1, 2], indirect=True)", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize('num_shards', [1, 2], indirect=True)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 46, "n_words": 19, "vocab_size": 18, "complexity": 2, "nloc": 4, "token_counts": 40, "n_ast_nodes": 92, "n_identifiers": 14, "random_cut": "def foo(self, docs, *args, **kwargs):\n for doc in docs:\n doc.tags['name'] = self.runtime_ar" }, { "id": 161708, "commit_id": "7975c563e19f04be4c39fd7f36bc3939e5ed9d84", "repo": "rich", "path": "tests/test_text.py", "file_name": "test_text.py", "fun_name": "test_wrap_overflow_long", "commit_message": "Fix numerous typos in tests", "code": "def test_wrap_overflow_long():\n text = Text(\"bigword\" * 10)\n lines = text.wrap(Console(), 4, overflow=\"ellipsis\")\n assert len(lines) == 1\n assert lines[0] == Text(\"big…\")\n\n", "url": "https://github.com/Textualize/rich.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 31, "n_words": 20, "vocab_size": 17, "complexity": 1, "nloc": 5, "token_counts": 45, "n_ast_nodes": 77, "n_identifiers": 8, "random_cut": "def test_wrap_overflow_long():\n text" }, { "id": 9084, "commit_id": "4992c3d75bdbc3bfde8b49fa2b0f6694bfad9987", "repo": "insightface", "path": 
"parsing/dml_csr/networks/modules/ddgcn.py", "file_name": "ddgcn.py", "fun_name": "forward", "commit_message": "Create ddgcn.py", "code": "def forward(self, x):\n # b, c, h, w = x.size()\n node_k = self.node_k(x)\n node_v = self.node_v(x)\n node_q = self.node_q(x)\n b,c,h,w = node_k.size()\n node_k = node_k.view(b, c, -1).permute(0, 2, 1)\n node_q = node_q.view(b, c, -1)\n node_v = node_v.view(b, c, -1).permute(0, 2, 1)\n # A = k * q\n # AV = k * q * v\n # AVW = k *(q *v) * w\n AV = torch.bmm(node_q,node_v)\n AV = self.softmax(AV)\n AV = torch.bmm(node_k, AV)\n AV = AV.transpose(1, 2).contiguous()\n AVW = self.conv_wg(AV)\n AVW = self.bn_wg(AVW)\n AVW = AVW.view(b, c, h, -1)\n # out = F.relu_(self.out(AVW) + x)\n out = self.gamma * self.out(AVW) + x\n return out\n\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 250, "n_words": 104, "vocab_size": 51, "complexity": 1, "nloc": 17, "token_counts": 190, "n_ast_nodes": 292, "n_identifiers": 24, "random_cut": "def forward(self, x):\n # b, c, h, w = x.size()\n node_k = self.node_k(x)\n node_v = self.node_v(x)\n node_q = self.node_q(x)\n b,c,h,w = node_k.size()\n node_k = node_k.view(b, c, -1).permute(0, 2, 1)\n node_q = node_q.view(b, c, -1)\n node_v = node_v.view(b, c, -1).permute(0, 2, 1)\n # A = k * q\n # AV = k * q * v\n # AVW = k *(q *v) * w\n AV = torch.bmm(node_q,node_v)\n AV = self.softmax(AV)\n AV = torch.bmm(node_k, AV)\n AV = AV.transpose(1, 2).contiguous()\n AVW = self.conv_wg(AV)\n AVW = self.bn_wg(AVW)\n " }, { "id": 69412, "commit_id": "5f84993bae5df78e257cc2bfc41c123a1122a0b6", "repo": "erpnext", "path": "erpnext/controllers/queries.py", "file_name": "queries.py", "fun_name": "customer_query", "commit_message": "test: added test case to validate seachfields for customer, supplier", "code": "def customer_query(doctype, txt, searchfield, start, page_len, filters, as_dict=False):\n\tdoctype = \"Customer\"\n\tconditions = []\n\tcust_master_name = frappe.defaults.get_user_default(\"cust_master_name\")\n\n\tfields = [\"name\"]\n\tif cust_master_name != \"Customer Name\":\n\t\tfields = [\"customer_name\"]\n\n\tfields = get_fields(doctype, fields)\n\tsearchfields = frappe.get_meta(doctype).get_search_fields()\n\tsearchfields = \" or \".join(field + \" like %(txt)s\" for field in searchfields)\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\t**{\n\t\t\t\t\"fields\": \", \".join(fields),\n\t\t\t\t\"scond\": searchfields,\n\t\t\t\t\"mcond\": get_match_cond(doctype),\n\t\t\t\t\"fcond\": get_filters_cond(doctype, filters, conditions).replace(\"%\", \"%%\"),\n\t\t\t}\n\t\t),\n\t\t{\"txt\": \"%%%s%%\" % txt, \"_txt\": txt.replace(\"%\", \"\"), \"start\": start, \"page_len\": page_len},\n\t\tas_dict=as_dict,\n\t)\n\n\n# searches for supplier\n@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 60, "n_words": 85, "vocab_size": 69, "complexity": 3, "nloc": 31, "token_counts": 171, "n_ast_nodes": 311, "n_identifiers": 28, "random_cut": "def customer_query(doctype, txt, searchfield, start, page_len, filters, as_dict=False):\n\tdoctype = \"Customer\"\n\tconditions = []\n\tcust_maste" }, { "id": 66367, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": 
"erpnext/maintenance/doctype/maintenance_visit/test_maintenance_visit.py", "file_name": "test_maintenance_visit.py", "fun_name": "make_sales_person", "commit_message": "style: format code with black", "code": "def make_sales_person(name):\n\tsales_person = frappe.get_doc({\"doctype\": \"Sales Person\", \"sales_person_name\": name})\n\tsales_person.insert(ignore_if_duplicate=True)\n\n\treturn sales_person\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 8, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 31, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def make_sales_person(name):\n\tsales_person = frappe.get_doc({\"doctype\": \"Sales Person\", \"sales_person_name\": name})\n\tsales_person.insert(ignore_if_d" }, { "id": 218121, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/importlib/_bootstrap_external.py", "file_name": "_bootstrap_external.py", "fun_name": "_set_bootstrap_module", "commit_message": "add python 3.10.4 for windows", "code": "def _set_bootstrap_module(_bootstrap_module):\n global _bootstrap\n _bootstrap = _bootstrap_module\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 12, "n_words": 7, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 10, "n_ast_nodes": 17, "n_identifiers": 3, "random_cut": "def _set_bootstrap_module(_bootstrap_module):\n global _bootstrap\n " }, { "id": 64262, "commit_id": "4b62d2d7fe08ab9b36b533419ecb38d0aa5a3ab1", "repo": "erpnext", "path": "erpnext/patches/v13_0/create_website_items.py", "file_name": "create_website_items.py", "fun_name": "execute", "commit_message": "fix: (Linter) Write queries using QB/ORM and other minor lines for semgrep to skip", "code": "def execute():\n\tfrappe.reload_doc(\"e_commerce\", \"doctype\", \"website_item\")\n\tfrappe.reload_doc(\"e_commerce\", \"doctype\", \"website_item_tabbed_section\")\n\tfrappe.reload_doc(\"e_commerce\", \"doctype\", \"website_offer\")\n\tfrappe.reload_doc(\"e_commerce\", \"doctype\", \"recommended_items\")\n\tfrappe.reload_doc(\"e_commerce\", \"doctype\", \"e_commerce_settings\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"item\")\n\n\titem_fields = [\"item_code\", \"item_name\", \"item_group\", \"stock_uom\", \"brand\", \"image\",\n\t\t\"has_variants\", \"variant_of\", \"description\", \"weightage\"]\n\tweb_fields_to_map = [\"route\", \"slideshow\", \"website_image_alt\",\n\t\t\"website_warehouse\", \"web_long_description\", \"website_content\", \"thumbnail\"]\n\n\t# get all valid columns (fields) from Item master DB schema\n\titem_table_fields = frappe.db.sql(\"desc `tabItem`\", as_dict=1) # nosemgrep\n\titem_table_fields = [d.get('Field') for d in item_table_fields]\n\n\t# prepare fields to query from Item, check if the web field exists in Item master\n\tweb_query_fields = []\n\tfor web_field in web_fields_to_map:\n\t\tif web_field in item_table_fields:\n\t\t\tweb_query_fields.append(web_field)\n\t\t\titem_fields.append(web_field)\n\n\t# check if the filter fields exist in Item master\n\tor_filters = {}\n\tfor field in [\"show_in_website\", \"show_variant_in_website\"]:\n\t\tif field in item_table_fields:\n\t\t\tor_filters[field] = 1\n\n\tif not web_query_fields or not or_filters:\n\t\t# web fields to map are not present in Item master schema\n\t\t# most likely a fresh installation that doesnt need 
this patch\n\t\treturn\n\n\titems = frappe.db.get_all(\n\t\t\"Item\",\n\t\tfields=item_fields,\n\t\tor_filters=or_filters\n\t)\n\ttotal_count = len(items)\n\n\tfor count, item in enumerate(items, start=1):\n\t\tif frappe.db.exists(\"Website Item\", {\"item_code\": item.item_code}):\n\t\t\tcontinue\n\n\t\t# make new website item from item (publish item)\n\t\twebsite_item = make_website_item(item, save=False)\n\t\twebsite_item.ranking = item.get(\"weightage\")\n\n\t\tfor field in web_fields_to_map:\n\t\t\twebsite_item.update({field: item.get(field)})\n\n\t\twebsite_item.save()\n\n\t\t# move Website Item Group & Website Specification table to Website Item\n\t\tfor doctype in (\"Website Item Group\", \"Item Website Specification\"):\n\t\t\tfrappe.db.set_value(\n\t\t\t\tdoctype,\n\t\t\t\t{\"parenttype\": \"Item\", \"parent\": item.item_code}, # filters\n\t\t\t\t{\"parenttype\": \"Website Item\", \"parent\": website_item.name} # value dict\n\t\t\t)\n\n\t\tif count % 20 == 0: # commit after every 20 items\n\t\t\tfrappe.db.commit()\n\n\t\tfrappe.utils.update_progress_bar('Creating Website Items', count, total_count)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 197, "n_words": 251, "vocab_size": 158, "complexity": 13, "nloc": 47, "token_counts": 359, "n_ast_nodes": 640, "n_identifiers": 38, "random_cut": "def execute():\n\tfrappe.reload_doc(\"e_commerce\", \"doctype\", \"website_item\")\n\tfrappe.reload_doc(\"e_commerce\", \"doctype\", \"website_item_tabbed_section\")\n\tfrappe.reload_doc(\"e_commerce\", \"doctype\", \"website_offer\")\n\tfrappe.reload_doc(\"e_commerce\", \"doctype\", \"recommended_items\")\n\tfrappe.reload_doc(\"e_commerce\", \"doctype\", \"e_commerce_settings\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"item\")\n\n\titem_fields = [\"item_code\", \"item_name\", \"item_group\", \"stock_uom\", \"brand\", \"image\",\n\t\t\"has_variants\", \"variant_of\", \"description\", \"weightage\"]\n\tweb_fields_to_map = [\"route\", \"slideshow\", \"website_image_alt\",\n\t\t\"website_warehouse\", \"web_long_description\", \"website_content\", \"thumbnail\"]\n\n\t# get all valid columns (fields) from Item master DB schema\n\titem_table_fields = frappe.db.sql(\"desc `tabItem`\", as_dict=1) # nosemgrep\n\titem_table_fields = [d.get('Field') for d in item_table_fields]\n\n\t# prepare fields to query from Item, check if the web field exists in Item master\n\tweb_query_fields = []\n\tfor web_field in web_fields_to_map:\n\t\tif web_field in item_table_fields:\n\t\t\tweb_query_fields.append(web_field)\n\t\t\titem_fields.append(web_field)\n\n\t# check if the filter fields exist in Item master\n\tor_filters = {}\n\tfor field in [\"show_in_website\", \"show_variant_in_website\"]:\n\t\tif field in item_table_fields:\n" }, { "id": 7606, "commit_id": "03b4ab273abd7e22a56bb550b56f3d667200abf9", "repo": "ludwig", "path": "ludwig/features/text_feature.py", "file_name": "text_feature.py", "fun_name": "update_config_with_metadata", "commit_message": "Encoder refactor V2 (#2370)\n\n* Added base files and some initial code\r\n\r\n* More files created, fleshing out binary feature and corresponding encoders\r\n\r\n* Added more schema infra\r\n\r\n* Registered all feature encoders\r\n\r\n* Separated feature utils infra\r\n\r\n* Added all preprocessing classes\r\n\r\n* Filled out rest of schema configs\r\n\r\n* Fixed preproc dataclass\r\n\r\n* Fixed small errors blocking import\r\n\r\n* Tests should be 
passing\r\n\r\n* Deleted unnecesssary files and removed commented out code\r\n\r\n* fixed flake8\r\n\r\n* Fixed most tests\r\n\r\n* fixed pattern validation\r\n\r\n* Fixed missing val strategies and solved custom encoder update issue\r\n\r\n* Removed preprocessing from features due to schema SSOT\r\n\r\n* fix flake 8\r\n\r\n* Started encoder schema work\r\n\r\n* Parallel CNN Encoder\r\n\r\n* StackedCNN Encoder\r\n\r\n* Added image encoders\r\n\r\n* Finished sequence encoders\r\n\r\n* Partway through text encoders\r\n\r\n* Added text encoders\r\n\r\n* Bag Encoders\r\n\r\n* Binary and Date Encoders\r\n\r\n* category, date, h3, and set encoders\r\n\r\n* Wired up encoder schemas\r\n\r\n* Switched input feature encoder schema definitions\r\n\r\n* Fixed handful of issues\r\n\r\n* Fix schema issues\r\n\r\n* Refactored a bunch of test configs\r\n\r\n* Small changes\r\n\r\n* Removed default param from register_encoder\r\n\r\n* Schema working now, working on refactoring\r\n\r\n* Finished decoder schemas\r\n\r\n* Removed default param from register_decoder\r\n\r\n* Added some default params to output features and more decoder work\r\n\r\n* Refactored all input feature encoder/decoder referencing\r\n\r\n* Refactored pretty much all the tests\r\n\r\n* Added back constants\r\n\r\n* Solved gbm issue\r\n\r\n* Fixed save_load test\r\n\r\n* various fixes\r\n\r\n* Fixed import issue\r\n\r\n* Flake 8 and various fixes\r\n\r\n* Solved more failed tests\r\n\r\n* Refactored missed tests\r\n\r\n* Removed commented lines\r\n\r\n* Added init file for decoders schema\r\n\r\n* Fixed failing tests\r\n\r\n* Fixed hyperopt shared params test\r\n\r\n* Added backwards compatability logic and test\r\n\r\n* Flake 8\r\n\r\n* removed comment\r\n\r\n* Added base files and some initial code\r\n\r\n* More files created, fleshing out binary feature and corresponding encoders\r\n\r\n* Added more schema infra\r\n\r\n* Registered all feature encoders\r\n\r\n* Separated feature utils infra\r\n\r\n* Added all preprocessing classes\r\n\r\n* Filled out rest of schema configs\r\n\r\n* Fixed preproc dataclass\r\n\r\n* Fixed small errors blocking import\r\n\r\n* Tests should be passing\r\n\r\n* Deleted unnecesssary files and removed commented out code\r\n\r\n* fixed flake8\r\n\r\n* Fixed most tests\r\n\r\n* fixed pattern validation\r\n\r\n* Fixed missing val strategies and solved custom encoder update issue\r\n\r\n* Removed preprocessing from features due to schema SSOT\r\n\r\n* fix flake 8\r\n\r\n* Started encoder schema work\r\n\r\n* Parallel CNN Encoder\r\n\r\n* StackedCNN Encoder\r\n\r\n* Added image encoders\r\n\r\n* Finished sequence encoders\r\n\r\n* Partway through text encoders\r\n\r\n* Added text encoders\r\n\r\n* Bag Encoders\r\n\r\n* Binary and Date Encoders\r\n\r\n* category, date, h3, and set encoders\r\n\r\n* Wired up encoder schemas\r\n\r\n* Switched input feature encoder schema definitions\r\n\r\n* Fixed handful of issues\r\n\r\n* Fix schema issues\r\n\r\n* Refactored a bunch of test configs\r\n\r\n* Small changes\r\n\r\n* Removed default param from register_encoder\r\n\r\n* Schema working now, working on refactoring\r\n\r\n* Finished decoder schemas\r\n\r\n* Removed default param from register_decoder\r\n\r\n* Added some default params to output features and more decoder work\r\n\r\n* Refactored all input feature encoder/decoder referencing\r\n\r\n* Refactored pretty much all the tests\r\n\r\n* Added back constants\r\n\r\n* Solved gbm issue\r\n\r\n* Fixed save_load test\r\n\r\n* various fixes\r\n\r\n* Fixed import 
issue\r\n\r\n* Flake 8 and various fixes\r\n\r\n* Solved more failed tests\r\n\r\n* Refactored missed tests\r\n\r\n* Removed commented lines\r\n\r\n* Added init file for decoders schema\r\n\r\n* Fixed failing tests\r\n\r\n* Fixed hyperopt shared params test\r\n\r\n* Added backwards compatability logic and test\r\n\r\n* Flake 8\r\n\r\n* removed comment\r\n\r\n* Skipping CTRL Encoder test since it's blasting memory\r\n\r\n* Fixed audio_feature test\r\n\r\n* Addressed failing tests\r\n\r\n* Fixed backwards compatability\r\n\r\n* Fixed more failing tests\r\n\r\n* Flake 8\r\n\r\n* Fixed more tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Refactored default logic for all features\r\n\r\n* Fixed H3 weighted_sum encoder wrong type\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix import issue\r\n\r\n* Mark slow HF tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed defaults tests\r\n\r\n* Pin Ray nightly version\r\n\r\n* fix link\r\n\r\n* pin torch to 07/26\r\n\r\n* cleanup\r\n\r\n* upgrade ray pinned version to enable parquet partition filtering\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* downgrade Ray to ensure TensorDtypes are not inferred during Ray Dataset <=> Dask conversions\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Removed custom encoder decoder helper method\r\n\r\n* unpin torch\r\n\r\n* Flake 8\r\n\r\n* Daniel feedback\r\n\r\n* Small fixes\r\n\r\n* Fixed default weights init\r\n\r\n* Added test with encoder dependencies for global defaults\r\n\r\n* Fixed Arnav's test\r\n\r\n* Addressed Arnav's feedback\r\n\r\n* Address nit\r\n\r\n* Addressed feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Address nit\r\n\r\n* Fix test\r\n\r\n* Initial feedback refactor\r\n\r\n* More refactoring\r\n\r\n* Added vocab field to all text_encoder configs\r\n\r\n* More refactoring\r\n\r\n* Fixed more tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix audio feature test, also s/logging/logger.\r\n\r\n* param names should start with lowercase s/N/n\r\n\r\n* Re-added schema utils used in encoder refactor.\r\n\r\n* Removes unused overwrite_defaults()\r\n\r\n* Oops, name is passed to feature as a kwarg not a member of the feature config. Why? Probably should change that.\r\n\r\n* Change lowercase default back to True. Fixes test_strings_utils\r\n\r\n* Set feature validation error with output size 1.\r\n\r\n* MLP mixer encoder needs num_channels.\r\n\r\n* Use schema.dump instead of .__dict__ to convert marshmallow dataclass to dict\r\n\r\n* (x,) in python is a tuple with a single element x. Watch out for this when defining schemas.\r\n\r\n* Construct features by using build_single_input/output to share code for deserializing feature configs. 
Also changes ECD to BaseModel, IMO its confusing to import ECD to use a class method from BaseModel.\r\n\r\n* Fix test_trainer_utils, adds convenience method BaseFeature.load_from_dictionary\r\n\r\n* Use feature load_from_dictionary instead of BaseModel in feature tests.\r\n\r\n* Populate encoder and decoder types in shared test fixtures, fixes error expectations in test_validate_config_combiner.py\r\n\r\n* Fixes test_validate_config_misc.py by ensuring only one option of OneOf allows None, because OneOf fails validation if more than one condition match.\r\n\r\n* Updates test_defaults.py\r\n\r\n* Adds type, column, proc_column to feature schemas. Revert feature tests by passing in config dict again.\r\n\r\n* decorate feature base classes with @dataclass, fixes failure building input features in trainer.\r\n\r\n* Implement _serialize for PreprocessingDataclassField.\r\n\r\n* use type(feature) to get schema class.\r\n\r\n* Fix test_trainer_utils.py\r\n\r\n* audio_feature requires embedding_size, but passthrough encoder does not have this property. Technically, passthrough encoder is not supported for audio features.\r\n\r\n* Wow, apparently the order of elements in the oneOf affects which error message we get from jsonschema.\r\n\r\n* Get default encoders from feature schema.\r\n\r\n* Get encoder defaults from schema in config_utils.py\r\n\r\n* Make number feature allow decoders without clip property\r\n\r\n* s/list/List\r\n\r\n* Adds reduce_output to h3 encoder.\r\n\r\n* Moves decoder params into nested decoder.\r\n\r\n* Update processing parameters with computed_fill_value.\r\n\r\n* Removes test code.\r\n\r\n* Adds input_size to decoder base because some features assume decoders have an input_size\r\n\r\n* dense encoder not supported for bag features, changed to embed.\r\n\r\n* Adds input_size param to dense encoder schema, since its a required parameter of dense encoder.\r\n\r\n* Fixes vector feature input_size in encoder metadata.\r\n\r\n* Fixes test reducers, set sequence reduce mode in output feature base.\r\n\r\n* Don't nest encoder parameters in decoder\r\n\r\n* Fixes test_torchscript, get num_classes from encoder config.\r\n\r\n* Audio feature padding is float, not int.\r\n\r\n* Adds temp check for threshold to fix GBM tests.\r\n\r\n* Adds missing value strategy drop_row for vector feature in test.\r\n\r\n* Drop row should work even if computed_fill_value is an empty string\r\n\r\n* Removes duplicated TOP_K constant.\r\n\r\n* Consolidated set_default_values\r\n\r\n* Removes commented-out defaults.\r\n\r\n* Remove load_config from OutputFeature, it isn't doing anything here.\r\n\r\n* Removes comment.\r\n\r\n* Fix type annotations for input/output feature constructors.\r\n\r\n* Fixes output feature dependencies being ignored.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Adds test for construction of output features with dependencies.\r\n\r\n* Encoder/Decoder config now lives on encoder/decoder object\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixes decoder params to match their respective classes. 
Moves fc_stack params and threshold back to output feature.\r\n\r\n* Make clip property of number output feature again.\r\n\r\n* Adds threshold property to set feature schema, use this property instead of storing it in the decoder.\r\n\r\n* input_size in output_feature instead of decoder.\r\n\r\n* Made vector_size property of vector_feature.\r\n\r\n* Fixed gbm tests\r\n\r\n* Fixed flake 8\r\n\r\n* Re-adds num_classes as member of category output feature.\r\n\r\n* Makes vocab_size match vocab used in preprocessing.\r\n\r\n* num_classes in CategoryOutputFeature.\r\n\r\n* Moves num_classes from decoder to category output feature.\r\n\r\n* Fixes test_model_training_options. Copies fc_layer keys into decoder if they are present on output features.\r\n\r\n* Adds field descriptors for fc_layers params in BaseOutputFeatureConfig.\r\n\r\nCo-authored-by: connor-mccorm \r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: connor-mccorm <97468934+connor-mccorm@users.noreply.github.com>\r\nCo-authored-by: Geoffrey Angus \r\nCo-authored-by: Arnav Garg \r\nCo-authored-by: Daniel Treiman ", "code": "def update_config_with_metadata(output_feature, feature_metadata, *args, **kwargs):\n output_feature[DECODER][\"vocab_size\"] = feature_metadata[\"vocab_size\"]\n output_feature[DECODER][\"max_sequence_length\"] = feature_metadata[\"max_sequence_length\"]\n if isinstance(output_feature[LOSS][\"class_weights\"], (list, tuple)):\n # [0, 0] for UNK and PAD\n output_feature[LOSS][\"class_weights\"] = [0, 0] + output_feature[LOSS][\"class_weights\"]\n if len(output_feature[LOSS][\"class_weights\"]) != output_feature[DECODER][\"vocab_size\"]:\n raise ValueError(\n \"The length of class_weights ({}) is not compatible with \"\n \"the number of classes ({})\".format(\n len(output_feature[LOSS][\"class_weights\"]), output_feature[DECODER][\"vocab_size\"]\n )\n )\n\n if output_feature[LOSS][\"class_similarities_temperature\"] > 0:\n if \"class_similarities\" in output_feature:\n distances = output_feature[\"class_similarities\"]\n temperature = output_feature[LOSS][\"class_similarities_temperature\"]\n for i in range(len(distances)):\n distances[i, :] = softmax(distances[i, :], temperature=temperature)\n output_feature[LOSS][\"class_similarities\"] = distances\n else:\n raise ValueError(\n \"class_similarities_temperature > 0,\"\n \"but no class similarities are provided \"\n \"for feature {}\".format(output_feature[COLUMN])\n )\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 455, "n_words": 97, "vocab_size": 73, "complexity": 6, "nloc": 25, "token_counts": 212, "n_ast_nodes": 352, "n_identifiers": 19, "random_cut": "def update_config_with_metadata(output_feature, feature_metadata, *args, **kwargs):\n output_feature[DECODER][\"vocab_size\"] = feature_metadata[\"vocab_size\"]\n output_feature[DECODER][\"max_sequence_length\"] = feature_metadata[\"max_sequence_length\"]\n if isinstance(output_feature[LOSS][\"class_weights\"], (list, tuple)):\n # [0, 0] for UNK and PAD\n output_feature[LOSS][\"class_weights\"] = [0, 0] + output_feature[LOSS][\"class_weights\"]\n if len(output_feature[LOSS][\"class_weights\"]) != output_feature[DECODER][\"vocab_size\"]:\n raise ValueError(\n \"The length of class_weights ({}) is not compatible with \"\n \"the number of classes ({})\".format(\n len(output_feature[LOSS][\"class_weights\"]), output_feature[DECODER][\"vocab_size\"]\n )\n 
)\n\n if output_feature[LOSS][\"class_similarities_temperature\"] > 0:\n if \"class_similarities\" in output_feature:\n distances = output_feature[\"class_similarities\"]\n temperature = output_feature[LOSS][\"class_similarities_temperature\"]\n " }, { "id": 19434, "commit_id": "5a151615aa47901f7c44e5b543fe2e2b0f6e9d24", "repo": "pipenv", "path": "tests/integration/test_lock.py", "file_name": "test_lock.py", "fun_name": "test_outdated_setuptools_with_pep517_legacy_build_meta_is_updated", "commit_message": "missed these tests becasue they run only on earlier python versions.", "code": "def test_outdated_setuptools_with_pep517_legacy_build_meta_is_updated(PipenvInstance):\n \n with PipenvInstance(chdir=True) as p:\n c = p.pipenv('run pip install \"setuptools<=40.2\"')\n assert c.returncode == 0\n c = p.pipenv(\"run python -c 'import setuptools; print(setuptools.__version__)'\")\n assert c.returncode == 0\n assert c.stdout.splitlines()[1] == \"40.2.0\"\n c = p.pipenv(\"install legacy-backend-package\")\n assert c.returncode == 0\n assert \"vistir\" in p.lockfile[\"default\"]\n\n\n@pytest.mark.lock\n@pytest.mark.install\n@pytest.mark.skip_windows\n@pytest.mark.skipif(sys.version_info >= (3, 9), reason=\"old setuptools doesn't work\")\n@pytest.mark.needs_internet", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "@pytest.mark.lock\n@pytest.mark.install\n@pytest.mark.skip_windows\n@pytest.mark.skipif(sys.version_info >= (3, 9), reason=\"old setuptools doesn't work\")\n@pytest.mark.needs_internet", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 113, "n_words": 56, "vocab_size": 41, "complexity": 1, "nloc": 10, "token_counts": 80, "n_ast_nodes": 212, "n_identifiers": 20, "random_cut": "def test_outdated_setuptools_with_pep517_legacy_build_meta_is_updated(PipenvInstance):\n \n with PipenvInstance(chdir=True) as p:\n c = p.pipenv('run pip install \"setuptools<=40.2\"')\n assert c.returncode == 0\n c = p.pipenv(\"run python -c 'import setuptools; print(setuptools.__version__)'\")\n assert c.returncode == 0\n assert c.stdout.splitlines()[1] == \"40.2.0\"\n " }, { "id": 252213, "commit_id": "83e543c3e66654b952f1979c0adaa62df91b2832", "repo": "mitmproxy", "path": "mitmproxy/connection.py", "file_name": "connection.py", "fun_name": "set_state", "commit_message": "add multi proxy mode\n\nThis commit makes it possible for mitmproxy to spawn multiple\nTCP/UDP proxy servers at the same time, see\nhttps://github.com/mitmproxy/mitmproxy/discussions/5288", "code": "def set_state(self, state):\n self.peername = tuple(state[\"address\"]) if state[\"address\"] else None\n self.alpn = state[\"alpn\"]\n self.cipher = state[\"cipher_name\"]\n self.id = state[\"id\"]\n self.sni = state[\"sni\"]\n self.timestamp_end = state[\"timestamp_end\"]\n self.timestamp_start = state[\"timestamp_start\"]\n self.timestamp_tls_setup = state[\"timestamp_tls_setup\"]\n self.tls_version = state[\"tls_version\"]\n # only used in sans-io\n self.state = ConnectionState(state[\"state\"])\n self.sockname = tuple(state[\"sockname\"]) if state[\"sockname\"] else None\n self.error = state[\"error\"]\n self.tls = state[\"tls\"]\n self.certificate_list = [\n certs.Cert.from_state(x) for x in state[\"certificate_list\"]\n ]\n self.mitmcert = (\n certs.Cert.from_state(state[\"mitmcert\"])\n if state[\"mitmcert\"] is not None\n else None\n )\n self.alpn_offers = state[\"alpn_offers\"]\n self.cipher_list = state[\"cipher_list\"]\n self.proxy_mode = mode_specs.ProxyMode.from_state(state[\"proxy_mode\"])\n", 
"url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 275, "n_words": 85, "vocab_size": 60, "complexity": 5, "nloc": 25, "token_counts": 213, "n_ast_nodes": 360, "n_identifiers": 28, "random_cut": "def set_state(self, state):\n self.peername = tuple(state[\"address\"]) if state[\"address\"] else None\n self.alpn = state[\"alpn\"]\n self.cipher = state[\"cipher_name\"]\n self.id = state[\"id\"]\n self.sni = state[\"sni\"]\n self.timestamp_end = state[\"timestamp_end\"]\n self.timestamp_start = state[\"timestamp_start\"]\n self.timestamp_tls_setup = state[\"timestamp_tls_setup\"]\n self.tls_version = state[\"tls_version\"]\n # only used in sans-io\n self.state = ConnectionState(state[\"state\"])\n self.soc" }, { "id": 156356, "commit_id": "00572071d15e7e8cfc20d8342b00aabadf0d2102", "repo": "dask", "path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "fun_name": "test_local", "commit_message": "Change `to_parquet` default to `write_metadata_file=None` (#8988)\n\n* Refactor to_parquet\r\n\r\nA bit of refactoring before changing the default of\r\n`write_metadata_file` to `None` in `to_parquet`.\r\n\r\n- Simplify implementation\r\n- Don't include file metadata in `write_partition` calls if it's not\r\nneeded\r\n- Everything needed to support implementing `write_metadata_file=None`\r\nas default *except* changing the value (to ensure tests pass).\r\n\r\n* Fixup failing parquet tests\r\n\r\nMost of the failures are due to divisions not being known by default\r\nanymore, since they're only known by default if a `_metadata` file is\r\npresent.\r\n\r\n* Respond to feedback", "code": "def test_local(tmpdir, write_engine, read_engine, has_metadata):\n tmp = str(tmpdir)\n data = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n df = dd.from_pandas(data, chunksize=500)\n\n kwargs = {\"write_metadata_file\": True} if has_metadata else {}\n df.to_parquet(tmp, write_index=False, engine=write_engine, **kwargs)\n\n files = os.listdir(tmp)\n assert (\"_common_metadata\" in files) == has_metadata\n assert (\"_metadata\" in files) == has_metadata\n assert \"part.0.parquet\" in files\n\n df2 = dd.read_parquet(tmp, index=False, engine=read_engine)\n\n assert len(df2.divisions) > 1\n\n out = df2.compute(scheduler=\"sync\").reset_index()\n\n for column in df.columns:\n assert (data[column] == out[column]).all()\n\n\n@pytest.mark.parametrize(\"index\", [False, True])\n@write_read_engines_xfail", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"index\", [False, True])\n@write_read_engines_xfail", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 219, "n_words": 89, "vocab_size": 68, "complexity": 3, "nloc": 24, "token_counts": 228, "n_ast_nodes": 389, "n_identifiers": 47, "random_cut": "def test_local(tmpdir, write_engine, read_engine, has_metadata):\n tmp = str(tmpdir)\n data = pd.DataFrame(\n {\n \"i32\": np.arange(1000, dtype=np.int32),\n \"i64\": np.arange(1000, dtype=np.int64),\n \"f\": np.arange(1000, dtype=np.float64),\n \"bhello\": np.random.choice([\"hello\", \"yo\", \"people\"], size=1000).astype(\n \"O\"\n ),\n }\n )\n df = dd.from_pandas(data, chunksize=500)\n\n kwargs = {\"write_metadata_file\": True} if has_metada" }, { 
"id": 119827, "commit_id": "c3a4a6e63da11246611247feac7ff4c00750ae21", "repo": "jax", "path": "jax/_src/lax/lax.py", "file_name": "lax.py", "fun_name": "_top_k_translation_rule", "commit_message": "Revert previous change\n\nPiperOrigin-RevId: 435397906", "code": "def _top_k_translation_rule(ctx, avals_in, avals_out, x, *, k):\n return xla.xla_destructure(ctx.builder, xops.TopK(x, k))\n\ntop_k_p = Primitive('top_k')\ntop_k_p.multiple_results = True\ntop_k_p.def_impl(partial(xla.apply_primitive, top_k_p))\ntop_k_p.def_abstract_eval(_top_k_abstract_eval)\nxla.register_translation(top_k_p, _top_k_translation_rule)\nad.primitive_jvps[top_k_p] = _top_k_jvp\nbatching.primitive_batchers[top_k_p] = _top_k_batch_rule\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 21, "n_words": 28, "vocab_size": 25, "complexity": 1, "nloc": 2, "token_counts": 33, "n_ast_nodes": 132, "n_identifiers": 26, "random_cut": "def _top_k_translation_rule(ctx, avals_in, avals_out, x, *, k):\n return xla.xla_destructure(ctx.builder, xops.TopK(x, k))\n\ntop_k_p = Primitive('top_k')\ntop_k_p.multiple_results = True\ntop_k_p.def_impl(partial(xla.apply_primi" }, { "id": 170125, "commit_id": "2410fca2c62898fb29659d5b93273a65515d695b", "repo": "pandas", "path": "pandas/tests/io/json/test_pandas.py", "file_name": "test_pandas.py", "fun_name": "test_series_roundtrip_simple", "commit_message": "DEP: Enforce numpy keyword deprecation in read_json (#49083)", "code": "def test_series_roundtrip_simple(self, orient, string_series):\n data = string_series.to_json(orient=orient)\n result = read_json(data, typ=\"series\", orient=orient)\n\n expected = string_series\n if orient in (\"values\", \"records\"):\n expected = expected.reset_index(drop=True)\n if orient != \"split\":\n expected.name = None\n\n tm.assert_series_equal(result, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 95, "n_words": 32, "vocab_size": 25, "complexity": 3, "nloc": 9, "token_counts": 73, "n_ast_nodes": 119, "n_identifiers": 15, "random_cut": "def test_series_roundtrip_simple(self, orient, string_series):\n data = string_series.to_json(orient=orient)\n result = read_json(data, typ=\"series\", orient=orient)\n\n expected = string_series\n if orient in (\"values\", \"records\"):\n " }, { "id": 283635, "commit_id": "b71abcfbf4d7e8ac1855522aff0378e13c8b5362", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/stocks/options/test_yfinance_view.py", "file_name": "test_yfinance_view.py", "fun_name": "test_show_parity", "commit_message": "Updating some names (#1575)\n\n* quick econ fix\r\n\r\n* black\r\n\r\n* keys and feature flags\r\n\r\n* terminal name :eyes:\r\n\r\n* some more replacements\r\n\r\n* some more replacements\r\n\r\n* edit pyproject\r\n\r\n* gst -> openbb\r\n\r\n* add example portfolios back to git\r\n\r\n* Update api from gst\r\n\r\n* sorry. 
skipping some tests\r\n\r\n* another round of names\r\n\r\n* another round of test edits\r\n\r\n* Missed some .gst refs and update timezone\r\n\r\n* water mark stuff\r\n\r\n* Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS\r\n\r\n* fix more GST to OpenBB Terminal\r\n\r\n* Logging : merge conflicts with main\r\n\r\n* Revert wrong files\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA ", "code": "def test_show_parity(mocker):\n # MOCK CHARTS\n mocker.patch(\n target=\"openbb_terminal.stocks.options.yfinance_view.theme.visualize_output\"\n )\n\n # MOCK EXPORT_DATA\n mocker.patch(target=\"openbb_terminal.stocks.options.yfinance_view.export_data\")\n\n yfinance_view.show_parity(\n ticker=\"PM\",\n exp=\"2022-01-07\",\n put=True,\n ask=True,\n mini=0.0,\n maxi=100.0,\n export=\"csv\",\n )\n\n\n@pytest.mark.default_cassette(\"test_risk_neutral_vals\")\n@pytest.mark.vcr", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.default_cassette(\"test_risk_neutral_vals\")\n@pytest.mark.vcr", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 97, "n_words": 23, "vocab_size": 20, "complexity": 1, "nloc": 14, "token_counts": 58, "n_ast_nodes": 117, "n_identifiers": 17, "random_cut": "def test_show_parity(mocker):\n # MOCK CHARTS\n mocker.patch(\n target=\"openbb_terminal.stocks.options.yfinance_view.theme.visualize_output\"\n )\n\n # MOCK EXPORT_DATA\n mocker.patch(target=\"openbb_terminal.stocks.options.yfinance_view.export_data\")\n\n yfinance_view.show_" }, { "id": 33411, "commit_id": "6519150c315bdcd415bbd115cec11e839f3eb866", "repo": "transformers", "path": "src/transformers/models/layoutlmv3/configuration_layoutlmv3.py", "file_name": "configuration_layoutlmv3.py", "fun_name": "inputs", "commit_message": "Add image height and width to ONNX dynamic axes (#18915)", "code": "def inputs(self) -> Mapping[str, Mapping[int, str]]:\n # The order of inputs is different for question answering and sequence classification\n if self.task in [\"question-answering\", \"sequence-classification\"]:\n return OrderedDict(\n [\n (\"input_ids\", {0: \"batch\", 1: \"sequence\"}),\n (\"attention_mask\", {0: \"batch\", 1: \"sequence\"}),\n (\"bbox\", {0: \"batch\", 1: \"sequence\"}),\n (\"pixel_values\", {0: \"batch\", 1: \"num_channels\", 2: \"height\", 3: \"width\"}),\n ]\n )\n else:\n return OrderedDict(\n [\n (\"input_ids\", {0: \"batch\", 1: \"sequence\"}),\n (\"bbox\", {0: \"batch\", 1: \"sequence\"}),\n (\"attention_mask\", {0: \"batch\", 1: \"sequence\"}),\n (\"pixel_values\", {0: \"batch\", 1: \"num_channels\"}),\n ]\n )\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 355, "n_words": 79, "vocab_size": 44, "complexity": 2, "nloc": 19, "token_counts": 162, "n_ast_nodes": 275, "n_identifiers": 7, "random_cut": "def inputs(self) -> Mapping[str, Mapping[int, str]]:\n # The order of inputs is different for question answering and sequence classification\n if self.task in [\"question-answering\", \"sequence-classification\"]:\n return OrderedDict(\n [\n (\"input_ids\", {0: \"batch\", 1: \"sequence\"}),\n (\"attention_mask\", {0: \"batch\", 1: \"sequence\"}),\n (\"bbox\", {0: \"batch\", 1: \"" }, { "id": 65150, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": 
"erpnext/accounts/report/asset_depreciations_and_balances/asset_depreciations_and_balances.py", "file_name": "asset_depreciations_and_balances.py", "fun_name": "get_assets", "commit_message": "style: format code with black", "code": "def get_assets(filters):\n\treturn frappe.db.sql(\n\t\t,\n\t\t{\"to_date\": filters.to_date, \"from_date\": filters.from_date, \"company\": filters.company},\n\t\tas_dict=1,\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 7, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 49, "token_counts": 39, "n_ast_nodes": 64, "n_identifiers": 9, "random_cut": "def get_assets(filters):\n\treturn frappe.db.sql(\n\t\t,\n\t\t{\"to_date\": filters.to_date, \"from_date\": filters.from_date, \"company\": filters.company},\n\t\tas_dict=" }, { "id": 106162, "commit_id": "232a43943e87dfedcc328a9a3d3b4d89ea5c6627", "repo": "datasets", "path": "src/datasets/table.py", "file_name": "table.py", "fun_name": "__iter__", "commit_message": "Sharded save_to_disk + multiprocessing (#5268)\n\n* add num_shards, num_proc, storage_options to save_to_disk\r\n\r\n* minor\r\n\r\n* add tests\r\n\r\n* remove old s3fs integreation tests\r\n\r\n* style\r\n\r\n* style\r\n\r\n* Update DatasetDict.save_to_disk\r\n\r\n* test dataset dict\r\n\r\n* update dataset dict load_from_disk\r\n\r\n* minor\r\n\r\n* update test\r\n\r\n* update docs\r\n\r\n* backport to_reader to pyarrow < 8\r\n\r\n* typo\r\n\r\n* support both max_shard_size and num_shards\r\n\r\n* style\r\n\r\n* docstrings\r\n\r\n* test _estimate_nbytes\r\n\r\n* add test for num_shards\r\n\r\n* style\r\n\r\n* mario's comment\r\n\r\n* add config.PBAR_REFRESH_TIME_INTERVAL\r\n\r\n* fix docstrings\r\n\r\n* use kwargs_iterable in iflatmap_unordered\r\n\r\n* fix tests", "code": "def __iter__(self):\n for batch in self.table._batches:\n if self.max_chunksize is None or len(batch) <= self.max_chunksize:\n yield batch\n else:\n for offset in range(0, len(batch), self.max_chunksize):\n yield batch.slice(offset, self.max_chunksize)\n\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 103, "n_words": 26, "vocab_size": 22, "complexity": 5, "nloc": 7, "token_counts": 62, "n_ast_nodes": 96, "n_identifiers": 10, "random_cut": "def __iter__(self):\n for batch in self.table._batches:\n if self.max_chunksize is None or len(batch) <= self.max_chunksize:\n yield batch\n else:\n for offset in range(0, " }, { "id": 27658, "commit_id": "3673e7e11f22e5a695c708b7a594c11857a93898", "repo": "saleor", "path": "saleor/graphql/checkout/tests/test_checkout_promo_codes.py", "file_name": "test_checkout_promo_codes.py", "fun_name": "test_checkout_add_voucher_code_by_token", "commit_message": "Unify checkout mutations/resolvers to use id field. 
(#9862)\n\n* Unify checkout mutations/resolvers to use id field.\r\n\r\n* Update changelog\r\n\r\n* Remove uneeded \" \" in mutation's field description", "code": "def test_checkout_add_voucher_code_by_token(api_client, checkout_with_item, voucher):\n variables = {\n \"id\": to_global_id_or_none(checkout_with_item),\n \"promoCode\": voucher.code,\n }\n data = _mutate_checkout_add_promo_code(api_client, variables)\n\n assert not data[\"errors\"]\n assert data[\"checkout\"][\"token\"] == str(checkout_with_item.token)\n assert data[\"checkout\"][\"voucherCode\"] == voucher.code\n\n\n@mock.patch(\"saleor.plugins.webhook.tasks.send_webhook_request_sync\")", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@mock.patch(\"saleor.plugins.webhook.tasks.send_webhook_request_sync\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 58, "n_words": 28, "vocab_size": 24, "complexity": 1, "nloc": 9, "token_counts": 67, "n_ast_nodes": 126, "n_identifiers": 13, "random_cut": "def test_checkout_add_voucher_code_by_token(api_client, checkout_with_item, voucher):\n variables = {\n \"id\": to_global_id_or_none(checkout_with_item),\n \"promoCode\": voucher.code,\n }\n data = _mutate_checkout_add_promo_code(api_client, variables)\n\n assert not data[\"errors\"]\n assert data[\"checkout\"][\"token\"] ==" }, { "id": 32211, "commit_id": "c1c79b06550b587b2a975016ef9d18b53258025b", "repo": "transformers", "path": "tests/models/nllb/test_tokenization_nllb.py", "file_name": "test_tokenization_nllb.py", "fun_name": "test_mask_token", "commit_message": "NLLB tokenizer (#18126)\n\n* NLLB tokenizer\r\n\r\n* Apply suggestions from code review - Thanks Stefan!\r\n\r\nCo-authored-by: Stefan Schweter \r\n\r\n* Final touches\r\n\r\n* Style :)\r\n\r\n* Update docs/source/en/model_doc/nllb.mdx\r\n\r\nCo-authored-by: Stefan Schweter \r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* PR reviews\r\n\r\n* Auto models\r\n\r\nCo-authored-by: Stefan Schweter \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def test_mask_token(self):\n self.assertListEqual(self.tokenizer.convert_tokens_to_ids([\"\", \"ar_AR\"]), [256203, 3])\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 28, "n_ast_nodes": 46, "n_identifiers": 5, "random_cut": "def test_mask_token(self):\n self.assertListEqual(self.tokeni" }, { "id": 269532, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "shape", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def shape(x):\n \n return tf.shape(x)\n\n\n@keras_export(\"keras.backend.int_shape\")\n@doc_controls.do_not_generate_docs", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.backend.int_shape\")\n@doc_controls.do_not_generate_docs", "n_ast_errors": 1, "ast_levels": 7, "n_whitespaces": 10, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 41, "n_identifiers": 6, "random_cut": "def shape(x):\n \n return tf.shape(x)\n\n\n@keras_export(\"keras.backend.int_shape\"" }, { "id": 291322, "commit_id": "815dfe9134db71b9e182fa7ac974393aaf6910d5", 
"repo": "core", "path": "tests/components/shelly/test_utils.py", "file_name": "test_utils.py", "fun_name": "test_get_rpc_channel_name", "commit_message": "Fix Shelly gen2 channel name (#82655)\n\n* Fix Shelly gen2 channel name\r\n\r\n* Review comment", "code": "async def test_get_rpc_channel_name(mock_rpc_device):\n \n assert get_rpc_channel_name(mock_rpc_device, \"input:0\") == \"test switch_0\"\n assert get_rpc_channel_name(mock_rpc_device, \"input:3\") == \"Test name switch_3\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 25, "n_words": 16, "vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 48, "n_identifiers": 3, "random_cut": "async def test_get_rpc_channel_name(mock_rpc_device):\n \n assert get_rpc_channel_name(mock_rpc_device, \"input:0\") == \"test switch_0\"\n assert get_rpc_channel_name(mock_rp" }, { "id": 78404, "commit_id": "ab5a3390e363907369b572dce2b6defaea1a2370", "repo": "wagtail", "path": "wagtail/snippets/tests/test_snippets.py", "file_name": "test_snippets.py", "fun_name": "test_publish", "commit_message": "Add tests for snippets with DraftStateMixin enabled", "code": "def test_publish(self):\n timestamp = now()\n with freeze_time(timestamp):\n response = self.post(\n post_data={\n \"text\": \"Draft-enabled Foo, Published\",\n \"action-publish\": \"action-publish\",\n }\n )\n snippet = DraftStateModel.objects.get(text=\"Draft-enabled Foo, Published\")\n\n self.assertRedirects(\n response, reverse(\"wagtailsnippets_tests_draftstatemodel:list\")\n )\n\n # The instance should be created\n self.assertEqual(snippet.text, \"Draft-enabled Foo, Published\")\n\n # The instance should be live\n self.assertTrue(snippet.live)\n self.assertFalse(snippet.has_unpublished_changes)\n self.assertEqual(snippet.first_published_at, timestamp)\n self.assertEqual(snippet.last_published_at, timestamp)\n\n # A revision should be created and set as both latest_revision and live_revision\n self.assertIsNotNone(snippet.live_revision)\n self.assertEqual(snippet.live_revision, snippet.latest_revision)\n\n # The revision content should contain the new data\n self.assertEqual(\n snippet.live_revision.content[\"text\"],\n \"Draft-enabled Foo, Published\",\n )\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 329, "n_words": 81, "vocab_size": 55, "complexity": 1, "nloc": 24, "token_counts": 140, "n_ast_nodes": 241, "n_identifiers": 26, "random_cut": "def test_publish(self):\n timestamp = now()\n " }, { "id": 220896, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/trsock.py", "file_name": "trsock.py", "fun_name": "__enter__", "commit_message": "add python 3.10.4 for windows", "code": "def __enter__(self):\n self._na('context manager protocol')\n return self._sock.__enter__()\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 34, "n_identifiers": 4, "random_cut": "def __enter__(self):\n self._na('context manager protocol')\n " }, { "id": 274753, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/metrics/metrics.py", "file_name": "metrics.py", "fun_name": "interpolate_pr_auc", 
"commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def interpolate_pr_auc(self):\n \n dtp = (\n self.true_positives[: self.num_thresholds - 1]\n - self.true_positives[1:]\n )\n p = tf.math.add(self.true_positives, self.false_positives)\n dp = p[: self.num_thresholds - 1] - p[1:]\n prec_slope = tf.math.divide_no_nan(\n dtp, tf.maximum(dp, 0), name=\"prec_slope\"\n )\n intercept = self.true_positives[1:] - tf.multiply(prec_slope, p[1:])\n\n safe_p_ratio = tf.where(\n tf.logical_and(p[: self.num_thresholds - 1] > 0, p[1:] > 0),\n tf.math.divide_no_nan(\n p[: self.num_thresholds - 1],\n tf.maximum(p[1:], 0),\n name=\"recall_relative_ratio\",\n ),\n tf.ones_like(p[1:]),\n )\n\n pr_auc_increment = tf.math.divide_no_nan(\n prec_slope * (dtp + intercept * tf.math.log(safe_p_ratio)),\n tf.maximum(self.true_positives[1:] + self.false_negatives[1:], 0),\n name=\"pr_auc_increment\",\n )\n\n if self.multi_label:\n by_label_auc = tf.reduce_sum(\n pr_auc_increment, name=self.name + \"_by_label\", axis=0\n )\n if self.label_weights is None:\n # Evenly weighted average of the label AUCs.\n return tf.reduce_mean(by_label_auc, name=self.name)\n else:\n # Weighted average of the label AUCs.\n return tf.math.divide_no_nan(\n tf.reduce_sum(\n tf.multiply(by_label_auc, self.label_weights)\n ),\n tf.reduce_sum(self.label_weights),\n name=self.name,\n )\n else:\n return tf.reduce_sum(pr_auc_increment, name=\"interpolate_pr_auc\")\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 621, "n_words": 124, "vocab_size": 74, "complexity": 3, "nloc": 41, "token_counts": 337, "n_ast_nodes": 515, "n_identifiers": 30, "random_cut": "def interpolate_pr_auc(self):\n \n dtp = (\n self.true_positives[: self.num_thresholds - 1]\n - self.true_positives[1:]\n )\n p = tf.math.add(self.true_positives, self.false_positives)\n dp = p[: self.num_thresholds - 1] - p[1:]\n prec_slope = tf.math.divide_no_nan(\n dtp, tf.maximum(dp, 0), name=\"prec_slope\"\n )\n intercept = self.true_positives[1:] - tf.multiply(prec_slope, p[1:])\n\n safe_p_ratio = tf.where(\n tf.logical_and(p[: self.num_thresholds - 1] > 0, p[1:] > 0),\n tf.math.divide_no_nan(\n p[: self.num_thresholds - 1],\n tf.maximum(p[1:], 0),\n name=\"recall_relative_ratio\",\n ),\n tf.ones_like(p[1:]),\n )\n\n pr_auc_increment = tf.math.divide_no_nan(\n prec_slope * (dtp + intercept * tf.math.log(safe_p_ratio)),\n tf.maximum(self.true_positives[1:] + self.false_negatives[1:], 0),\n name=\"pr_auc_increment\",\n )\n\n if self.multi_label:\n by_label_auc = tf.reduce_sum(\n pr_auc_increment, name=self.name + \"_by_label\", axis=0\n )\n if self.label_weights is None:\n # Evenly weighted average of the label AUCs.\n return tf.reduce_mean(b" }, { "id": 69030, "commit_id": "5c0a25012c602ed0d47136468e3b0bee11ddf5dd", "repo": "erpnext", "path": "erpnext/loan_management/doctype/loan_balance_adjustment/loan_balance_adjustment.py", "file_name": "loan_balance_adjustment.py", "fun_name": "get_values_on_cancel", "commit_message": "feat: add adjustment amount to loan\n- fix: bugs in loan balance adjustment", "code": "def get_values_on_cancel(self, loan_details):\n if self.adjustment_type == \"Credit Adjustment\":\n adjustment_amount = loan_details.adjustment_amount - self.amount\n elif self.adjustment_type == \"Debit Adjustment\":\n adjustment_amount = loan_details.adjustment_amount + self.amount\n\n return adjustment_amount\n", "url": 
"https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 67, "n_words": 25, "vocab_size": 17, "complexity": 3, "nloc": 6, "token_counts": 41, "n_ast_nodes": 68, "n_identifiers": 6, "random_cut": "def get_values_on_cancel(self, loan_details):\n if self.adjustment_" }, { "id": 7812, "commit_id": "60197fe851aadfa51d18c16dd42b49f728ed7eaa", "repo": "ludwig", "path": "ludwig/data/dataset_synthesizer.py", "file_name": "dataset_synthesizer.py", "fun_name": "_get_feature_encoder_or_decoder", "commit_message": "Adds registry to organize backward compatibility updates around versions and config sections (#2335)\n\n* First pass implementation of VersionTransformation\r\n\r\n* Remove test main.\r\n\r\n* Refactors backward_compatibility.py to use version registration system\r\n\r\n* Changed sort order to process outer first.\r\n\r\n* Moves test_deprecated_field_aliases from test_defaults.py to test_backward_compatibility.py\r\n\r\n* s/prefix/prefixes in test_version_transformation.py\r\n\r\n* Removes comment, print statements.\r\n\r\n* Adds docstrings.\r\n\r\n* typo fix.\r\n\r\n* Removes unused import.\r\n\r\n* Small cleanup to backward_compatibility.py, removed redundant keys.\r\n\r\n* Assume version 0.4 if no version present in the config.\r\n\r\n* Updates dataset synthesis to work with nested encoder/decoders.\r\n\r\n* Fixes test_server.py\r\n\r\n* nesting image feature params in test_ray\r\n\r\n* _get_feature_encoder_or_decoder in generate_category.\r\n\r\n* oops, forgot random.choice.\r\n\r\nCo-authored-by: Daniel Treiman ", "code": "def _get_feature_encoder_or_decoder(feature):\n \n if DECODER in feature:\n return feature[DECODER]\n elif ENCODER in feature:\n return feature[ENCODER]\n else:\n feature[ENCODER] = {}\n return feature[ENCODER]\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 60, "n_words": 20, "vocab_size": 14, "complexity": 3, "nloc": 8, "token_counts": 40, "n_ast_nodes": 65, "n_identifiers": 4, "random_cut": "def _get_feature_encoder_or_decoder(feature):\n \n if DECODER in feature:\n return feature[DECODER]\n elif ENCODER in feature:\n return feature[ENCODER]\n else:\n " }, { "id": 254766, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/backend/test/case/node/gatherelements.py", "file_name": "gatherelements.py", "fun_name": "export_gather_elements_1", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* 
fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def export_gather_elements_1() -> None:\n axis = 0\n node = onnx.helper.make_node(\n 'GatherElements',\n inputs=['data', 'indices'],\n outputs=['y'],\n axis=axis,\n )\n data = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]], dtype=np.float32)\n indices = np.array([[1, 2, 0],\n [2, 0, 0]], dtype=np.int32)\n\n y = gather_elements(data, indices, axis)\n # print(y) produces\n # [[4, 8, 3],\n # [7, 2, 3]]\n\n expect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],\n name='test_gather_elements_1')\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 261, "n_words": 58, "vocab_size": 46, "complexity": 1, "nloc": 16, "token_counts": 145, "n_ast_nodes": 213, "n_identifiers": 21, "random_cut": "def export_gather_elements_1() -> None:\n axis = 0\n node = onnx.helper.make_node(\n 'GatherElements',\n inputs=['data', 'indices'],\n outputs=['y'],\n axis=axis,\n )\n data = np.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]], dtype=np.float32)\n indices = np.array([[1, 2, 0],\n [2, 0, 0]], dtype=np.int32)\n\n y = gather_elements(data, indices, axis)\n # print(y) produces\n # [[4, 8, 3],\n # [7, 2, 3]]\n\n expect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],\n name='test" }, { "id": 32768, "commit_id": "f9a0008d2d3082a665f711b24f5314e4a8205fab", "repo": "transformers", "path": "src/transformers/models/videomae/modeling_videomae.py", "file_name": "modeling_videomae.py", "fun_name": "get_sinusoid_encoding_table", "commit_message": "Add VideoMAE (#17821)\n\n* First draft\r\n\r\n* Add VideoMAEForVideoClassification\r\n\r\n* Improve conversion script\r\n\r\n* Add VideoMAEForPreTraining\r\n\r\n* Add VideoMAEFeatureExtractor\r\n\r\n* Improve VideoMAEFeatureExtractor\r\n\r\n* Improve docs\r\n\r\n* Add first draft of model tests\r\n\r\n* Improve VideoMAEForPreTraining\r\n\r\n* Fix base_model_prefix\r\n\r\n* Make model take pixel_values of shape (B, T, C, H, W)\r\n\r\n* Add loss computation of VideoMAEForPreTraining\r\n\r\n* Improve tests\r\n\r\n* Improve model testsé\r\n\r\n* Make all tests pass\r\n\r\n* Add VideoMAE to main README\r\n\r\n* Add tests for VideoMAEFeatureExtractor\r\n\r\n* Add integration test\r\n\r\n* Improve conversion script\r\n\r\n* Rename patch embedding class\r\n\r\n* Remove VideoMAELayer from init\r\n\r\n* Update design of patch embeddings\r\n\r\n* Improve comments\r\n\r\n* Improve conversion script\r\n\r\n* Improve conversion script\r\n\r\n* Add conversion of pretrained model\r\n\r\n* Add loss verification of pretrained model\r\n\r\n* Add loss verification of unnormalized targets\r\n\r\n* Add integration test for pretraining model\r\n\r\n* Apply suggestions from code review\r\n\r\n* Fix bug to make feature extractor resize only shorter edge\r\n\r\n* Address more comments\r\n\r\n* Improve normalization of videos\r\n\r\n* Add doc examples\r\n\r\n* Move constants to dedicated script\r\n\r\n* Remove scripts\r\n\r\n* Transfer checkpoints, fix docs\r\n\r\n* Update script\r\n\r\n* Update image mean and std\r\n\r\n* Fix doc tests\r\n\r\n* Set return_tensors to NumPy by default\r\n\r\n* Revert the previous change\r\n\r\nCo-authored-by: Niels Rogge ", "code": "def get_sinusoid_encoding_table(n_position, d_hid):\n \n # TODO: make it with torch instead of numpy", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 12, 
"vocab_size": 12, "complexity": 2, "nloc": 6, "token_counts": 86, "n_ast_nodes": 16, "n_identifiers": 3, "random_cut": "def get_sinusoid_encoding_table(n_position, d_hid):\n \n # TODO: make it with torch instead of numpy" }, { "id": 267930, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/commands/sanity/pylint.py", "file_name": "pylint.py", "fun_name": "supported_python_versions", "commit_message": "ansible-test - Use more native type hints. (#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def supported_python_versions(self) -> t.Optional[t.Tuple[str, ...]]:\n \n return tuple(version for version in CONTROLLER_PYTHON_VERSIONS if str_to_version(version) < (3, 11))\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 30, "n_words": 16, "vocab_size": 16, "complexity": 3, "nloc": 3, "token_counts": 40, "n_ast_nodes": 61, "n_identifiers": 10, "random_cut": "def supported_python_versions(self) -> t.Optional[t.Tuple[str, ...]]:\n \n return tuple(" }, { "id": 41030, "commit_id": "fa680c4226e618710014fd18a756c4f98daef956", "repo": "seaborn", "path": "seaborn/_marks/basic.py", "file_name": "basic.py", "fun_name": "_plot_split", "commit_message": "Update Line and Bar marks with some of the new patterns", "code": "def _plot_split(self, keys, data, ax, kws):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n kws = kws.copy()\n\n markers = self._resolve(data, \"marker\")\n fill = self._resolve(data, \"fill\")\n fill & np.array([m.is_filled() for m in markers])\n\n edgecolors = self._resolve_color(data)\n facecolors = self._resolve_color(data, \"fill\")\n facecolors[~fill, 3] = 0\n\n linewidths = self._resolve(data, \"linewidth\")\n pointsize = self._resolve(data, \"pointsize\")\n\n paths = []\n path_cache = {}\n for m in markers:\n if m not in path_cache:\n path_cache[m] = m.get_path().transformed(m.get_transform())\n paths.append(path_cache[m])\n\n sizes = pointsize ** 2\n offsets = data[[\"x\", \"y\"]].to_numpy()\n\n points = mpl.collections.PathCollection(\n paths=paths,\n sizes=sizes,\n offsets=offsets,\n facecolors=facecolors,\n edgecolors=edgecolors,\n linewidths=linewidths,\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n )\n ax.add_collection(points)\n\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 377, "n_words": 113, "vocab_size": 86, "complexity": 4, "nloc": 29, "token_counts": 226, "n_ast_nodes": 357, "n_identifiers": 38, "random_cut": "def _plot_split(self, keys, data, ax, kws):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be 
solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n kws = kws.copy()\n\n markers = self._resolve(data, \"marker\")\n fill = self._resolve(data, \"fill\")\n fill & np.array([m.is_filled() for m in markers])\n\n edgecolors = self._resolve_color(data)\n facecolors = self._resolve_color(data, \"fill\")\n facecolors[~fill, 3] = 0\n\n linewidths = self._resolve(data, \"linewidth\")\n pointsize = self._resolve(data, \"pointsize\")\n\n paths = []\n path_cache = {}\n for m in markers:\n if m not in path_cache:\n path_cache[m] = m.get_path().transformed(m.get_transform())\n paths.append(path_cache[m])\n\n sizes = pointsize ** 2\n offsets = data[[\"x\", \"y\"]].to_numpy()\n\n points = mpl.collections.PathCollection(\n paths=paths,\n sizes=sizes,\n offsets=offsets,\n facecolors=facecolors,\n edgecolors=edgecolors,\n linewidths=linewidths,\n transOffset=ax.transData,\n transform=mpl.transforms.Ident" }, { "id": 40907, "commit_id": "6f3077f12b7837106ba0a79740fbfd547628291b", "repo": "seaborn", "path": "seaborn/tests/_core/test_scales.py", "file_name": "test_scales.py", "fun_name": "test_convert_categories", "commit_message": "Thoroughly update scaling logic and internal API", "code": "def test_convert_categories(self, scale):\n\n x = pd.Series(pd.Categorical([\"a\", \"b\", \"c\"], [\"b\", \"a\", \"c\"]))\n s = CategoricalScale(scale, None, format).setup(x)\n assert_series_equal(s.convert(x), pd.Series([1., 0., 2.]))\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 40, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 4, "token_counts": 74, "n_ast_nodes": 114, "n_identifiers": 13, "random_cut": "def test_convert_categories(self, scale):\n\n x = pd.Series(pd.Categorical([\"a\", \"b\", \"c\"], [\"b\", \"a\", \"c\"]))\n s = CategoricalScale(scale, None, format).setup(x)\n assert_series_equal(s.convert(x), pd.Series([1., 0., 2.]))\n" }, { "id": 251944, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/proxy/layers/test_websocket.py", "file_name": "test_websocket.py", "fun_name": "test_drop_message", "commit_message": "make it black!", "code": "def test_drop_message(ws_testdata):\n tctx, playbook, flow = ws_testdata\n assert (\n playbook\n << websocket.WebsocketStartHook(flow)\n >> reply()\n >> DataReceived(tctx.server, b\"\\x81\\x03foo\")\n << websocket.WebsocketMessageHook(flow)\n )\n flow.websocket.messages[-1].drop()\n playbook >> reply()\n playbook << None\n assert playbook\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 84, "n_words": 29, "vocab_size": 20, "complexity": 1, "nloc": 13, "token_counts": 73, "n_ast_nodes": 109, "n_identifiers": 13, "random_cut": "def test_drop_message(ws_testdata):\n tctx, playbook, flow = ws_testdata\n assert (\n playbook\n << websocket.WebsocketStartHook(flow)\n >> reply()\n >> DataReceived(tctx.server, b\"\\x81\\x03foo\")\n << websocket.WebsocketMessageHook(flow)\n )\n flow.websocket.messages[-1].drop()\n playbook >> reply()\n playbook << None\n assert playbook\n\n" }, { "id": 82413, "commit_id": "c1290c9ff89cb00caa5469129fd527e9d82cd820", "repo": "django-cms", "path": "cms/tests/test_page_admin.py", "file_name": "test_page_admin.py", "fun_name": "_parse_page_tree", "commit_message": "ci: Added codespell (#7355)\n\nCo-authored-by: Christian Clauss \r\n\r\n* ci: codespell 
config taken from #7292", "code": "def _parse_page_tree(self, response, parser_class):\n content = response.content\n content = content.decode(response.charset)\n", "url": "https://github.com/django-cms/django-cms.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 23, "n_words": 10, "vocab_size": 8, "complexity": 2, "nloc": 10, "token_counts": 66, "n_ast_nodes": 37, "n_identifiers": 7, "random_cut": "def _parse_page_tree(self, response, parser_class):\n content = response.content\n content" }, { "id": 64045, "commit_id": "3da2cac772b0557e15ddf4ee9673381b0d98bca1", "repo": "erpnext", "path": "erpnext/accounts/doctype/pricing_rule/utils.py", "file_name": "utils.py", "fun_name": "sorted_by_priority", "commit_message": "chore: undo unnecessary changes", "code": "def sorted_by_priority(pricing_rules, args, doc=None):\n\t# If more than one pricing rules, then sort by priority\n\tpricing_rules_list = []\n\tpricing_rule_dict = {}\n\n\tfor pricing_rule in pricing_rules:\n\t\tpricing_rule = filter_pricing_rules(args, pricing_rule, doc)\n\t\tif pricing_rule:\n\t\t\tif not pricing_rule.get('priority'):\n\t\t\t\tpricing_rule['priority'] = 1\n\n\t\t\tif pricing_rule.get('apply_multiple_pricing_rules'):\n\t\t\t\tpricing_rule_dict.setdefault(cint(pricing_rule.get(\"priority\")), []).append(pricing_rule)\n\n\tfor key in sorted(pricing_rule_dict):\n\t\tpricing_rules_list.extend(pricing_rule_dict.get(key))\n\n\treturn pricing_rules_list\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 35, "n_words": 49, "vocab_size": 40, "complexity": 6, "nloc": 13, "token_counts": 103, "n_ast_nodes": 170, "n_identifiers": 15, "random_cut": "def sorted_by_priority(pricing_rules, args, doc=None):\n\t# If more than one pricing rules, then sort by " }, { "id": 181989, "commit_id": "fd47ef491b7700a4414d85bf573f1e719cfae555", "repo": "textual", "path": "tests/test_css_parse.py", "file_name": "test_css_parse.py", "fun_name": "test_parse_text_foreground", "commit_message": "Separate parsing of scalar, number, duration", "code": "def test_parse_text_foreground():\n css = \n stylesheet = Stylesheet()\n stylesheet.parse(css)\n\n styles = stylesheet.rules[0].styles\n assert styles.text_color == Color.parse(\"green\")\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 15, "vocab_size": 13, "complexity": 1, "nloc": 9, "token_counts": 39, "n_ast_nodes": 68, "n_identifiers": 9, "random_cut": "def test_parse_text_foreground():\n css = \n stylesheet = Stylesheet()\n stylesheet.parse(css)\n\n styles = stylesheet.rules[0].styles\n assert styles.text_color == Color.parse(\"green\")\n\n" }, { "id": 40466, "commit_id": "399f9b6aeef04623e03613c584bcb0c615d3cb01", "repo": "seaborn", "path": "seaborn/tests/_core/test_data.py", "file_name": "test_data.py", "fun_name": "test_concat_all_operations", "commit_message": "Add tests for many Plot behaviors", "code": "def test_concat_all_operations(self, long_df):\n\n v1 = {\"x\": \"x\", \"y\": \"y\", \"hue\": \"a\"}\n v2 = {\"y\": \"s\", \"size\": \"s\", \"hue\": None}\n\n p1 = PlotData(long_df, v1)\n p2 = p1.concat(None, v2)\n\n for var, key in v2.items():\n if key is None:\n assert var not in p2\n else:\n assert p2.names[var] == key\n assert_vector_equal(p2.frame[var], long_df[key])\n", "url": "https://github.com/mwaskom/seaborn.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 149, "n_words": 48, "vocab_size": 38, "complexity": 3, "nloc": 11, "token_counts": 101, "n_ast_nodes": 171, "n_identifiers": 15, "random_cut": "def test_concat_all_operations(self, long_df):\n\n v1 = {\"x\": \"x\", \"y\": \"y\", \"hue\": \"a\"}\n v2 = {\"y\": \"s\", \"size\": \"s\", \"hue\": None}\n\n p1 = PlotData(long_df, v1)\n p2 =" }, { "id": 58233, "commit_id": "1cf2a8463d93bed3e445ebebf089ac4872fbd34c", "repo": "prefect", "path": "tests/utilities/test_importtools.py", "file_name": "test_importtools.py", "fun_name": "test_import_object_from_script_with_relative_imports", "commit_message": "Add tests for `import_object`", "code": "def test_import_object_from_script_with_relative_imports(script_path):\n # Remove shared_libs if it exists from a prior test or the module can be cached\n sys.modules.pop(\"shared_libs\", None)\n foobar = import_object(f\"{script_path}:foobar\")\n assert foobar() == \"foobar\"\n\n\n@pytest.mark.parametrize(\n \"script_path\",\n [\n TEST_PROJECTS_DIR / \"nested-project\" / \"explicit_relative.py\",\n TEST_PROJECTS_DIR / \"tree-project\" / \"imports\" / \"explicit_relative.py\",\n TEST_PROJECTS_DIR / \"tree-project\" / \"imports\" / \"implicit_relative.py\",\n ],\n)", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"script_path\",\n [\n TEST_PROJECTS_DIR / \"nested-project\" / \"explicit_relative.py\",\n TEST_PROJECTS_DIR / \"tree-project\" / \"imports\" / \"explicit_relative.py\",\n TEST_PROJECTS_DIR / \"tree-project\" / \"imports\" / \"implicit_relative.py\",\n ],\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 90, "n_words": 51, "vocab_size": 39, "complexity": 1, "nloc": 4, "token_counts": 28, "n_ast_nodes": 123, "n_identifiers": 11, "random_cut": "def test_import_object_from_script_with_relative_imports(script_path):\n # Remove shared_libs if it exists from a prior test or the module can be cached\n sys.modules.pop(\"shared_libs\", None)\n foobar = import_object(f\"{script_path}:foobar\")\n assert foobar() == \"foobar\"\n\n\n@pytest.mark.parametrize(\n \"script_path\",\n [\n TEST_PROJECTS_DIR / \"nested-" }, { "id": 255270, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/test/automatic_upgrade_test.py", "file_name": "automatic_upgrade_test.py", "fun_name": "test_Div", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def 
test_Div(self) -> None:\n self._test_op_upgrade('Div', 1, [[3, 4, 5], [3, 1, 5]], attrs={'consumed_inputs': [0]})\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 20, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 2, "token_counts": 43, "n_ast_nodes": 63, "n_identifiers": 4, "random_cut": "def test_Div(self) -> None:\n self._test_op_upgrade('Div', 1, [[3, 4, 5], [3, 1, 5]], attrs={'consumed_inputs': [0]})\n" }, { "id": 301146, "commit_id": "abf9aab18f9a6953b49c4f8aee1ca7e560911e36", "repo": "core", "path": "tests/components/laundrify/conftest.py", "file_name": "conftest.py", "fun_name": "laundrify_api_fixture", "commit_message": "Add laundrify integration (#65090)\n\n* First version of laundrify integration\r\n\r\n* Code cleanup\r\n\r\n* Code cleanup after review #2\r\n\r\n* Move coordinator to its own file\r\n\r\n* Save devices as dict and implement available prop as fn\r\n\r\n* Validate token on init, abort if already configured\r\n\r\n* Some more cleanup after review\r\n\r\n* Add strict type hints\r\n\r\n* Minor changes after code review\r\n\r\n* Remove OptionsFlow (use default poll interval instead)\r\n\r\n* Fix CODEOWNERS to pass hassfest job\r\n\r\n* Fix formatting to pass prettier job\r\n\r\n* Fix mypy typing error\r\n\r\n* Update internal device property after fetching data\r\n\r\n* Call parental update handler and remove obsolete code\r\n\r\n* Add coordinator tests and fix some config flow tests\r\n\r\n* Refactor tests\r\n\r\n* Refactor fixtures\r\n\r\n* Device unavailable if polling fails", "code": "def laundrify_api_fixture(laundrify_exchange_code, laundrify_validate_token):\n \n with patch(\n \"laundrify_aio.LaundrifyAPI.get_account_id\",\n return_value=VALID_ACCOUNT_ID,\n ), patch(\n \"laundrify_aio.LaundrifyAPI.get_machines\",\n return_value=json.loads(load_fixture(\"laundrify/machines.json\")),\n ) as get_machines_mock:\n yield get_machines_mock\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 63, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 9, "token_counts": 41, "n_ast_nodes": 74, "n_identifiers": 10, "random_cut": "def laundrify_api_fixture(laundrify_exchange_code, laundrify_validate_token):\n \n with patch(\n \"laundrify_aio.LaundrifyAPI.get_account_id\",\n return_value=VALID_ACCOUNT_ID,\n ), patch(\n \"laundrify_aio.LaundrifyAPI.get_machine" }, { "id": 114034, "commit_id": "dc7949207fbf3c63b2ba30b68a84b2ee7f2b5e80", "repo": "mindsdb", "path": "mindsdb/api/mysql/mysql_proxy/classes/sql_query.py", "file_name": "sql_query.py", "fun_name": "_process_query", "commit_message": "test fixes", "code": "def _process_query(self, sql):\n # self.query = parse_sql(sql, dialect='mindsdb')\n\n integrations_names = self.datahub.get_datasources_names()\n integrations_names.append('information_schema')\n integrations_names.append('files')\n integrations_names.append('views')\n\n all_tables = get_all_tables(self.query)\n\n predictor_metadata = {}\n predictors = db.session.query(db.Predictor).filter_by(company_id=self.session.company_id)\n for model_name in set(all_tables):\n for p in predictors:\n if p.name == model_name:\n if isinstance(p.data, dict) and 'error' not in p.data:\n ts_settings = p.learn_args.get('timeseries_settings', {})\n if ts_settings.get('is_timeseries') is True:\n window = ts_settings.get('window')\n order_by = ts_settings.get('order_by')[0]\n group_by = 
ts_settings.get('group_by')\n if isinstance(group_by, list) is False and group_by is not None:\n group_by = [group_by]\n predictor_metadata[model_name] = {\n 'timeseries': True,\n 'window': window,\n 'horizon': ts_settings.get('horizon'),\n 'order_by_column': order_by,\n 'group_by_columns': group_by\n }\n else:\n predictor_metadata[model_name] = {\n 'timeseries': False\n }\n self.model_types.update(p.data.get('dtypes', {}))\n\n self.planner = query_planner.QueryPlanner(\n self.query,\n integrations=integrations_names,\n predictor_namespace=self.mindsdb_database_name,\n predictor_metadata=predictor_metadata,\n default_namespace=self.database\n )\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 806, "n_words": 105, "vocab_size": 75, "complexity": 9, "nloc": 38, "token_counts": 269, "n_ast_nodes": 446, "n_identifiers": 41, "random_cut": "def _process_query(self, sql):\n # self.query = parse_sql(sql, dialect='mindsdb')\n\n integrations_names = self.datahub.get_datasources_names()\n integrations_names.append('information_schema')\n integrations_names.append('files')\n integrations_names.append('views')\n\n all_tables = get_all_tables(self.query)\n\n predictor_metadata = {}\n predictors = db.session.query(db.Predictor).filter_by(company_id=self.session.company_id)\n for model_name in set(all_tables):\n for p in predictors:\n if p.name == model_name:\n if isinstance(p.data, dict) and 'error' not in p.data:\n ts_settings = p.learn_args.get('timeseries_settings', {})\n if ts_settings.get('is_timeseries') is True:\n window = ts_settings.get('window')\n order_by = ts_settings.get('order_by')[0]\n group_by = ts_settings.get('group_by')\n if isinstance(group_by, list) is False and group_by is not None:\n group_by = [group_by]\n predictor_metadata[model_name] = {\n 'timeseries': True,\n 'window': window,\n 'horizon': ts_settings.get('horizon'),\n 'order_by_column': order_by,\n 'group_by_columns': group_by\n }\n else:\n predictor_metadata[model_name] = {\n 'timeseries': False\n " }, { "id": 289758, "commit_id": "838691f22f27852a05313809cdf9c51094ad3798", "repo": "core", "path": "homeassistant/components/zwave_js/addon.py", "file_name": "addon.py", "fun_name": "async_restart_addon", "commit_message": "Refactor zwave_js add-on manager (#80883)\n\n* Make addon slug an instance attribute\r\n\r\n* Extract addon name and addon config\r\n\r\n* Update docstrings", "code": "async def async_restart_addon(self) -> None:\n \n await async_restart_addon(self._hass, self.addon_slug)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 34, "n_identifiers": 4, "random_cut": "async def async_restart_addon(self) -> None:" }, { "id": 335088, "commit_id": "dcb23b2d7299708442aee5b4dbf23ee6df363f2c", "repo": "diffusers", "path": "src/diffusers/schedulers/scheduling_ddpm.py", "file_name": "scheduling_ddpm.py", "fun_name": "step", "commit_message": "rename image to sample in schedulers", "code": "def step(self, residual, sample, t):\n # 1. compute alphas, betas\n alpha_prod_t = self.get_alpha_prod(t)\n alpha_prod_t_prev = self.get_alpha_prod(t - 1)\n beta_prod_t = 1 - alpha_prod_t\n beta_prod_t_prev = 1 - alpha_prod_t_prev\n\n # 2. 
compute predicted original sample from predicted noise also called\n # \"predicted x_0\" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf\n pred_original_sample = (sample - beta_prod_t ** (0.5) * residual) / alpha_prod_t ** (0.5)\n\n # 3. Clip \"predicted x_0\"\n if self.clip_predicted_sample:\n pred_original_sample = self.clip(pred_original_sample, -1, 1)\n\n # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t\n # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf\n pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.get_beta(t)) / beta_prod_t\n current_sample_coeff = self.get_alpha(t) ** (0.5) * beta_prod_t_prev / beta_prod_t\n\n # 5. Compute predicted previous sample µ_t\n # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf\n pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample\n\n return pred_prev_sample\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 267, "n_words": 131, "vocab_size": 68, "complexity": 2, "nloc": 12, "token_counts": 129, "n_ast_nodes": 194, "n_identifiers": 18, "random_cut": "def step(self, residual, sample, t):\n # 1. compute alphas, betas\n alpha_prod_t = self.get_alpha_prod(t)\n alpha_prod_t_prev = self.get_alpha_prod(t - 1)\n beta_prod_t = 1 - alpha_prod_t\n beta_prod_t_prev = 1 - alpha_prod_t_prev\n\n # 2. compute predicted original sample from predicted noise also called\n # \"predicted x_0\" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf\n pred_original_sample = (sample - beta_prod_t ** (0.5) * residual) / alpha_prod_t ** (0.5)\n\n # 3. Clip \"predicted x_0\"\n if self.clip_predicted_sample:\n pred_original_sample = self.clip(pred_original_sample, -1, 1)\n\n # 4. 
Compute coefficients for pred_original_sample x_0 and current sample x_t\n " }, { "id": 94331, "commit_id": "39cfdcb446e74732c67ce07d7dd8d8d5ace471b1", "repo": "sentry", "path": "tests/sentry/event_manager/test_event_manager.py", "file_name": "test_event_manager.py", "fun_name": "test_culprit_after_stacktrace_processing", "commit_message": "test(event_manager): Fix incorrect invocations of manager.save (#36615)", "code": "def test_culprit_after_stacktrace_processing(self):\n from sentry.grouping.enhancer import Enhancements\n\n enhancement = Enhancements.from_config_string(\n ,\n )\n\n manager = EventManager(\n make_event(\n platform=\"native\",\n exception={\n \"values\": [\n {\n \"type\": \"Hello\",\n \"stacktrace\": {\n \"frames\": [\n {\n \"function\": \"not_in_app_function\",\n },\n {\n \"function\": \"in_app_function\",\n },\n ]\n },\n }\n ]\n },\n )\n )\n manager.normalize()\n manager.get_data()[\"grouping_config\"] = {\n \"enhancements\": enhancement.dumps(),\n \"id\": \"legacy:2019-03-12\",\n }\n event1 = manager.save(self.project.id)\n assert event1.transaction is None\n assert event1.culprit == \"in_app_function\"\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 682, "n_words": 61, "vocab_size": 44, "complexity": 1, "nloc": 38, "token_counts": 124, "n_ast_nodes": 218, "n_identifiers": 22, "random_cut": "def test_culprit_after_stacktrace_processing(self):\n from sentry.grouping.enhancer import Enhancements\n\n enhancement = Enhancements.from_config_string(\n " }, { "id": 89897, "commit_id": "9fbdf75bdb6a55803da351f2ef881d86d7fdc9a0", "repo": "sentry", "path": "tests/sentry/utils/performance_issues/test_consecutive_db_detector.py", "file_name": "test_consecutive_db_detector.py", "fun_name": "test_does_not_detect_consecutive_db_spans_with_parameterized_query", "commit_message": "(refactor) consecutive db detector tests to assert on detector (#42444)\n\nPart of PERF-1847\r\n\r\nRefactor consecutive db tests to assert on detector output vs tags\r\nMove tests to its own file", "code": "def test_does_not_detect_consecutive_db_spans_with_parameterized_query(self):\n span_duration = 750\n spans = [\n create_span(\n \"db\",\n span_duration,\n \"SELECT m.* FROM authors a INNER JOIN books b ON a.book_id = b.id AND b.another_id = 'another_id_123' ORDER BY b.created_at DESC LIMIT 3\",\n ),\n create_span(\n \"db\",\n span_duration,\n \"SELECT m.* FROM authors a INNER JOIN books b ON a.book_id = b.id AND b.another_id = 'another_id_456' ORDER BY b.created_at DESC LIMIT 3\",\n ),\n create_span(\n \"db\",\n span_duration,\n \"SELECT m.* FROM authors a INNER JOIN books b ON a.book_id = b.id AND b.another_id = 'another_id_789' ORDER BY b.created_at DESC LIMIT 3\",\n ),\n ]\n spans = [modify_span_start(span, span_duration * spans.index(span)) for span in spans]\n event = create_event(spans, \"a\" * 16)\n\n problems = self.find_consecutive_db_problems(event)\n\n assert problems == []\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 362, "n_words": 113, "vocab_size": 51, "complexity": 2, "nloc": 23, "token_counts": 86, "n_ast_nodes": 138, "n_identifiers": 12, "random_cut": "def test_does_not_detect_consecutive_db_spans_with_parameterized_query(self):\n span_duration = 750\n spans = [\n create_span(\n \"db\",\n span_duration,\n \"SELECT m.* FROM authors a INNER JOIN books b ON a.book_id = b.id AND 
b.another_id = 'another_id_123' ORDER BY b.created_at DESC LIMIT 3\",\n ),\n create_span(\n \"db\",\n span_duration,\n \"SELECT m.* FROM authors a INNER JOIN books b ON a." }, { "id": 313244, "commit_id": "3771c154fa0ea8e0b49d41ece55a7a18c444ee6a", "repo": "core", "path": "homeassistant/components/command_line/sensor.py", "file_name": "sensor.py", "fun_name": "update", "commit_message": "Improve code quality command_line (#65333)", "code": "def update(self) -> None:\n \n self.data.update()\n value = self.data.value\n\n if self._json_attributes:\n self._attr_extra_state_attributes = {}\n if value:\n try:\n json_dict = json.loads(value)\n if isinstance(json_dict, Mapping):\n self._attr_extra_state_attributes = {\n k: json_dict[k]\n for k in self._json_attributes\n if k in json_dict\n }\n else:\n _LOGGER.warning(\"JSON result was not a dictionary\")\n except ValueError:\n _LOGGER.warning(\"Unable to parse output as JSON: %s\", value)\n else:\n _LOGGER.warning(\"Empty reply found when expecting JSON data\")\n\n if value is None:\n value = STATE_UNKNOWN\n elif self._value_template is not None:\n self._attr_native_value = (\n self._value_template.render_with_possible_json_value(\n value, STATE_UNKNOWN\n )\n )\n else:\n self._attr_native_value = value\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 531, "n_words": 85, "vocab_size": 59, "complexity": 9, "nloc": 31, "token_counts": 142, "n_ast_nodes": 235, "n_identifiers": 19, "random_cut": "def update(self) -> None:\n \n self.data.update()\n value = self.data.value\n\n if self._json_attributes:\n self._attr_extra_state_attributes = {}\n if value:\n try:\n json_dict = json.loads(value)\n if isinstance(json_dict, Mapping):\n self._attr_extra_state_attributes = {\n k: json_dict[k]\n for k in self._json_attributes\n if k in json_dict\n }\n else:\n _LOGGER.warning(\"JSON result was not a dictionary\")\n except ValueError:\n _LOGGER.warning(\"Unable to parse ou" }, { "id": 178782, "commit_id": "87f7d22b39a19d15d762c1da63b918c2bf04c6ec", "repo": "Nuitka", "path": "nuitka/plugins/standard/DllFiles.py", "file_name": "DllFiles.py", "fun_name": "getExtraDlls", "commit_message": "Standalone: Added support for including DLL of 'vosk' package.", "code": "def getExtraDlls(self, module):\n full_name = module.getFullName()\n\n # Checking for config, but also allowing fall through for cases that have to\n # have some code still here.\n config = self.config.get(full_name)\n if config:\n for dll_entry_point in self._handleDllConfigs(\n config=config, full_name=full_name\n ):\n yield dll_entry_point\n\n # TODO: This is legacy code, ideally moved to yaml config over time.\n if full_name == \"uuid\" and isLinux():\n uuid_dll_path = self.locateDLL(\"uuid\")\n\n if uuid_dll_path is not None:\n yield self.makeDllEntryPoint(\n uuid_dll_path, os.path.basename(uuid_dll_path), None\n )\n elif full_name == \"iptc\" and isLinux():\n import iptc.util # pylint: disable=I0021,import-error\n\n xtwrapper_dll = iptc.util.find_library(\"xtwrapper\")[0]\n xtwrapper_dll_path = xtwrapper_dll._name # pylint: disable=protected-access\n\n yield self.makeDllEntryPoint(\n xtwrapper_dll_path, os.path.basename(xtwrapper_dll_path), None\n )\n elif full_name == \"coincurve._libsecp256k1\" and isWin32Windows():\n yield self.makeDllEntryPoint(\n os.path.join(module.getCompileTimeDirectory(), \"libsecp256k1.dll\"),\n os.path.join(full_name.getPackageName(), \"libsecp256k1.dll\"),\n 
full_name.getPackageName(),\n )\n # TODO: This should be its own plugin.\n elif (\n full_name\n in (\n \"pythoncom\",\n \"win32api\",\n \"win32clipboard\",\n \"win32console\",\n \"win32cred\",\n \"win32crypt\",\n \"win32event\",\n \"win32evtlog\",\n \"win32file\",\n \"win32gui\",\n \"win32help\",\n \"win32inet\",\n \"win32job\",\n \"win32lz\",\n \"win32net\",\n \"win32pdh\",\n \"win32pipe\",\n \"win32print\",\n \"win32process\",\n \"win32profile\",\n \"win32ras\",\n \"win32security\",\n \"win32service\",\n \"win32trace\",\n \"win32transaction\",\n \"win32ts\",\n \"win32wnet\",\n )\n and isWin32Windows()\n ):\n pywin_dir = getPyWin32Dir()\n\n if pywin_dir is not None:\n for dll_name in \"pythoncom\", \"pywintypes\":\n\n pythoncom_filename = \"%s%d%d.dll\" % (\n dll_name,\n sys.version_info[0],\n sys.version_info[1],\n )\n pythoncom_dll_path = os.path.join(pywin_dir, pythoncom_filename)\n\n if os.path.exists(pythoncom_dll_path):\n yield self.makeDllEntryPoint(\n pythoncom_dll_path, pythoncom_filename, None\n )\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1240, "n_words": 191, "vocab_size": 126, "complexity": 15, "nloc": 73, "token_counts": 325, "n_ast_nodes": 552, "n_identifiers": 34, "random_cut": "def getExtraDlls(self, module):\n full_name = module.getFullName()\n\n # Checking for config, but also allowing fall through for cases that have to\n # have some code still here.\n config = self.config.get(full_name)\n if config:\n for dll_entry_point in self._handleDllConfigs(\n config=config, full_name=full_name\n ):\n yield dll_entry_point\n\n # TODO: This is legacy code, ideally moved to yaml config over time.\n " }, { "id": 204760, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/serializers/xml_serializer.py", "file_name": "xml_serializer.py", "fun_name": "handle_fk_field", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def handle_fk_field(self, obj, field):\n \n self._start_relational_field(field)\n related_att = getattr(obj, field.get_attname())\n if related_att is not None:\n if self.use_natural_foreign_keys and hasattr(\n field.remote_field.model, \"natural_key\"\n ):\n related = getattr(obj, field.name)\n # If related object has a natural key, use it\n related = related.natural_key()\n # Iterable natural keys are rolled out as subelements\n for key_value in related:\n self.xml.startElement(\"natural\", {})\n self.xml.characters(str(key_value))\n self.xml.endElement(\"natural\")\n else:\n self.xml.characters(str(related_att))\n else:\n self.xml.addQuickElement(\"None\")\n self.xml.endElement(\"field\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 308, "n_words": 60, "vocab_size": 50, "complexity": 5, "nloc": 18, "token_counts": 133, "n_ast_nodes": 225, "n_identifiers": 22, "random_cut": "def handle_fk_field(self, obj, field):\n \n self._start_relational_field(field)\n related_att = getattr(obj, field.get_attname())\n if related_att is not None:\n if self.use_natural_foreign_keys and hasattr(\n field.remote_field.model, \"natural_key\"\n ):\n " }, { "id": 321879, "commit_id": "046244b54ddb1e95b63da78789137b7efe7b489e", "repo": "qutebrowser", "path": "qutebrowser/browser/webengine/webenginetab.py", "file_name": "webenginetab.py", "fun_name": "_remove_js", "commit_message": "mypy: defer to machinery for conditional: 
QWebEngineScripts", "code": "def _remove_js(self, name):\n \n scripts = self._widget.page().scripts()\n if machinery.IS_QT6:\n for script in scripts.find(f'_qute_{name}'):\n scripts.remove(script)\n else:\n # Qt 5\n script = scripts.findScript(f'_qute_{name}')\n if not script.isNull():\n scripts.remove(script)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 126, "n_words": 24, "vocab_size": 20, "complexity": 4, "nloc": 9, "token_counts": 68, "n_ast_nodes": 124, "n_identifiers": 13, "random_cut": "def _remove_js(self, name):\n \n scripts = self._widget.page().scripts()\n if machinery.IS_QT6:\n for script in scripts.find(f'_qute_{name}'):\n scripts.remove(script)\n else:\n " }, { "id": 162912, "commit_id": "07960766590650e516a75ce6ceba91b68a5fa551", "repo": "inter", "path": "misc/tools/postprocess-vf.py", "file_name": "postprocess-vf.py", "fun_name": "clear_subfamily_name", "commit_message": "UPM 2048 and opsz axis (#462)\n\n- UPM is adjusted to 2048\r\n- Additional opsz VF axis (multi master) added which will eventually replace the separate Display family\r\n- New tooling that uses fontmake instead of Inter's own fontbuild toolchain. (The old toolchain is still supported, i.e. `make -f Makefile_v1.make ...`)", "code": "def clear_subfamily_name(font):\n nameTable = font[\"name\"]\n rmrecs = []\n for rec in nameTable.names:\n if rec.nameID == SUBFAMILY_NAME or rec.nameID == TYPO_SUBFAMILY_NAME:\n rmrecs.append(rec)\n for rec in rmrecs:\n nameTable.removeNames(rec.nameID, rec.platformID, rec.platEncID, rec.langID)\n\n", "url": "https://github.com/rsms/inter.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 43, "n_words": 29, "vocab_size": 23, "complexity": 5, "nloc": 8, "token_counts": 66, "n_ast_nodes": 102, "n_identifiers": 14, "random_cut": "def clear_subfamily_name(font):\n nameTable = font[\"name\"]\n rmrecs = []\n for rec in nameTable.names:\n if rec.nameID == SUBFAMILY_NAME or rec.nameID ==" }, { "id": 258421, "commit_id": "6c067b2b4f62f11850415a30d75b719aa286adc1", "repo": "haystack", "path": "test/document_stores/test_opensearch.py", "file_name": "test_opensearch.py", "fun_name": "test__validate_and_adjust_document_index_wrong_mapping_raises", "commit_message": "feat: make `score_script` first class citizen via `knn_engine` param (#3284)\n\n* OpenSearchDocumentStore: make score_script accessible via knn_engine\r\n\r\n* blacken\r\n\r\n* fix tests\r\n\r\n* fix format\r\n\r\n* fix naming of 'score_script' consistently\r\n\r\n* fix tests\r\n\r\n* fix test\r\n\r\n* fix ef_search tests\r\n\r\n* always validate index\r\n\r\n* improve clone_embedding_field\r\n\r\n* fix pylint\r\n\r\n* reformat\r\n\r\n* remove port\r\n\r\n* update tests\r\n\r\n* set no_implicit_optional = false\r\n\r\n* fix myp\r\n\r\n* fix test\r\n\r\n* refactorings\r\n\r\n* reformat\r\n\r\n* fix and refactor tests\r\n\r\n* better tests\r\n\r\n* create search_field mappings\r\n\r\n* remove no_implicit_optional = false\r\n\r\n* skip validation for custom mapping\r\n\r\n* format\r\n\r\n* Apply suggestions from docs code review\r\n\r\nCo-authored-by: Agnieszka Marzec <97166305+agnieszka-m@users.noreply.github.com>\r\n\r\n* Apply tougher suggestions from code review\r\n\r\n* fix messages\r\n\r\n* fix typos\r\n\r\n* update tests\r\n\r\n* Update haystack/document_stores/opensearch.py\r\n\r\nCo-authored-by: Agnieszka Marzec 
<97166305+agnieszka-m@users.noreply.github.com>\r\n\r\n* fix tests\r\n\r\n* fix ef_search validation\r\n\r\n* add test for ef_search nmslib\r\n\r\n* fix assert_not_called\r\n\r\nCo-authored-by: Agnieszka Marzec <97166305+agnieszka-m@users.noreply.github.com>", "code": "def test__validate_and_adjust_document_index_wrong_mapping_raises(self, mocked_document_store, existing_index):\n \n existing_index[\"mappings\"][\"properties\"][\"age\"] = {\"type\": \"integer\"}\n mocked_document_store.search_fields = [\"age\"]\n with pytest.raises(\n DocumentStoreError,\n match=f\"The index '{self.index_name}' needs the 'text' type for the search_field 'age' to run full text search, but got type 'integer'.\",\n ):\n mocked_document_store._validate_and_adjust_document_index(self.index_name)\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 104, "n_words": 36, "vocab_size": 33, "complexity": 1, "nloc": 8, "token_counts": 55, "n_ast_nodes": 106, "n_identifiers": 11, "random_cut": "def test__validate_and_adjust_document_index_wrong_mapping_raises(self, mocked_document_store, existing_index):\n \n existing_index[\"mappings\"][\"properties\"][\"a" }, { "id": 90845, "commit_id": "b9f5a910dc841b85f58d46266ec049ae5a7fd305", "repo": "sentry", "path": "src/sentry/models/activity.py", "file_name": "activity.py", "fun_name": "save", "commit_message": "ref(models): `ActivityType` (#34978)\n\n## Objective:\r\nWe want to separate enum logic from Model logic. This breaks a lot of circular dependencies.", "code": "def save(self, *args, **kwargs):\n created = bool(not self.id)\n\n super().save(*args, **kwargs)\n\n if not created:\n return\n\n # HACK: support Group.num_comments\n if self.type == ActivityType.NOTE.value:\n self.group.update(num_comments=F(\"num_comments\") + 1)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 81, "n_words": 25, "vocab_size": 24, "complexity": 3, "nloc": 7, "token_counts": 63, "n_ast_nodes": 105, "n_identifiers": 16, "random_cut": "def save(self, *args, **kwargs):\n created = bool(not self.id)\n\n super().save(*args, **kwargs)\n\n if not created:\n return\n\n # HACK: support Group.num_comments\n if self.type == ActivityType.NOTE.value:\n self.group.u" }, { "id": 297714, "commit_id": "79d3d4ceaed75cae908064f012c2839d336f4aba", "repo": "core", "path": "homeassistant/components/vera/sensor.py", "file_name": "sensor.py", "fun_name": "update", "commit_message": "Use UnitOfTemperature in integrations (t-z) (#84309)", "code": "def update(self) -> None:\n \n super().update()\n if self.vera_device.category == veraApi.CATEGORY_TEMPERATURE_SENSOR:\n self.current_value = self.vera_device.temperature\n\n vera_temp_units = self.vera_device.vera_controller.temperature_units\n\n if vera_temp_units == \"F\":\n self._temperature_units = UnitOfTemperature.FAHRENHEIT\n else:\n self._temperature_units = UnitOfTemperature.CELSIUS\n\n elif self.vera_device.category == veraApi.CATEGORY_LIGHT_SENSOR:\n self.current_value = self.vera_device.light\n elif self.vera_device.category == veraApi.CATEGORY_UV_SENSOR:\n self.current_value = self.vera_device.light\n elif self.vera_device.category == veraApi.CATEGORY_HUMIDITY_SENSOR:\n self.current_value = self.vera_device.humidity\n elif self.vera_device.category == veraApi.CATEGORY_SCENE_CONTROLLER:\n controller = cast(veraApi.VeraSceneController, self.vera_device)\n value = 
controller.get_last_scene_id(True)\n time = controller.get_last_scene_time(True)\n if time == self.last_changed_time:\n self.current_value = None\n else:\n self.current_value = value\n self.last_changed_time = time\n elif self.vera_device.category == veraApi.CATEGORY_POWER_METER:\n self.current_value = self.vera_device.power\n elif self.vera_device.is_trippable:\n tripped = self.vera_device.is_tripped\n self.current_value = \"Tripped\" if tripped else \"Not Tripped\"\n else:\n self.current_value = \"Unknown\"\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 416, "n_words": 99, "vocab_size": 46, "complexity": 11, "nloc": 32, "token_counts": 238, "n_ast_nodes": 387, "n_identifiers": 35, "random_cut": "def update(self) -> None:\n \n super().update()\n if self.vera_device.category == veraApi.CATEGORY_TEMPERATURE_SENSOR:\n self.current_value = self.vera_device.temperature\n\n vera_temp_units = self.vera_device.vera_controller.temperature_units\n\n if vera_temp_units == \"F\":\n self._temperature_units = UnitOfTemperature.FAHRENHEIT\n else:\n self._temperature_units = UnitOfTemperature.CELSIUS\n\n elif self.vera_device.category == veraApi.CATEGORY_LIGHT_SENSOR:\n self.current_value = self.vera_device.light\n elif self.vera_device.category == veraApi.CATEGORY_UV_SENSOR:\n self.current_value = self.vera_device.light\n elif self.vera_device.category == veraApi.CATEGORY_HUMIDITY_SENSOR:\n self.current_value = self.vera_device.humidity\n elif self.vera_device.category == veraApi.CATEGORY_SCENE_CONTROLLER:\n controller = cast(veraApi.VeraSceneController, self.vera_device)\n value = controller.get_last_scene_id(True)\n time = controller.get_last_scene_time(True)\n if time == self.last_changed_time:\n self.current_value = None\n else:\n self.current_value = value\n self.last_changed_time = time\n elif self.vera_device.category == veraApi.CATEGORY_POWER_METER:\n self.current_value = self.vera_device.power\n elif self.vera_device.is_trippable:\n tripped = self.vera_device.is_tripped\n self.current_value = \"Tripped\" if" }, { "id": 66458, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/manufacturing/report/bom_operations_time/bom_operations_time.py", "file_name": "bom_operations_time.py", "fun_name": "get_columns", "commit_message": "style: format code with black", "code": "def get_columns(filters):\n\treturn [\n\t\t{\"label\": _(\"BOM ID\"), \"options\": \"BOM\", \"fieldname\": \"name\", \"fieldtype\": \"Link\", \"width\": 220},\n\t\t{\n\t\t\t\"label\": _(\"Item Code\"),\n\t\t\t\"options\": \"Item\",\n\t\t\t\"fieldname\": \"item\",\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"width\": 150,\n\t\t},\n\t\t{\"label\": _(\"Item Name\"), \"fieldname\": \"item_name\", \"fieldtype\": \"Data\", \"width\": 110},\n\t\t{\"label\": _(\"UOM\"), \"options\": \"UOM\", \"fieldname\": \"uom\", \"fieldtype\": \"Link\", \"width\": 100},\n\t\t{\n\t\t\t\"label\": _(\"Operation\"),\n\t\t\t\"options\": \"Operation\",\n\t\t\t\"fieldname\": \"operation\",\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"width\": 140,\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"Workstation\"),\n\t\t\t\"options\": \"Workstation\",\n\t\t\t\"fieldname\": \"workstation\",\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"width\": 110,\n\t\t},\n\t\t{\"label\": _(\"Time (In Mins)\"), \"fieldname\": \"time_in_mins\", \"fieldtype\": \"Float\", \"width\": 120},\n\t\t{\n\t\t\t\"label\": _(\"Sub-assembly BOM Count\"),\n\t\t\t\"fieldname\": 
\"used_as_subassembly_items\",\n\t\t\t\"fieldtype\": \"Int\",\n\t\t\t\"width\": 200,\n\t\t},\n\t]\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 60, "n_words": 94, "vocab_size": 52, "complexity": 1, "nloc": 34, "token_counts": 200, "n_ast_nodes": 399, "n_identifiers": 3, "random_cut": "def get_columns(filters):\n\treturn [\n\t\t{\"label\": _(\"BOM ID\"), \"options\": \"BOM\", \"fieldname\": \"name\", \"fieldtype\": \"Link\", \"width\": 220},\n\t\t{\n\t\t\t\"label\": _(\"Item Code\"),\n\t\t\t\"options\": \"Item\",\n\t\t\t\"fieldname\": \"item\",\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"width\": 150,\n\t\t},\n\t\t{\"label\": _(\"Item Name\"), \"fieldname\": \"item_name\", \"fieldtype\": \"Data\", \"width\": 110},\n\t\t{\"la" }, { "id": 291107, "commit_id": "34607d4410a78fc6e41337e2b51600d1fdf39580", "repo": "core", "path": "homeassistant/components/mqtt/cover.py", "file_name": "cover.py", "fun_name": "supported_features", "commit_message": "Enforce CoverEntityFeature (#82457)\n\n* Enforce CoverEntityFeature\r\n\r\n* Adjust pylint", "code": "def supported_features(self) -> CoverEntityFeature:\n \n supported_features = CoverEntityFeature(0)\n if self._config.get(CONF_COMMAND_TOPIC) is not None:\n if self._config.get(CONF_PAYLOAD_OPEN) is not None:\n supported_features |= CoverEntityFeature.OPEN\n if self._config.get(CONF_PAYLOAD_CLOSE) is not None:\n supported_features |= CoverEntityFeature.CLOSE\n if self._config.get(CONF_PAYLOAD_STOP) is not None:\n supported_features |= CoverEntityFeature.STOP\n\n if self._config.get(CONF_SET_POSITION_TOPIC) is not None:\n supported_features |= CoverEntityFeature.SET_POSITION\n\n if self._config.get(CONF_TILT_COMMAND_TOPIC) is not None:\n supported_features |= TILT_FEATURES\n\n return supported_features\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 196, "n_words": 54, "vocab_size": 24, "complexity": 7, "nloc": 15, "token_counts": 117, "n_ast_nodes": 186, "n_identifiers": 16, "random_cut": "def supported_features(self) -> CoverEntityFeature:\n \n supported_features = CoverEntityFeature(0)\n if self._config.get(CONF_COMMAND_TOPIC) is not None:\n if self._config.get(CONF_PAYLOAD_OPEN) is not None:\n supported_features |= CoverEntityFeature.OPEN\n if self._config.get(CONF_PAYLOAD_CLOSE) is not None:\n supported_features |= CoverEntityFeature.CLOSE\n if self._config.get(CONF_PAYLOAD_STOP) is not None:\n supported_features |= CoverEntityFeature.STOP\n\n if self._config.get(CON" }, { "id": 250541, "commit_id": "53f60c88b1d7e4d817194f186d9730b32953d1a7", "repo": "mitmproxy", "path": "test/mitmproxy/test_http.py", "file_name": "test_http.py", "fun_name": "test_refresh", "commit_message": "fix a crash when refreshing headers with a negative unix timestamp, fix #5054 (#5078)", "code": "def test_refresh(self):\n r = tresp()\n n = time.time()\n r.headers[\"date\"] = email.utils.formatdate(n, usegmt=True)\n pre = r.headers[\"date\"]\n r.refresh(946681202)\n assert pre == r.headers[\"date\"]\n\n r.refresh(946681262)\n d = email.utils.parsedate_tz(r.headers[\"date\"])\n d = email.utils.mktime_tz(d)\n # Weird that this is not exact...\n assert abs(60 - (d - n)) <= 1\n\n cookie = \"MOO=BAR; Expires=Tue, 08-Mar-2011 00:20:38 GMT; Path=foo.com; Secure\"\n r.headers[\"set-cookie\"] = cookie\n r.refresh()\n # Cookie refreshing is tested in test_cookies, we just make sure that 
it's triggered here.\n assert cookie != r.headers[\"set-cookie\"]\n\n with mock.patch('mitmproxy.net.http.cookies.refresh_set_cookie_header') as m:\n m.side_effect = ValueError\n r.refresh(n)\n\n # Test negative unixtime, which raises on at least Windows.\n r.headers[\"date\"] = pre = \"Mon, 01 Jan 1601 00:00:00 GMT\"\n r.refresh(946681202)\n assert r.headers[\"date\"] == pre\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 275, "n_words": 107, "vocab_size": 76, "complexity": 1, "nloc": 21, "token_counts": 174, "n_ast_nodes": 301, "n_identifiers": 23, "random_cut": "def test_refresh(self):\n r = tresp()\n " }, { "id": 19322, "commit_id": "32b545fe7c35b57f280cd9d570f62839886f2e4b", "repo": "PythonRobotics", "path": "utils/angle.py", "file_name": "angle.py", "fun_name": "angle_mod", "commit_message": "Enhance dubins path docs (#664)\n\n* Engance dubins path docs\r\n\r\n* Update dubins_path.rst\r\n\r\n* fix doc artifact link in CI\r\n\r\n* wip\r\n\r\n* wip\r\n\r\n* wip\r\n\r\n* Update dubins_path.rst\r\n\r\n* wip\r\n\r\n* wip\r\n\r\n* wip\r\n\r\n* wip\r\n\r\n* wip", "code": "def angle_mod(x, zero_2_2pi=False, degree=False):\n \n if isinstance(x, float):\n is_float = True\n else:\n is_float = False\n\n x = np.asarray(x).flatten()\n if degree:\n x = np.deg2rad(x)\n\n if zero_2_2pi:\n mod_angle = x % (2 * np.pi)\n else:\n mod_angle = (x + np.pi) % (2 * np.pi) - np.pi\n\n if degree:\n mod_angle = np.rad2deg(mod_angle)\n\n if is_float:\n return mod_angle.item()\n else:\n return mod_angle\n", "url": "https://github.com/AtsushiSakai/PythonRobotics.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 141, "n_words": 55, "vocab_size": 30, "complexity": 6, "nloc": 18, "token_counts": 114, "n_ast_nodes": 185, "n_identifiers": 15, "random_cut": "def angle_mod(x, zero_2_2pi=False, degree=False):\n \n if isinstance(x, float):\n is_float = True\n else:\n is_float = False\n\n x = np.asarray(x).flatten()\n if degree:\n x = np.deg2rad(x)\n\n if zero_2_2pi:\n mod_angle = x % (2 * np.pi)\n else:\n mod_angle = (x + np.pi) % (2 * np.pi) - np.pi\n\n if degree:\n mod_angle = np.rad2deg(mod_angle)\n\n if is_float:\n return mod_angle.item()\n else" }, { "id": 122862, "commit_id": "b8ae8e3fa10f9abe998459fac1513915acee776d", "repo": "jax", "path": "tests/custom_object_test.py", "file_name": "custom_object_test.py", "fun_name": "_sp_data_hlo_lowering", "commit_message": "(NFC) Prepare for migration from producing MHLO to producing StableHLO\n\nThis CL renames occurrences of \"mhlo\" in: 1) names, 2) tests, 3) prose in order\nto prepare for the upcoming migration.\n\nUnchanged occurrences:\n 1) Public API that contains \"mhlo\", e.g. 
XlaLowering.mhlo and the \"mhlo\"\n argument value in Lowering.as_text and Lowering.compiler_ir.\n 2) Documentation (changelog, JEPs, IR examples, etc).\n 3) One rare situation where prose says \"StableHLO\" and \"MHLO\" in one sentence,\n so both are necessary to disambiguate.\n\nPiperOrigin-RevId: 495771153", "code": "def _sp_data_hlo_lowering(ctx, data_and_indices):\n return [data_and_indices[0]]\n\nmlir.register_lowering(sp_data_p, _sp_data_hlo_lowering)\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 6, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 33, "n_identifiers": 6, "random_cut": "def _sp_data_hlo_lowering(ctx, data_and_indices):\n return [data_and_indices[0]]\n\nmlir.register_lowering(sp_data_p, _sp_data_hlo_lowering)\n" }, { "id": 45973, "commit_id": "b1fdcdfe6778574c53bdf6bcbd59090c59605287", "repo": "airflow", "path": "airflow/models/mappedoperator.py", "file_name": "mappedoperator.py", "fun_name": "__del__", "commit_message": "Rename task-mapping trigger to 'expand' (#22106)", "code": "def __del__(self):\n if not self._expand_called:\n warnings.warn(f\"{self!r} was never mapped!\")\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 26, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 3, "token_counts": 18, "n_ast_nodes": 36, "n_identifiers": 5, "random_cut": "def __del__(self):\n if not self._expand_called:\n warnings.warn(f\"{self!r} wa" }, { "id": 260892, "commit_id": "f8991210f022270d640a302820ed4b9ec58b42c1", "repo": "scikit-learn", "path": "sklearn/linear_model/tests/test_ransac.py", "file_name": "test_ransac.py", "fun_name": "test_ransac_min_n_samples", "commit_message": "MAINT Clean deprecation for 1.2: Ransac losses (#24408)", "code": "def test_ransac_min_n_samples():\n estimator = LinearRegression()\n ransac_estimator1 = RANSACRegressor(\n estimator, min_samples=2, residual_threshold=5, random_state=0\n )\n ransac_estimator2 = RANSACRegressor(\n estimator,\n min_samples=2.0 / X.shape[0],\n residual_threshold=5,\n random_state=0,\n )\n ransac_estimator5 = RANSACRegressor(\n estimator, min_samples=2, residual_threshold=5, random_state=0\n )\n ransac_estimator6 = RANSACRegressor(estimator, residual_threshold=5, random_state=0)\n ransac_estimator7 = RANSACRegressor(\n estimator, min_samples=X.shape[0] + 1, residual_threshold=5, random_state=0\n )\n # GH #19390\n ransac_estimator8 = RANSACRegressor(\n Ridge(), min_samples=None, residual_threshold=5, random_state=0\n )\n\n ransac_estimator1.fit(X, y)\n ransac_estimator2.fit(X, y)\n ransac_estimator5.fit(X, y)\n ransac_estimator6.fit(X, y)\n\n assert_array_almost_equal(\n ransac_estimator1.predict(X), ransac_estimator2.predict(X)\n )\n assert_array_almost_equal(\n ransac_estimator1.predict(X), ransac_estimator5.predict(X)\n )\n assert_array_almost_equal(\n ransac_estimator1.predict(X), ransac_estimator6.predict(X)\n )\n\n with pytest.raises(ValueError):\n ransac_estimator7.fit(X, y)\n\n err_msg = \"`min_samples` needs to be explicitly set\"\n with pytest.raises(ValueError, match=err_msg):\n ransac_estimator8.fit(X, y)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 262, "n_words": 94, "vocab_size": 54, "complexity": 1, "nloc": 39, "token_counts": 251, "n_ast_nodes": 377, 
"n_identifiers": 25, "random_cut": "def test_ransac_min_n_samples():\n estimator = LinearRegression()\n ransac_estimator1 = RANSACRegressor(\n estimator, min_samples=2, residual_threshold=5, random_state=0\n )\n ransac_estimator2 = RANSACRegressor(\n estimator,\n min_samples=2.0 / X.shape[0],\n residual_threshold=5,\n random_state=0,\n )\n ransac_estimator5 = RANSACRegressor(\n estimator, min_samples=2, residual_threshold=5, random_state=0\n )\n ransac_estimator6 = RANSACRegressor(estimator, residual_threshold=5, random_state=0)\n ransac_estimator7 = RANSACRegressor(\n estimator, min_samples=X.shape[0] +" }, { "id": 193029, "commit_id": "77c8c91cad88a1e48da856ecb7957f4691244e21", "repo": "vision", "path": "test/test_prototype_transforms.py", "file_name": "test_prototype_transforms.py", "fun_name": "test__get_params", "commit_message": "[proto] Ported all transforms to the new API (#6305)\n\n* [proto] Added few transforms tests, part 1 (#6262)\r\n\r\n* Added supported/unsupported data checks in the tests for cutmix/mixup\r\n\r\n* Added RandomRotation, RandomAffine transforms tests\r\n\r\n* Added tests for RandomZoomOut, Pad\r\n\r\n* Update test_prototype_transforms.py\r\n\r\n* Added RandomCrop transform and tests (#6271)\r\n\r\n* [proto] Added GaussianBlur transform and tests (#6273)\r\n\r\n* Added GaussianBlur transform and tests\r\n\r\n* Fixing code format\r\n\r\n* Copied correctness test\r\n\r\n* [proto] Added random color transforms and tests (#6275)\r\n\r\n* Added random color transforms and tests\r\n\r\n* Disable smoke test for RandomSolarize, RandomAdjustSharpness\r\n\r\n* Added RandomPerspective and tests (#6284)\r\n\r\n- replaced real image creation by mocks for other tests\r\n\r\n* Added more functional tests (#6285)\r\n\r\n* [proto] Added elastic transform and tests (#6295)\r\n\r\n* WIP [proto] Added functional elastic transform with tests\r\n\r\n* Added more functional tests\r\n\r\n* WIP on elastic op\r\n\r\n* Added elastic transform and tests\r\n\r\n* Added tests\r\n\r\n* Added tests for ElasticTransform\r\n\r\n* Try to format code as in https://github.com/pytorch/vision/pull/5106\r\n\r\n* Fixed bug in affine get_params test\r\n\r\n* Implemented RandomErase on PIL input as fallback to tensors (#6309)\r\n\r\nAdded tests\r\n\r\n* Added image_size computation for BoundingBox.rotate if expand (#6319)\r\n\r\n* Added image_size computation for BoundingBox.rotate if expand\r\n\r\n* Added tests\r\n\r\n* Added erase_image_pil and eager/jit erase_image_tensor test (#6320)\r\n\r\n* Updates according to the review\r\n\r\nCo-authored-by: Vasilis Vryniotis ", "code": "def test__get_params(self, sigma):\n transform = transforms.GaussianBlur(3, sigma=sigma)\n params = transform._get_params(None)\n\n if isinstance(sigma, float):\n assert params[\"sigma\"][0] == params[\"sigma\"][1] == 10\n else:\n assert sigma[0] <= params[\"sigma\"][0] <= sigma[1]\n assert sigma[0] <= params[\"sigma\"][1] <= sigma[1]\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 92, "n_words": 32, "vocab_size": 21, "complexity": 2, "nloc": 8, "token_counts": 91, "n_ast_nodes": 138, "n_identifiers": 10, "random_cut": "def test__get_params(self, sigma):\n transform " }, { "id": 53023, "commit_id": "247f31c5e8f25511bec7308aa8081ff1e93ed4f2", "repo": "prefect", "path": "tests/utilities/test_logging.py", "file_name": "test_logging.py", "fun_name": "test_start_is_idempotent", "commit_message": "Use a worker fixture to ensure 
shutdown", "code": "def test_start_is_idempotent(self, worker):\n worker._send_thread = MagicMock()\n worker.start()\n worker.start()\n worker._send_thread.start.assert_called_once()\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 36, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 5, "token_counts": 33, "n_ast_nodes": 56, "n_identifiers": 7, "random_cut": "def test_start_is_idempotent(self, worker):\n work" }, { "id": 155068, "commit_id": "6f0ff798a23126a951b509ad3fea07e3d8c6b3db", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/hdk_on_native/test/test_dataframe.py", "file_name": "test_dataframe.py", "fun_name": "test_reset_index_multicolumns", "commit_message": "FIX-#4023: Fall back to pandas in case of MultiIndex columns (#5149)\n\nSigned-off-by: Andrey Pavlenko ", "code": "def test_reset_index_multicolumns(self, is_multiindex):\n index = (\n pandas.MultiIndex.from_tuples(\n [(i, j, k) for i in range(2) for j in range(3) for k in range(4)],\n names=[\"l1\", \"l2\", \"l3\"],\n )\n if is_multiindex\n else pandas.Index(np.arange(1, len(self.data[\"a\"]) + 1), name=\"index\")\n )\n data = np.array(list(self.data.values())).T\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 132, "n_words": 38, "vocab_size": 32, "complexity": 5, "nloc": 16, "token_counts": 129, "n_ast_nodes": 167, "n_identifiers": 22, "random_cut": "def test_reset_index_multicolumns(self, is_multiindex):\n index = (\n pandas.MultiIndex.from_tuples(\n [(i, j, k) for i in range(2) for j in range(3) for k in range(4)],\n names=[\"l1\"," }, { "id": 210250, "commit_id": "6e1fa92d3b5f8428c2e022669694fca6da9c9459", "repo": "PaddleDetection", "path": "docs/advanced_tutorials/openvino_inference/fairmot_onnx_openvino.py", "file_name": "fairmot_onnx_openvino.py", "fun_name": "prepare_input", "commit_message": "[Tutorial] Added a tutorial for OpenVINO with a demonstration for FairMOT model (#5368)\n\n* added OpenVINO fairmort tutorial\r\n\r\n* added Chinese version Readme.\r\n\r\n* removed unused code", "code": "def prepare_input():\n transforms = [\n T.Resize(size=(target_height, target_width)), \n T.Normalize(mean=(0,0,0), std=(1,1,1), data_format='HWC', to_rgb= True),\n T.Transpose()\n ]\n\n img_file = root_path / \"street.jpeg\"\n img = cv2.imread(str(img_file))\n normalized_img = T.Compose(transforms)(img)\n normalized_img = normalized_img.astype(np.float32, copy=False) / 255.0\n\n # add an new axis in front\n img_input = normalized_img[np.newaxis, :]\n # scale_factor is calculated as: im_shape / original_im_shape\n h_scale = target_height / img.shape[0]\n w_scale = target_width / img.shape[1]\n input = {\"image\": img_input, \"im_shape\": [target_height, target_width], \"scale_factor\": [h_scale, w_scale]}\n return input, img\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 133, "n_words": 73, "vocab_size": 58, "complexity": 1, "nloc": 15, "token_counts": 161, "n_ast_nodes": 245, "n_identifiers": 31, "random_cut": "def prepare_input():\n transforms = [\n T.Resize(size=(target_height, target_width)), \n T.Normalize(mean=(0,0,0), std=(1,1,1), da" }, { "id": 286257, "commit_id": "15e6e4ad85dbfc184db08f8beaf44999bea7f11e", "repo": "OpenBBTerminal", "path": 
"openbb_terminal/stocks/stocks_controller.py", "file_name": "stocks_controller.py", "fun_name": "call_qa", "commit_message": "Improve UI/UX styling regarding to spacing (#3073)\n\n* fix styling UI/UX\r\n\r\n* black formatting\r\n\r\n* fix tests\r\n\r\n* Update CONTRIBUTING.md\r\n\r\n* asterisk to hyphen\r\n\r\n* Update CONTRIBUTING.md\r\n\r\nCo-authored-by: James Maslek \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: Henrique Joaquim ", "code": "def call_qa(self, _):\n \n if self.ticker:\n from openbb_terminal.stocks.quantitative_analysis import qa_controller\n\n self.queue = self.load_class(\n qa_controller.QaController,\n self.ticker,\n self.start,\n self.interval,\n self.stock,\n self.queue,\n )\n else:\n console.print(\"Use 'load ' prior to this command!\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 182, "n_words": 27, "vocab_size": 27, "complexity": 2, "nloc": 13, "token_counts": 62, "n_ast_nodes": 95, "n_identifiers": 16, "random_cut": "def call_qa(self, _):\n \n if self.ticker:\n from openbb_terminal.stocks.quantitative_analysis import qa_controller\n\n self.queue = self.load_class(\n qa_controller.QaController,\n self.ticker,\n self.start,\n self.interval,\n self.stock,\n self.queue,\n )\n else:\n " }, { "id": 199500, "commit_id": "2021776197a252d17b8e8f57810bd12dae4d8a16", "repo": "sympy", "path": "sympy/concrete/products.py", "file_name": "products.py", "fun_name": "_eval_simplify", "commit_message": "Simplify function of Sum and Product on simplify", "code": "def _eval_simplify(self, **kwargs):\n from sympy.simplify.simplify import product_simplify\n rv = product_simplify(self, **kwargs)\n return rv.doit() if kwargs['doit'] else rv\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 37, "n_words": 17, "vocab_size": 16, "complexity": 2, "nloc": 4, "token_counts": 38, "n_ast_nodes": 59, "n_identifiers": 8, "random_cut": "def _eval_simplify(self, **kwargs):\n from sympy.simplify.simplify import product_simplify\n rv = product_simplify(self, **kwargs)\n return rv." 
}, { "id": 154031, "commit_id": "6ce9cf4daec7f9996038205289bce2186be87611", "repo": "modin", "path": "modin/_compat/pandas_api/py36/base.py", "file_name": "base.py", "fun_name": "expanding", "commit_message": "FEAT-#4147: Add partial compatibility with Python 3.6 and pandas 1.1 (#4301)\n\nSigned-off-by: Devin Petersohn \r\nSigned-off-by: Vasily Litvinov \r\nCo-authored-by: Alexey Prutskov \r\nCo-authored-by: Rehan Durrani \r\nCo-authored-by: Igoshev, Yaroslav \r\nCo-authored-by: Myachev, Anatoly ", "code": "def expanding(self, min_periods=1, center=None, axis=0):\n return self._expanding(min_periods=min_periods, center=center, axis=axis)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 15, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 34, "n_ast_nodes": 48, "n_identifiers": 6, "random_cut": "def expanding(self, min_periods=1, center=None, axis=0):\n ret" }, { "id": 98945, "commit_id": "fdc0fb39fd9bebffb5923c79d52e39b882babd0c", "repo": "sentry", "path": "tests/sentry/discover/test_arithmetic.py", "file_name": "test_arithmetic.py", "fun_name": "test_function_values", "commit_message": "chore(discover): remove use_snql (#33917)\n\n- This removes the parameter that allows the use of snql in discover\r\n- adds a todo to update factes_perf which calls it directly, need to\r\n convert this to snql", "code": "def test_function_values(lhs, op, rhs):\n for with_brackets in [False, True]:\n equation = f\"{lhs}{op}{rhs}\"\n if with_brackets:\n equation = f\"({equation}) + 5\"\n result, fields, functions = parse_arithmetic(equation)\n if with_brackets:\n assert result.operator == \"plus\"\n assert isinstance(result.lhs, Operation)\n assert result.rhs == 5.0\n result = result.lhs\n assert result.operator == op_map[op.strip()], equation\n assert result.lhs == lhs, equation\n assert result.rhs == rhs, equation\n assert len(fields) == 0\n if isinstance(lhs, str):\n assert lhs in functions, equation\n if isinstance(rhs, str):\n assert rhs in functions, equation\n\n\n@pytest.mark.parametrize(\n \"equation\",\n [\n \"1 +\",\n \"+ 1 + 1\",\n \"1 + 1 +\",\n \"1 ** 2\",\n \"1 -- 1\",\n \"hello world\",\n \"\",\n \"+\",\n ],\n)", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"equation\",\n [\n \"1 +\",\n \"+ 1 + 1\",\n \"1 + 1 +\",\n \"1 ** 2\",\n \"1 -- 1\",\n \"hello world\",\n \"\",\n \"+\",\n ],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 312, "n_words": 100, "vocab_size": 59, "complexity": 6, "nloc": 19, "token_counts": 136, "n_ast_nodes": 270, "n_identifiers": 20, "random_cut": "def test_function_values(lhs, op, rhs):\n for with_brackets in [False, True]:\n equation = f\"{lhs}{op}{rhs}\"\n if with_brackets:\n equation = f\"({equation}) + 5\"\n result, fields, functions = parse_arithmetic(equation)\n if with_brackets:\n assert result.operator == \"plus\"\n assert isinstance(result.lhs, Operation)\n assert result.rhs == 5.0\n result = result.lhs\n assert result.operator == op_map[op.strip()], equation\n assert result.lhs == lhs, equation\n assert result.rhs == rhs, equation\n assert len(fields) == 0\n if isinstance(lhs, str):\n assert lhs in functions, equation" }, { "id": 19190, "commit_id": "ee9532aba858a140d26254a7799469137340fd9b", "repo": "mlflow", "path": "examples/lightgbm/lightgbm_sklearn/utils.py", "file_name": "utils.py", "fun_name": "fetch_logged_data", "commit_message": "Autologging 
functionality for scikit-learn integration with LightGBM (Part 2) (#5200)\n\n* init commit, to-do: examples\r\n\r\nSigned-off-by: Junwen Yao \r\n\r\n* add examples, update doc\r\n\r\nSigned-off-by: Junwen Yao \r\n\r\n* re-start example test\r\n\r\nSigned-off-by: Junwen Yao \r\n\r\n* update\r\n\r\nSigned-off-by: Junwen Yao \r\n\r\n* check sagemaker\r\n\r\nSigned-off-by: Junwen Yao \r\n\r\n* [resolve conflict] update\r\n\r\nSigned-off-by: Junwen Yao ", "code": "def fetch_logged_data(run_id):\n \n client = mlflow.tracking.MlflowClient()\n data = client.get_run(run_id).data\n # Exclude system tags: https://www.mlflow.org/docs/latest/tracking.html#system-tags\n tags = {k: v for k, v in data.tags.items() if not k.startswith(\"mlflow.\")}\n artifacts = list(yield_artifacts(run_id))\n return {\n \"params\": data.params,\n \"metrics\": data.metrics,\n \"tags\": tags,\n \"artifacts\": artifacts,\n }\n", "url": "https://github.com/mlflow/mlflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 91, "n_words": 39, "vocab_size": 35, "complexity": 3, "nloc": 11, "token_counts": 84, "n_ast_nodes": 143, "n_identifiers": 18, "random_cut": "def fetch_logged_data(run_id):\n \n client = mlflow.tracking.MlflowClient()\n data = client.get_run(run_id).data\n # Exclude system tags: https://www.mlflow.org/docs/latest/tracking.html#system-tags\n tags = {k: v for " }, { "id": 72287, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_workflows.py", "file_name": "test_workflows.py", "fun_name": "test_status_after_cancel", "commit_message": "Reformat with black", "code": "def test_status_after_cancel(self):\n # start workflow, then cancel\n self.submit()\n self.submit(\"action-cancel-workflow\")\n response = self.client.get(self.edit_url)\n self.assertContains(response, \"Draft saved\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 49, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 36, "n_ast_nodes": 64, "n_identifiers": 8, "random_cut": "def test_status_after_cancel(self):\n # start workflow, then cancel\n self.submit" }, { "id": 149302, "commit_id": "bcf326a035f4efe144de2a6f21544d6702c0eeb8", "repo": "freqtrade", "path": "tests/test_configuration.py", "file_name": "test_configuration.py", "fun_name": "test_validate_ask_orderbook", "commit_message": "Initial steps to change bid/ask pricing to enter/exit", "code": "def test_validate_ask_orderbook(default_conf, caplog) -> None:\n conf = deepcopy(default_conf)\n conf['exit_pricing']['use_order_book'] = True\n conf['exit_pricing']['order_book_min'] = 2\n conf['exit_pricing']['order_book_max'] = 2\n\n validate_config_consistency(conf)\n assert log_has_re(r\"DEPRECATED: Please use `order_book_top` instead of.*\", caplog)\n assert conf['exit_pricing']['order_book_top'] == 2\n\n conf['exit_pricing']['order_book_max'] = 5\n\n with pytest.raises(OperationalException,\n match=r\"Using order_book_max != order_book_min in exit_pricing.*\"):\n validate_config_consistency(conf)\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 97, "n_words": 42, "vocab_size": 32, "complexity": 1, "nloc": 12, "token_counts": 90, "n_ast_nodes": 158, "n_identifiers": 11, "random_cut": "def test_validate_ask_orderbook(default_conf, caplog) -> None:\n conf = 
deepcopy(default_conf)\n conf['exit_pricing']['use_order_book'] = True\n conf['exit_prici" }, { "id": 181233, "commit_id": "e6336d688259494205ff4616ff2c03d5460b36bc", "repo": "gradio", "path": "test/test_blocks.py", "file_name": "test_blocks.py", "fun_name": "test_with_update", "commit_message": "Fix bug with gr.update and interactive=True (#2639)\n\n* Fix update interactivity\r\n\r\n* Lint\r\n\r\n* CHANGELOG\r\n\r\n* Fix\r\n\r\n* Undo interactive=True\r\n\r\n* Do not call update twice\r\n\r\n* Add unit test\r\n\r\n* Revert change\r\n\r\n* Lint", "code": "def test_with_update(self):\n specific_update = gr.Textbox.get_specific_update(\n {\"lines\": 4, \"__type__\": \"update\", \"interactive\": False}\n )\n assert specific_update == {\n \"lines\": 4,\n \"max_lines\": None,\n \"placeholder\": None,\n \"label\": None,\n \"show_label\": None,\n \"visible\": None,\n \"value\": gr.components._Keywords.NO_VALUE,\n \"__type__\": \"update\",\n \"mode\": \"static\",\n }\n\n specific_update = gr.Textbox.get_specific_update(\n {\"lines\": 4, \"__type__\": \"update\", \"interactive\": True}\n )\n assert specific_update == {\n \"lines\": 4,\n \"max_lines\": None,\n \"placeholder\": None,\n \"label\": None,\n \"show_label\": None,\n \"visible\": None,\n \"value\": gr.components._Keywords.NO_VALUE,\n \"__type__\": \"update\",\n \"mode\": \"dynamic\",\n }\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 343, "n_words": 68, "vocab_size": 29, "complexity": 1, "nloc": 29, "token_counts": 143, "n_ast_nodes": 257, "n_identifiers": 9, "random_cut": "def test_with_update(self):\n specific_update = gr.Textbox.get_specific_update(\n {\"lines\": 4, \"__type__\": \"update\", \"interact" }, { "id": 25696, "commit_id": "f5a45de4a22fecacfcd5b2cd18c07e5cf95ce27c", "repo": "saleor", "path": "saleor/csv/tests/export/test_export.py", "file_name": "test_export.py", "fun_name": "test_append_to_file_for_xlsx", "commit_message": "Feature/gift cards post mvp (#7977)\n\n* Add giftCardBulkCreate mutation\r\n\r\n* Extend OrderFilter with giftCardUsed and giftCardBought fields\r\n\r\n* Allow exporting gift cards\r\n\r\n* Update the name of the email template for export\r\n\r\n* Add exportGiftCards muttaion\r\n\r\n* Add used gift card filter\r\n\r\n* Export only unused gift cards\r\n\r\n* Block mutations for expired gift cards (#8115)\r\n\r\n* Block mutations for expired gift cards\r\n\r\n* Block only resending and activating expired gift cards\r\n\r\n* Add celery schedule task for deactivate expired cards (#8100)\r\n\r\n* Add gift card section to invoice (#8148)\r\n\r\n* Add filtering on gift card events (#8090)\r\n\r\n* Add filtering on gift card events\r\n\r\n* Filter gift card events by orders instead of order_id\r\n\r\n* Update populatedb with gift card data (#8016)\r\n\r\n* Generate gift cards with events in populate db\r\n\r\n* Set product types kinds and add placeholder for gift card product\r\n\r\n* Add dedicated gift card product images\r\n\r\n* Change order of order emails (#8168)\r\n\r\n* Drop duplicated kind field from producttype in populatedb (#8224)\r\n\r\n* Change gift card display_code field to last_4 (#8445)\r\n\r\n* Change gift card display_code field to last_4\r\n\r\n* Change last4 to last4CodeChars\r\n\r\n* Fix github test env action configuration\r\n\r\n* Drop filtering gift cards by tag\r\n\r\n* Fix export gift card tags test\r\n\r\n* Re-add gift card tags query (#8412)\r\n\r\n* Update populatedb with gift card 
data (#8016)\r\n\r\n* Generate gift cards with events in populate db\r\n\r\n* Set product types kinds and add placeholder for gift card product\r\n\r\n* Add dedicated gift card product images\r\n\r\n* Add giftCardTags model\r\n\r\n* Add giftCardTags query\r\n\r\nCo-authored-by: Iga Karbowiak <40886528+IKarbowiak@users.noreply.github.com>\r\nCo-authored-by: IKarbowiak \r\n\r\n* Do not create EXPIRY_DATE_UPDATED gift card event when expiry date is not changed (#8882)\r\n\r\nCo-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>", "code": "def test_append_to_file_for_xlsx(user_export_file, tmpdir, media_root):\n # given\n export_data = [\n {\"id\": \"123\", \"name\": \"test1\", \"collections\": \"coll1\"},\n {\"id\": \"345\", \"name\": \"test2\"},\n ]\n expected_headers = [\"id\", \"name\", \"collections\"]\n delimiter = \",\"\n\n table = etl.fromdicts(\n [{\"id\": \"1\", \"name\": \"A\"}], header=expected_headers, missing=\" \"\n )\n\n temp_file = NamedTemporaryFile(suffix=\".xlsx\")\n etl.io.xlsx.toxlsx(table, temp_file.name)\n\n # when\n append_to_file(export_data, expected_headers, temp_file, FileTypes.XLSX, delimiter)\n\n # then\n user_export_file.refresh_from_db()\n\n wb_obj = openpyxl.load_workbook(temp_file)\n\n sheet_obj = wb_obj.active\n max_col = sheet_obj.max_column\n max_row = sheet_obj.max_row\n expected_headers = expected_headers\n headers = [sheet_obj.cell(row=1, column=i).value for i in range(1, max_col + 1)]\n data = []\n for i in range(2, max_row + 1):\n row = []\n for j in range(1, max_col + 1):\n row.append(sheet_obj.cell(row=i, column=j).value)\n data.append(row)\n\n assert headers == expected_headers\n assert list(export_data[0].values()) in data\n row2 = list(export_data[1].values())\n # add string with space for collections column\n row2.append(\" \")\n assert row2 in data\n\n temp_file.close()\n shutil.rmtree(tmpdir)\n\n\n@patch(\"saleor.csv.utils.export.BATCH_SIZE\", 1)", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@patch(\"saleor.csv.utils.export.BATCH_SIZE\", 1)", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 270, "n_words": 132, "vocab_size": 88, "complexity": 4, "nloc": 33, "token_counts": 280, "n_ast_nodes": 480, "n_identifiers": 48, "random_cut": "def test_append_to_file_for_xlsx(user_export_file, tmpdir, media_root):\n # given\n export_data = [\n {\"id\": \"123\", \"name\": \"test1\", \"collections\": \"coll1\"},\n {\"id\": \"345\", \"name\": \"test2\"},\n ]\n expected_headers = [\"id\", \"name\", \"collections\"]\n delimiter = \",\"\n\n table = etl.fromdicts(\n [{\"id\": \"1\", \"name\": \"A\"}], header=expected_headers, missing=\" \"\n )\n\n temp_file = NamedTemporaryFile(suffix=\".xlsx\")\n etl.io.xlsx.toxlsx(table, temp_file.name)\n\n # when\n append_to_file(export_data, expected_headers, temp_file, FileTypes.XLSX, delimiter)\n\n # then\n user_export_file.refresh_from_db()\n\n wb_obj = openpyxl.load_workbook(temp_file)\n\n sheet_obj = wb_obj.active\n max_col = sheet_obj.max_column\n max_row = sheet_obj.max_row\n expected_headers = expected_headers\n headers = [sheet_obj.cell(row=1, column=i).value for i in range(1, max_col + 1)]\n data = []\n for i in range(2, max_row + 1):\n row = []\n for j in range(1, max_col + 1):\n row.append(sheet_obj.cell(row=i, column=j).value)\n data.append(" }, { "id": 192979, "commit_id": "6ca9c76adb6daf2695d603ad623a9cf1c4f4806f", "repo": "vision", "path": "references/optical_flow/utils.py", "file_name": "utils.py", "fun_name": "sequence_loss", 
"commit_message": "Upgrade usort to `1.0.2` and black to 22.3.0 (#5106)\n\n* upgrade usort to\r\n\r\n* Also update black\r\n\r\n* Actually use 1.0.2\r\n\r\n* Apply pre-commit\r\n\r\nCo-authored-by: Nicolas Hug ", "code": "def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400):\n \n\n if gamma > 1:\n raise ValueError(f\"Gamma should be < 1, got {gamma}.\")\n\n # exlude invalid pixels and extremely large diplacements\n flow_norm = torch.sum(flow_gt**2, dim=1).sqrt()\n valid_flow_mask = valid_flow_mask & (flow_norm < max_flow)\n\n valid_flow_mask = valid_flow_mask[:, None, :, :]\n\n flow_preds = torch.stack(flow_preds) # shape = (num_flow_updates, batch_size, 2, H, W)\n\n abs_diff = (flow_preds - flow_gt).abs()\n abs_diff = (abs_diff * valid_flow_mask).mean(axis=(1, 2, 3, 4))\n\n num_predictions = flow_preds.shape[0]\n weights = gamma ** torch.arange(num_predictions - 1, -1, -1).to(flow_gt.device)\n flow_loss = (abs_diff * weights).sum()\n\n return flow_loss\n\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 133, "n_words": 86, "vocab_size": 65, "complexity": 2, "nloc": 13, "token_counts": 157, "n_ast_nodes": 244, "n_identifiers": 24, "random_cut": "def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400):\n \n\n if gamma > 1:\n raise ValueError(f\"Gamma should be < 1, " }, { "id": 103955, "commit_id": "b91eaa3b2a48bc7b34d60e3a429170835cf2a882", "repo": "kitty", "path": "publish.py", "file_name": "publish.py", "fun_name": "__call__", "commit_message": "Cleanup the asset upload code", "code": "def __call__(self) -> None:\n # See https://docs.github.com/en/rest/releases/assets#upload-a-release-asset\n # self.clean_older_releases(releases)\n release = self.create_release()\n upload_url = release['upload_url'].partition('{')[0]\n asset_url = f'{self.url_base}/assets/{{}}'\n existing_assets = self.existing_assets(release['id'])\n", "url": "https://github.com/kovidgoyal/kitty.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 62, "n_words": 21, "vocab_size": 17, "complexity": 11, "nloc": 39, "token_counts": 257, "n_ast_nodes": 86, "n_identifiers": 9, "random_cut": "def __call__(self) -> None:\n # See ht" }, { "id": 211023, "commit_id": "c84153a355d9855fe55cf51d203b8b24e7d884e5", "repo": "PaddleDetection", "path": "deploy/pptracking/python/mot/matching/ocsort_matching.py", "file_name": "ocsort_matching.py", "fun_name": "iou_batch", "commit_message": "[MOT] Add OC_SORT tracker (#6272)\n\n* add ocsort tracker\r\n\r\n* add ocsort deploy\r\n\r\n* merge develop\r\n\r\n* fix ocsort tracker codes\r\n\r\n* fix doc, test=document_fix\r\n\r\n* fix doc, test=document_fix", "code": "def iou_batch(bboxes1, bboxes2):\n \n bboxes2 = np.expand_dims(bboxes2, 0)\n bboxes1 = np.expand_dims(bboxes1, 1)\n\n xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0])\n yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1])\n xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2])\n yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3])\n w = np.maximum(0., xx2 - xx1)\n h = np.maximum(0., yy2 - yy1)\n wh = w * h\n o = wh / ((bboxes1[..., 2] - bboxes1[..., 0]) *\n (bboxes1[..., 3] - bboxes1[..., 1]) +\n (bboxes2[..., 2] - bboxes2[..., 0]) *\n (bboxes2[..., 3] - bboxes2[..., 1]) - wh)\n return (o)\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, 
"n_whitespaces": 158, "n_words": 83, "vocab_size": 45, "complexity": 1, "nloc": 15, "token_counts": 214, "n_ast_nodes": 298, "n_identifiers": 15, "random_cut": "def iou_batch(bboxes1, bboxes2):\n \n bboxes2 = np.expand_dims(bboxes2, 0)\n bboxes1 = np.expand_dims(bboxes1, 1)\n\n xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0])\n yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1])\n xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2])\n yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3])\n w = np.maximum(0., x" }, { "id": 320874, "commit_id": "8ed08eb213f6218f779f5e79884aba6f3a594397", "repo": "qutebrowser", "path": "qutebrowser/browser/commands.py", "file_name": "commands.py", "fun_name": "search_next", "commit_message": "search: Move wrap argument to next/prev_result\n\nThe fact that we need to specify this while searching rather than when\n\"zapping\" through the results make no sense. It makes both the API as\nwell as our own code more complex.", "code": "def search_next(self, count=1):\n \n tab = self._current_widget()\n window_text = self._tabbed_browser.search_text\n window_options = self._tabbed_browser.search_options\n\n if window_text is None:\n raise cmdutils.CommandError(\"No search done yet.\")\n\n tab.scroller.before_jump_requested.emit()\n\n if window_text is not None and window_text != tab.search.text:\n tab.search.clear()\n tab.search.search(window_text, **window_options)\n count -= 1\n\n if count == 0:\n return\n\n cb = functools.partial(self._search_cb, tab=tab,\n old_match=tab.search.match,\n options=window_options, text=window_text,\n prev=False)\n wrap = config.val.search.wrap\n\n for _ in range(count - 1):\n tab.search.next_result(wrap=wrap)\n tab.search.next_result(result_cb=cb, wrap=wrap)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 302, "n_words": 62, "vocab_size": 51, "complexity": 6, "nloc": 21, "token_counts": 170, "n_ast_nodes": 266, "n_identifiers": 33, "random_cut": "def search_next(self, count=1):\n \n tab = self._current_widget()\n window_text = self." 
}, { "id": 259158, "commit_id": "1c094728a33f05bb6c83d7b856b87254964e0e8c", "repo": "scikit-learn", "path": "sklearn/linear_model/_base.py", "file_name": "_base.py", "fun_name": "fit", "commit_message": "CLN clean _preprocess_data in linear_model (#22762)", "code": "def fit(self, X, y, sample_weight=None):\n \n\n _normalize = _deprecate_normalize(\n self.normalize, default=False, estimator_name=self.__class__.__name__\n )\n\n n_jobs_ = self.n_jobs\n\n accept_sparse = False if self.positive else [\"csr\", \"csc\", \"coo\"]\n\n X, y = self._validate_data(\n X, y, accept_sparse=accept_sparse, y_numeric=True, multi_output=True\n )\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(\n sample_weight, X, dtype=X.dtype, only_non_negative=True\n )\n\n X, y, X_offset, y_offset, X_scale = _preprocess_data(\n X,\n y,\n fit_intercept=self.fit_intercept,\n normalize=_normalize,\n copy=self.copy_X,\n sample_weight=sample_weight,\n return_mean=True,\n )\n\n if sample_weight is not None:\n # Sample weight can be implemented via a simple rescaling.\n X, y = _rescale_data(X, y, sample_weight)\n\n if self.positive:\n if y.ndim < 2:\n self.coef_ = optimize.nnls(X, y)[0]\n else:\n # scipy.optimize.nnls cannot handle y with shape (M, K)\n outs = Parallel(n_jobs=n_jobs_)(\n delayed(optimize.nnls)(X, y[:, j]) for j in range(y.shape[1])\n )\n self.coef_ = np.vstack([out[0] for out in outs])\n elif sp.issparse(X):\n X_offset_scale = X_offset / X_scale\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 504, "n_words": 128, "vocab_size": 88, "complexity": 13, "nloc": 54, "token_counts": 431, "n_ast_nodes": 384, "n_identifiers": 47, "random_cut": "def fit(self, X, y, sample_weight=None):\n \n\n _normalize = _deprecate_normalize(\n self.normalize, default=False, estimator_name=self.__class__.__name__\n )\n\n n_jobs_ = self.n_jobs\n\n accept_sparse = False if self.positive else [\"csr\", \"csc\", \"coo\"]\n\n X, y = self._validate_data(\n X, y, accept_sparse=accept_sparse, y_numeric=True, multi_output=True\n )\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(\n sample_weight, X, dtype=X.dtype, only_non_negative=True\n )\n\n X, y, X_offset, y_offset, X_scale = _preprocess_data(\n X,\n y,\n fit_intercept=self.fit_intercept,\n norm" }, { "id": 289840, "commit_id": "115a1ceea0c568ffb466e03b4ab322f253ccf151", "repo": "core", "path": "homeassistant/components/unifi/switch.py", "file_name": "switch.py", "fun_name": "options_updated", "commit_message": "Rewrite UniFi block client switch (#80969)\n\n* Refactor UniFi block client switch entities\r\n\r\n* Use new switch loader\r\n\r\n* Rename lambdas\r\n\r\n* Use is_on rather than _attr_is_on when applicable", "code": "async def options_updated(self) -> None:\n \n if self._obj_id not in self.controller.option_block_clients:\n await self.remove_item({self._obj_id})\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 37, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 4, "token_counts": 31, "n_ast_nodes": 53, "n_identifiers": 6, "random_cut": "async def options_updated(self) -> None:\n \n" }, { "id": 42341, "commit_id": "85c8ed632a5e9ec45b7d69d89518bb784a6e05a5", "repo": "seaborn", "path": "tests/test_categorical.py", "file_name": "test_categorical.py", "fun_name": "test_on_facetgrid", "commit_message": "Add label parameter to 
pointplot (#3016)\n\n* Add label parameter to pointplot\r\n\r\nUnbreaks FacetGrid + pointplot (fixes #3004) and is generally useful.\r\n\r\n* Update default kws in pointplot tests\r\n\r\n* Update release notes", "code": "def test_on_facetgrid(self, long_df):\n\n g = FacetGrid(long_df, hue=\"a\")\n g.map(pointplot, \"a\", \"y\")\n g.add_legend()\n\n order = categorical_order(long_df[\"a\"])\n legend_texts = [t.get_text() for t in g.legend.texts]\n assert legend_texts == order\n\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 66, "n_words": 25, "vocab_size": 21, "complexity": 2, "nloc": 7, "token_counts": 62, "n_ast_nodes": 103, "n_identifiers": 16, "random_cut": "def test_on_facetgrid(self, long_df):\n\n g = FacetGrid(long_df, hue=\"a\")\n g.map(pointplot, \"a\", \"y\")\n g.add_legend()\n\n order = categorical_order(long_df[\"a\"])\n legend_texts = [t.get_text() for t in g.legend.texts]\n assert legend_texts == order\n\n" }, { "id": 252009, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/test_http.py", "file_name": "test_http.py", "fun_name": "test_items", "commit_message": "make it black!", "code": "def test_items(self):\n headers = Headers(\n [\n (b\"Set-Cookie\", b\"foo\"),\n (b\"Set-Cookie\", b\"bar\"),\n (b\"Accept\", b\"text/plain\"),\n ]\n )\n assert list(headers.items()) == [\n (\"Set-Cookie\", \"foo, bar\"),\n (\"Accept\", \"text/plain\"),\n ]\n assert list(headers.items(multi=True)) == [\n (\"Set-Cookie\", \"foo\"),\n (\"Set-Cookie\", \"bar\"),\n (\"Accept\", \"text/plain\"),\n ]\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 198, "n_words": 35, "vocab_size": 24, "complexity": 1, "nloc": 17, "token_counts": 93, "n_ast_nodes": 153, "n_identifiers": 7, "random_cut": "def test_items(self):\n headers = Headers(\n [\n (b\"Set-Cookie\", b\"foo\"),\n (b\"Set-Cookie\", b\"bar\"),\n (b\"Accept\", b\"text/plain\"),\n ]\n )\n assert list(headers.items()) == [\n (\"Set-Cookie\", \"foo, bar\"),\n " }, { "id": 317065, "commit_id": "5930f056a8245d8f27a7d54cb2c126a64eb13d98", "repo": "core", "path": "tests/components/mqtt/test_init.py", "file_name": "test_init.py", "fun_name": "test_publish_or_subscribe_without_valid_config_entry", "commit_message": "Mqtt support config_entry unload (#70149)\n\n* squashed commits for rebase\r\n\r\n* Flake\r\n\r\n* Fix reloading issue manual legacy items\r\n\r\n* Improve ACS sync for unsubscribe at disconnect\r\n\r\n* Processed review comments\r\n\r\n* Update homeassistant/components/mqtt/client.py\r\n\r\nCo-authored-by: Erik Montnemery \r\n\r\n* No need to await entry setup\r\n\r\n* Remove complication is_connected\r\n\r\n* Update homeassistant/components/mqtt/__init__.py\r\n\r\nCo-authored-by: Erik Montnemery ", "code": "async def test_publish_or_subscribe_without_valid_config_entry(hass, caplog):\n \n with pytest.raises(HomeAssistantError):\n await mqtt.async_publish(\n hass, \"some-topic\", \"test-payload\", qos=0, retain=False, encoding=None\n )\n with pytest.raises(HomeAssistantError):\n await mqtt.async_subscribe(hass, \"some-topic\", lambda: None, qos=0)\n\n\n@patch(\"homeassistant.components.mqtt.PLATFORMS\", [Platform.LIGHT])", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@patch(\"homeassistant.components.mqtt.PLATFORMS\", [Platform.LIGHT])", 
"n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 65, "n_words": 25, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 64, "n_ast_nodes": 126, "n_identifiers": 15, "random_cut": "async def test_publish_or_subscribe_without_valid_config_entry(hass, caplog):\n \n with py" }, { "id": 58375, "commit_id": "8a4560e237b90a7b64c6bb77b6cb3ee9a6648e33", "repo": "prefect", "path": "tests/test_agent.py", "file_name": "test_agent.py", "fun_name": "test_agent_runs_multiple_work_queues", "commit_message": "Agents support multiple queues", "code": "async def test_agent_runs_multiple_work_queues(orion_client, session, flow):\n # create two deployments\n deployment_a = await models.deployments.create_deployment(\n session=session,\n deployment=schemas.core.Deployment(\n name=\"deployment-a\",\n flow_id=flow.id,\n work_queue_name=\"a\",\n ),\n )\n deployment_b = await models.deployments.create_deployment(\n session=session,\n deployment=schemas.core.Deployment(\n name=\"deployment-b\",\n flow_id=flow.id,\n work_queue_name=\"b\",\n ),\n )\n await session.commit()\n\n # create two runs\n flow_run_a = await orion_client.create_flow_run_from_deployment(\n deployment_a.id,\n state=Scheduled(scheduled_time=pendulum.now(\"utc\")),\n )\n flow_run_b = await orion_client.create_flow_run_from_deployment(\n deployment_b.id,\n state=Scheduled(scheduled_time=pendulum.now(\"utc\")),\n )\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 219, "n_words": 51, "vocab_size": 31, "complexity": 1, "nloc": 36, "token_counts": 227, "n_ast_nodes": 229, "n_identifiers": 26, "random_cut": "async def test_agent_runs_multiple_work_queues(orion_client, session, flow):\n # create two deployments\n deployment_a = await models.deployments.create_deployment(\n session=session,\n deployment=schemas.core.Deployment(\n name=\"deployment-a\",\n flow_id=flow.id,\n work_queue_name=\"a\",\n ),\n )\n deployment_b = await models.deployments.create_deployment(\n session=session,\n deployment=schemas.core.Deployment(\n name=\"deployment-b\",\n flow_id=flow.id,\n " }, { "id": 86475, "commit_id": "5399ff90c187057135dcf409a3367957325f77ed", "repo": "sentry", "path": "src/sentry/ingest/billing_metrics_consumer.py", "file_name": "billing_metrics_consumer.py", "fun_name": "_bulk_commit", "commit_message": "feat(metrics): Add metrics consumer for billing outcomes (#39236)\n\nAdd a consumer that reads from `snuba-metrics` (the topic the metrics\r\nindexer produces to), and calls `track_outcome` for processed\r\ntransactions.\r\n\r\n`d:transactions/duration@millisecond` is the one metric that we\r\nunconditionally extract for every transaction, so the number of elements\r\nin the bucket value corresponds to the number of transactions that\r\ncontributed to the bucket of this metric. 
For each bucket, report this\r\nlength as the `quantity` of the tracked outcome.\r\n\r\nFuture work:\r\n\r\nReplace the usage of `track_outcome` with a streaming consumer that\r\nawaits delivery of outcomes before committing offsets.\r\n\r\nCo-authored-by: Joris Bayer ", "code": "def _bulk_commit(self) -> None:\n self.__commit(self.__ready_to_commit)\n self.__ready_to_commit = {}\n self.__messages_since_last_commit = 0\n self.__last_commit = datetime.now()\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 41, "n_words": 14, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 35, "n_ast_nodes": 58, "n_identifiers": 8, "random_cut": "def _bulk_commit(self) -> None:\n self.__commit(self.__ready_to_commit)\n self.__ready_to_commit = {}\n self.__messages_since_last_commit = 0\n self.__last_commit =" }, { "id": 48067, "commit_id": "6178491a117924155963586b246d2bf54be5320f", "repo": "airflow", "path": "tests/api_connexion/endpoints/test_dag_run_endpoint.py", "file_name": "test_dag_run_endpoint.py", "fun_name": "test_should_respond_200", "commit_message": "Add fields to dagrun endpoint (#23440)\n\n* Add below fields to dagrun endpoint :\r\n\r\n* data_interval_start\r\n* data_interval_end\r\n* last_scheduling_decision\r\n* run_type\r\n\r\n* Refactor hardcoded dates with constants.", "code": "def test_should_respond_200(self, session):\n dagrun_model = DagRun(\n dag_id=\"TEST_DAG_ID\",\n run_id=\"TEST_DAG_RUN_ID\",\n run_type=DagRunType.MANUAL,\n execution_date=timezone.parse(self.default_time),\n start_date=timezone.parse(self.default_time),\n external_trigger=True,\n state='running',\n )\n session.add(dagrun_model)\n session.commit()\n result = session.query(DagRun).all()\n assert len(result) == 1\n response = self.client.get(\n \"api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID\", environ_overrides={'REMOTE_USER': \"test\"}\n )\n assert response.status_code == 200\n expected_response = {\n 'dag_id': 'TEST_DAG_ID',\n 'dag_run_id': 'TEST_DAG_RUN_ID',\n 'end_date': None,\n 'state': 'running',\n 'logical_date': self.default_time,\n 'execution_date': self.default_time,\n 'external_trigger': True,\n 'start_date': self.default_time,\n 'conf': {},\n 'data_interval_end': None,\n 'data_interval_start': None,\n 'last_scheduling_decision': None,\n 'run_type': 'manual',\n }\n assert response.json == expected_response\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 382, "n_words": 68, "vocab_size": 54, "complexity": 1, "nloc": 34, "token_counts": 179, "n_ast_nodes": 304, "n_identifiers": 30, "random_cut": "def test_should_respond_200(self, session):\n dagrun_model = DagRun(\n dag_id=\"TEST_DAG_ID\",\n run_id=\"TEST_DAG_RUN_ID\",\n run_type=DagRunType.MANUAL,\n execution_date=timezone.parse(self.default_time),\n start_date=timezone.parse(self.default_time),\n external_trigger=True,\n state='running',\n )\n session.add(dagrun_model)\n session.commit()\n result = session.query(DagRun).all()\n assert len(result) == 1\n response = self.client.get(\n \"api/v1/dags/TEST_DAG_ID/dagRuns/TEST_DAG_RUN_ID\", environ_overrides={'REMOTE_USER': \"test\"}\n )\n assert response.status_code == 200\n expected_response = {\n 'dag_id': 'TEST_DAG_ID',\n 'dag_run_id': 'TEST_DAG_RUN_ID',\n 'end_date': None,\n 'state': 'running',\n 'logical_date': self.default_time,\n 'execution_date': self.default_time,\n 'external_trigger': True,\n 
'start_date': self.default_time,\n 'conf': {},\n 'data_interval_" }, { "id": 110052, "commit_id": "16c67c8c4cbb6174853738fa7fe401109861e0c2", "repo": "matplotlib", "path": "lib/matplotlib/quiver.py", "file_name": "quiver.py", "fun_name": "_text_shift", "commit_message": "Small cleanups to QuiverKey.\n\n- Text already calls FontProperties._from_any on anything passed as\n fontproperties; no need to do it ourselves again.\n- Group together x and y shifts for text positioning.", "code": "def _text_shift(self):\n return {\n \"N\": (0, +self.labelsep),\n \"S\": (0, -self.labelsep),\n \"E\": (+self.labelsep, 0),\n \"W\": (-self.labelsep, 0),\n }[self.labelpos]\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 74, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 7, "token_counts": 57, "n_ast_nodes": 89, "n_identifiers": 4, "random_cut": "def _text_shift(self):\n " }, { "id": 15279, "commit_id": "514e94a57ecedd4a046032680c3225ea20e8d082", "repo": "ccxt", "path": "python/ccxt/async_support/zb.py", "file_name": "zb.py", "fun_name": "parse_transaction", "commit_message": "1.66.57\n\n[ci skip]", "code": "def parse_transaction(self, transaction, currency=None):\n #\n # withdraw\n #\n # {\n # \"code\": 1000,\n # \"message\": \"success\",\n # \"id\": \"withdrawalId\"\n # }\n #\n # fetchWithdrawals\n #\n # {\n # \"amount\": 0.01,\n # \"fees\": 0.001,\n # \"id\": 2016042556231,\n # \"manageTime\": 1461579340000,\n # \"status\": 3,\n # \"submitTime\": 1461579288000,\n # \"toAddress\": \"14fxEPirL9fyfw1i9EF439Pq6gQ5xijUmp\",\n # }\n #\n # fetchDeposits\n #\n # {\n # \"address\": \"1FKN1DZqCm8HaTujDioRL2Aezdh7Qj7xxx\",\n # \"amount\": \"1.00000000\",\n # \"confirmTimes\": 1,\n # \"currency\": \"BTC\",\n # \"description\": \"Successfully Confirm\",\n # \"hash\": \"7ce842de187c379abafadd64a5fe66c5c61c8a21fb04edff9532234a1dae6xxx\",\n # \"id\": 558,\n # \"itransfer\": 1,\n # \"status\": 2,\n # \"submit_time\": \"2016-12-07 18:51:57\",\n # }\n #\n id = self.safe_string(transaction, 'id')\n txid = self.safe_string(transaction, 'hash')\n amount = self.safe_number(transaction, 'amount')\n timestamp = self.parse8601(self.safe_string(transaction, 'submit_time'))\n timestamp = self.safe_integer(transaction, 'submitTime', timestamp)\n address = self.safe_string_2(transaction, 'toAddress', 'address')\n tag = None\n if address is not None:\n parts = address.split('_')\n address = self.safe_string(parts, 0)\n tag = self.safe_string(parts, 1)\n confirmTimes = self.safe_integer(transaction, 'confirmTimes')\n updated = self.safe_integer(transaction, 'manageTime')\n type = None\n currencyId = self.safe_string(transaction, 'currency')\n code = self.safe_currency_code(currencyId, currency)\n if address is not None:\n type = 'withdrawal' if (confirmTimes is None) else 'deposit'\n status = self.parse_transaction_status(self.safe_string(transaction, 'status'))\n fee = None\n feeCost = self.safe_number(transaction, 'fees')\n if feeCost is not None:\n fee = {\n 'cost': feeCost,\n 'currency': code,\n }\n return {\n 'info': transaction,\n 'id': id,\n 'txid': txid,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'network': None,\n 'addressFrom': None,\n 'address': address,\n 'addressTo': address,\n 'tagFrom': None,\n 'tag': tag,\n 'tagTo': tag,\n 'type': type,\n 'amount': amount,\n 'currency': code,\n 'status': status,\n 'updated': updated,\n 'fee': fee,\n }\n", "url": "https://github.com/ccxt/ccxt.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 1101, "n_words": 232, "vocab_size": 132, "complexity": 5, "nloc": 47, "token_counts": 303, "n_ast_nodes": 543, "n_identifiers": 28, "random_cut": "def parse_transaction(self, transaction, currency=None):\n #\n # withdraw\n #\n # {\n # \"code\": 1000,\n # \"message\": \"success\",\n # \"id\": \"withdrawalId\"\n # }\n #\n # fetchWithdrawals\n #\n # {\n # \"amount\": 0.01,\n # \"fees\": 0.001,\n # \"id\": 2016042556231,\n # \"manageTime\": 1461579340000,\n # \"status\": 3,\n # \"submitTime\": 1461579288000,\n # \"toAddress\": \"14fxEPirL9fyfw1i9EF439Pq6gQ5xijUmp\",\n # }\n #\n # fetchDeposits\n #\n # {\n # \"address\": \"1FKN1DZqCm8HaTujDioRL2Aezdh7Qj7xxx\",\n # \"amount\": \"1.00000000\",\n # \"confirmTimes\": 1,\n # \"currency\": \"BTC\",\n # \"description\": \"Successfully Confirm\",\n # \"hash\": \"7ce842de187c379abafadd64a5fe66c5c61c8a21fb04edff9532234a1dae6xxx\",\n # \"id\": 558,\n # \"itransfer\": 1,\n # \"status\": 2,\n # \"submit_time\": \"2016-12-07 18:51:57\",\n # }\n #\n id = self.safe_string(transaction, 'id')\n txid = self.safe_string(transaction, 'hash')\n amount = self.safe_number(transaction, 'amount')\n timestamp = self.parse8601(self.safe_string(transaction, 'submit_time'))\n timestamp = self.safe_integer(transaction, 'submitTime', timestamp)\n address = self.safe_string_2(transaction, 'toAddress', 'address')\n tag = None\n if address is not None:\n parts = address.split('_')\n address = self.safe_string(parts, 0)\n tag = self.safe_string(parts, 1)\n confirmTimes = self.safe_integer(transaction, 'confirmTimes')\n updated = self.safe_integer(transaction, 'manageTime')\n type = None\n currencyId = self.safe_string(transaction, 'currency')\n code = self.safe_currency_code(currencyId, currency)\n if address is not None:\n type = 'withdrawal' if (confirmTimes is None) else 'deposit'\n status = self.parse_transaction_status(self.safe_string(transaction, 'status'))\n fee = None\n feeCost = self.safe_number(transaction, 'fees')\n if feeCost is not None:\n fee = {\n 'cost': feeCost,\n 'currency': code,\n }\n return {\n 'info': transaction,\n 'id': id,\n 'txid': txid,\n 'timestamp': time" }, { "id": 65273, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/item_wise_purchase_register/item_wise_purchase_register.py", "file_name": "item_wise_purchase_register.py", "fun_name": "get_items", "commit_message": "style: format code with black", "code": "def get_items(filters, additional_query_columns):\n\tconditions = get_conditions(filters)\n\n\tif additional_query_columns:\n\t\tadditional_query_columns = \", \" + \", \".join(additional_query_columns)\n\telse:\n\t\tadditional_query_columns = \"\"\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\tadditional_query_columns\n\t\t)\n\t\t% (conditions),\n\t\tfilters,\n\t\tas_dict=1,\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 15, "n_words": 29, "vocab_size": 23, "complexity": 2, "nloc": 30, "token_counts": 56, "n_ast_nodes": 95, "n_identifiers": 11, "random_cut": "def get_items(filters, additional_query_columns):\n\tconditions = get_conditions(filters)\n\n\tif additional_query_columns:\n\t\tadditional_query_columns = \", \" + \", \".join(additional_query_columns)\n\telse:\n\t\tadditional_query_columns = \"\"\n\n\treturn frappe.d" }, { "id": 281962, "commit_id": 
"57c0f840ac3948a6abd89f8913c1fb55b9b7da89", "repo": "OpenBBTerminal", "path": "tests/gamestonk_terminal/stocks/fundamental_analysis/test_dcf_view.py", "file_name": "test_dcf_view.py", "fun_name": "test_create_workbook", "commit_message": "Tests : fixing skipped tests (#1252)\n\n* Updating tests : stocks\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : etf\r\n\r\n* Updating tests : stock/options\r\n\r\n* Updating tests : black", "code": "def test_create_workbook(mocker):\n excel = dcf_view.CreateExcelFA(ticker=\"AAPL\", audit=False)\n\n # MOCK GENERATE_PATH\n attrs = {\n \"is_file.return_value\": False,\n }\n mock_path = mocker.Mock(**attrs)\n mocker.patch(\n target=\"gamestonk_terminal.stocks.fundamental_analysis.dcf_view.dcf_model.generate_path\",\n return_value=mock_path,\n )\n\n # MOCK SAVE\n mocker.patch(\n target=\"gamestonk_terminal.stocks.fundamental_analysis.dcf_view.Workbook.save\"\n )\n\n excel.create_workbook()\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 89, "n_words": 29, "vocab_size": 23, "complexity": 1, "nloc": 14, "token_counts": 62, "n_ast_nodes": 108, "n_identifiers": 14, "random_cut": "def test_create_workbook(mocker):\n excel = dcf_view.CreateExcelFA(ticker=\"AAPL\", audit=False)\n\n # MOCK GENERATE_PATH\n attrs = {\n \"is_file.return_value\": False,\n }\n mock_path = mocker.Mock(**attrs)\n mocker.patch(\n target=\"gamestonk_terminal" }, { "id": 48669, "commit_id": "c10f2266222c434485889b08cc1463acdb8fa169", "repo": "django-rest-framework", "path": "rest_framework/parsers.py", "file_name": "parsers.py", "fun_name": "get_filename", "commit_message": "Refactor: Replace try/except with contextlib.suppress() (#8676)", "code": "def get_filename(self, stream, media_type, parser_context):\n \n with contextlib.suppress(KeyError):\n return parser_context['kwargs']['filename']\n\n with contextlib.suppress(AttributeError, KeyError, ValueError):\n meta = parser_context['request'].META\n disposition, params = parse_header_parameters(meta['HTTP_CONTENT_DISPOSITION'])\n if 'filename*' in params:\n return params['filename*']\n return params['filename']\n", "url": "https://github.com/encode/django-rest-framework.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 119, "n_words": 28, "vocab_size": 24, "complexity": 2, "nloc": 9, "token_counts": 74, "n_ast_nodes": 129, "n_identifiers": 15, "random_cut": "def get_filename(self, stream, media_type, parser_context):\n \n with contextlib.suppress(KeyError):\n return parser_context['kwargs']['filename']\n\n with contextlib.suppress(AttributeError, KeyError, ValueError):\n meta = parser_context['request'].META\n disposition, params = parse_header_paramet" }, { "id": 104747, "commit_id": "1a1d32df517bbf531da1c43a7f421c0dd2beb5d5", "repo": "datasets", "path": "datasets/visual_genome/visual_genome.py", "file_name": "visual_genome.py", "fun_name": "image_metadata_url", "commit_message": "Add Visual Genome (#4161)", "code": "def image_metadata_url(self):\n if not self.version.match(_LATEST_VERSIONS[\"image_metadata\"]):\n logger.warning(\n f\"Latest image metadata version is {_LATEST_VERSIONS['image_metadata']}. Trying to generate a dataset of version: {self.version}. 
Please double check that image data are unchanged between the two versions.\"\n )\n return f\"{_BASE_ANNOTATION_URL}/image_data.json.zip\"\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 85, "n_words": 35, "vocab_size": 34, "complexity": 2, "nloc": 6, "token_counts": 29, "n_ast_nodes": 71, "n_identifiers": 8, "random_cut": "def image_metadata_url(self):\n if not self.version.match(_LATEST_VERSIONS[\"" }, { "id": 39517, "commit_id": "c5694fd203952d0abd3bf783c49659ff23da2009", "repo": "recommenders", "path": "tests/ci/azureml_tests/submit_groupwise_azureml_pytest.py", "file_name": "submit_groupwise_azureml_pytest.py", "fun_name": "submit_experiment_to_azureml", "commit_message": "Durations, disable warnings and exit -1", "code": "def submit_experiment_to_azureml(test, run_config, experiment, test_group, test_kind):\n\n \n\n project_folder = \".\"\n\n script_run_config = ScriptRunConfig(\n source_directory=project_folder,\n script=test,\n run_config=run_config,\n arguments=[\n \"--testgroup\",\n test_group,\n \"--testkind\",\n test_kind,\n ],\n # docker_runtime_config=dc\n )\n run = experiment.submit(script_run_config)\n # waits only for configuration to complete\n run.wait_for_completion(show_output=True, wait_post_processing=True)\n\n # test logs can also be found on azure\n # go to azure portal to see log in azure ws and look for experiment name\n # and look for individual run\n logger.debug(\"files {}\".format(run.get_file_names))\n\n return run\n\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 193, "n_words": 71, "vocab_size": 54, "complexity": 1, "nloc": 17, "token_counts": 82, "n_ast_nodes": 133, "n_identifiers": 21, "random_cut": "def submit_experiment_to_azureml(test, run_config, experiment, test_group, test_kind):\n\n \n\n project_folder = \".\"\n\n script_run_config = ScriptRunConfig(\n source_directory=project_folder,\n script=test,\n run_config=run_config,\n arguments=[\n \"--testgroup\",\n test_group,\n \"--testkind\",\n test_kind,\n ],\n # docker_runtime_" }, { "id": 127903, "commit_id": "47fc690219691cf1eebcf6079ab10da97a68c377", "repo": "ray", "path": "rllib/examples/custom_keras_model.py", "file_name": "custom_keras_model.py", "fun_name": "check_has_custom_metric", "commit_message": "Revert \"[RLlib] Deprecate legacy callbacks. 
(#28697)\" (#28748)\n\nThis reverts commit d743919ac9b40f3982ade58b2f5c4a13b1188635.\r\n\r\nSigned-off-by: Amog Kamsetty amogkamsetty@yahoo.com", "code": "def check_has_custom_metric(result):\n r = result[\"result\"][\"info\"][LEARNER_INFO]\n if DEFAULT_POLICY_ID in r:\n r = r[DEFAULT_POLICY_ID].get(LEARNER_STATS_KEY, r[DEFAULT_POLICY_ID])\n assert r[\"model\"][\"foo\"] == 42, result\n\n if args.run == \"DQN\":\n extra_config = {\"num_steps_sampled_before_learning_starts\": 0}\n else:\n extra_config = {}\n\n tuner = tune.Tuner(\n args.run,\n run_config=air.RunConfig(\n stop={\"episode_reward_mean\": args.stop},\n ),\n param_space=dict(\n extra_config,\n **{\n \"env\": \"BreakoutNoFrameskip-v4\"\n if args.use_vision_network\n else \"CartPole-v0\",\n # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.\n \"num_gpus\": int(os.environ.get(\"RLLIB_NUM_GPUS\", \"0\")),\n \"callbacks\": {\n \"on_train_result\": check_has_custom_metric,\n },\n \"model\": {\n \"custom_model\": \"keras_q_model\"\n if args.run == \"DQN\"\n else \"keras_model\"\n },\n \"framework\": \"tf\",\n }\n ),\n )\n tuner.fit()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 448, "n_words": 83, "vocab_size": 67, "complexity": 2, "nloc": 5, "token_counts": 50, "n_ast_nodes": 294, "n_identifiers": 24, "random_cut": "def check_has_custom_metric(result):\n r = result[\"result\"][\"info\"][LEARNER_INFO]\n if DEFAULT_POLICY_ID in r:\n r = r[DEFAULT_POLICY_ID].get(LEARNER_STATS_KEY, r[DEFAULT_POLICY_ID])\n assert r[\"model\"][\"foo\"] == 42, result\n\n if args.run == \"DQN\":\n extra_config = {\"num_steps_sampled_before_learning_starts\": 0}\n else:\n extra_config = {}\n\n tuner = tune.Tuner(\n args.run,\n run_config=air.RunConfig(\n stop={\"episode_reward_mean\": args.stop},\n ),\n param_space=dict(\n extra_config,\n **{\n \"env\": \"BreakoutNoFrameskip-v4\"\n if args.use_vision_network" }, { "id": 81781, "commit_id": "663ef2cc6413c0cdb26392bb046b37fe564fb546", "repo": "awx", "path": "awx/main/tests/functional/models/test_workflow.py", "file_name": "test_workflow.py", "fun_name": "test_set_all_ask_for_prompts_false_from_post", "commit_message": "adding prompt-to-launch field on Labels field in Workflow Templates; with necessary UI and testing changes\n\nCo-authored-by: Keith Grant ", "code": "def test_set_all_ask_for_prompts_false_from_post(self, post, organization, inventory, org_admin):\n \n r = post(\n url=reverse('api:workflow_job_template_list'),\n data=dict(\n name='workflow that tests ask_for prompts',\n organization=organization.id,\n inventory=inventory.id,\n job_tags='',\n skip_tags='',\n ),\n user=org_admin,\n expect=201,\n )\n wfjt = WorkflowJobTemplate.objects.get(id=r.data['id'])\n\n assert wfjt.ask_inventory_on_launch is False\n assert wfjt.ask_labels_on_launch is False\n assert wfjt.ask_limit_on_launch is False\n assert wfjt.ask_scm_branch_on_launch is False\n assert wfjt.ask_skip_tags_on_launch is False\n assert wfjt.ask_tags_on_launch is False\n assert wfjt.ask_variables_on_launch is False\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 262, "n_words": 55, "vocab_size": 36, "complexity": 1, "nloc": 21, "token_counts": 123, "n_ast_nodes": 189, "n_identifiers": 28, "random_cut": "def test_set_all_ask_for_prompts_false_from_post(self, post, organization, inventory, org_admin):\n \n r = post(\n 
url=reverse('api:workflow_job_template_list'),\n data=dict(\n name='workflow that tests ask_for prompts',\n organization=organization.id,\n inventory=inventory.id,\n job_tags='',\n skip_tags='',\n ),\n user=org_admin,\n expect=201,\n )\n wfjt = WorkflowJobTemplate.objects.get(id=r.data['id'])\n\n assert wfjt.ask_inventory_on_launch is False\n assert wfjt.ask_labels_on_launch is False\n assert wfjt.ask_limit_on_launch is False\n assert wfjt.ask_scm_branch_on_launch is False\n assert wfjt.ask_skip_tags_on_launch is False\n assert wfjt.ask_tags_on_launch is False\n assert wfjt.ask_variables_on_launch is False\n" }, { "id": 200193, "commit_id": "0b2567032e59cc862b20cfb7c4a71cd30b3951d3", "repo": "sympy", "path": "sympy/functions/elementary/exponential.py", "file_name": "exponential.py", "fun_name": "_eval_rewrite_as_cos", "commit_message": "Cache log atan-table and use already imported constants", "code": "def _eval_rewrite_as_cos(self, arg, **kwargs):\n from sympy.functions.elementary.trigonometric import cos\n return cos(I*arg) + I*cos(I*arg + pi/2)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 27, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 40, "n_ast_nodes": 59, "n_identifiers": 11, "random_cut": "def _eval_rewrite_as_cos(self, arg, **kwargs):\n from sympy.functions.elementary.trigonometric import cos\n return cos(I*arg" }, { "id": 190877, "commit_id": "d34fd16034e307b545c3e3adfa4d9d472a582cc6", "repo": "thumbor", "path": "tests/detectors/test_face_detector.py", "file_name": "test_face_detector.py", "fun_name": "test_should_run_on_cmyk_images", "commit_message": "Feature/optional opencv (#1400)\n\n* Removed opencv dependency\r\n\r\nNow OpenCV is optional and detectors are smart to\r\nskip if cv2 could not be imported.\r\nAlso refactored face detector a bit to make it more\r\nmaintainable.\r\nNow thumbor can be installed with\r\npip install thumbor\r\npip install thumbor[all]\r\npip install thumbor[opencv]\r\npip install thumbor[tests]", "code": "async def test_should_run_on_cmyk_images(self):\n with open(\n abspath(\n \"./tests/fixtures/images/\"\n \"Giunchedi%2C_Filippo_January_2015_01-cmyk.jpg\"\n ),\n \"rb\",\n ) as fixture:\n self.engine.load(fixture.read(), None)\n\n self.context.config.FACE_DETECTOR_CASCADE_FILE = abspath(\n \"./thumbor/detectors/face_detector/haarcascade_frontalface_default.xml\",\n )\n\n if hasattr(FaceDetector, \"cascade\"):\n del FaceDetector.cascade\n await FaceDetector(self.context, 0, None).detect()\n detection_result = self.context.request.focal_points[0]\n expect(detection_result.origin).to_equal(\"Face Detection\")\n expect(detection_result.x).to_be_numeric()\n expect(detection_result.y).to_be_numeric()\n expect(detection_result.width).to_be_numeric()\n expect(detection_result.height).to_be_numeric()\n", "url": "https://github.com/thumbor/thumbor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 217, "n_words": 38, "vocab_size": 35, "complexity": 2, "nloc": 21, "token_counts": 137, "n_ast_nodes": 235, "n_identifiers": 26, "random_cut": "async def test_should_run_on_cmyk_images(self):\n with open(\n abspath(\n \"./tests/fixtures/images/\"\n \"Giunchedi%2C_Filippo_January_2015_01-cmyk.jpg\"\n ),\n \"rb\",\n ) as fixture:\n self.engine.load(fixture.read(), None)\n\n self.context.config.FACE_DETECTOR_CASCADE_FILE = abspath(\n \"./thumbor/detectors/face_detector/haarcascade_fron" }, { 
"id": 300547, "commit_id": "6cff2f8571005ac795540839a69ba6ef1446f863", "repo": "core", "path": "tests/components/template/test_light.py", "file_name": "test_light.py", "fun_name": "test_effect_template", "commit_message": "Tweak template light tests (#71729)", "code": "async def test_effect_template(hass, expected_effect, count, effect_template):\n \n light_config = {\n \"test_template_light\": {\n **OPTIMISTIC_ON_OFF_LIGHT_CONFIG,\n \"value_template\": \"{{ 1 == 1 }}\",\n \"set_effect\": {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"entity_id\": \"test.test_state\",\n \"effect\": \"{{effect}}\",\n },\n },\n \"effect_list_template\": \"{{ ['Strobe color', 'Police', 'Christmas', 'RGB', 'Random Loop'] }}\",\n \"effect_template\": effect_template,\n }\n }\n await async_setup_light(hass, count, light_config)\n state = hass.states.get(\"light.test_template_light\")\n assert state is not None\n assert state.attributes.get(\"effect\") == expected_effect\n\n\n@pytest.mark.parametrize(\"count\", [1])\n@pytest.mark.parametrize(\n \"expected_min_mireds,min_mireds_template\",\n [\n (118, \"{{118}}\"),\n (153, \"{{x - 12}}\"),\n (153, \"None\"),\n (153, \"{{ none }}\"),\n (153, \"\"),\n (153, \"{{ 'a' }}\"),\n ],\n)", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"count\", [1])\n@pytest.mark.parametrize(\n \"expected_min_mireds,min_mireds_template\",\n [\n (118, \"{{118}}\"),\n (153, \"{{x - 12}}\"),\n (153, \"None\"),\n (153, \"{{ none }}\"),\n (153, \"\"),\n (153, \"{{ 'a' }}\"),\n ],\n)", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 317, "n_words": 85, "vocab_size": 65, "complexity": 1, "nloc": 20, "token_counts": 92, "n_ast_nodes": 263, "n_identifiers": 15, "random_cut": "async def test_effect_template(hass, expected_effect, count, effect_template):\n \n light_config = {\n \"test_template_light\": {\n **OPTIMISTIC_ON_OFF_LIGHT_CONFIG,\n \"value_template\": \"{{ 1 == 1 }}\",\n \"set_effect\": {\n \"service\": \"test.automation\",\n \"data_template\": {\n \"entity_id\": \"test.test_state\",\n \"effect\": \"{{effect}}\",\n },\n },\n \"effect_list_template\": \"{{ ['Strobe color', 'Police', 'Christmas', 'RGB', 'Random Loop'] }}\",\n \"effect_template\": effect_template,\n }\n }\n await async_setup_light(hass, count, light_config)\n state = hass.states.get(\"light.test_template_light\")\n assert state is not None\n assert state.attributes.get(\"effect\") =" }, { "id": 282052, "commit_id": "683a8bdd83c1b931df111a5b2b8b19350930b73a", "repo": "OpenBBTerminal", "path": "tests/gamestonk_terminal/etf/screener/test_screener_controller.py", "file_name": "test_screener_controller.py", "fun_name": "test_print_help", "commit_message": "Tests : Economy + Conftest (#1260)\n\n* Updating tests : economy\r\n\r\n* Updating tests : removing breaklines\r\n\r\n* Updating tests : economy\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : economy", "code": "def test_print_help():\n controller = screener_controller.ScreenerController(queue=None)\n controller.print_help()\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametrize(\n \"an_input, expected_queue\",\n [\n (\"\", []),\n (\"/help\", [\"quit\", \"quit\", \"help\"]),\n (\"help/help\", [\"help\"]),\n (\"q\", [\"quit\"]),\n (\"h\", []),\n (\n \"r\",\n [\n \"quit\",\n \"quit\",\n \"reset\",\n \"etf\",\n \"scr\",\n ],\n ),\n ],\n)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": 
"@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametrize(\n \"an_input, expected_queue\",\n [\n (\"\", []),\n (\"/help\", [\"quit\", \"quit\", \"help\"]),\n (\"help/help\", [\"help\"]),\n (\"q\", [\"quit\"]),\n (\"h\", []),\n (\n \"r\",\n [\n \"quit\",\n \"quit\",\n \"reset\",\n \"etf\",\n \"scr\",\n ],\n ),\n ],\n)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 203, "n_words": 35, "vocab_size": 30, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 178, "n_identifiers": 11, "random_cut": "def test_print_help():\n controller = screener_controller.ScreenerController(queue=None)\n controller.print_help()\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametrize(\n \"an_input, expected_queue\",\n [\n (\"\", []),\n (\"/help\"," }, { "id": 319686, "commit_id": "0a34a4a7ad595112dc773467d2e3b98ad28936ad", "repo": "paperless-ngx", "path": "src/documents/tests/test_sanity_check.py", "file_name": "test_sanity_check.py", "fun_name": "test_empty_content", "commit_message": "Makes the sanity check messages better for users", "code": "def test_empty_content(self):\n doc = self.make_test_data()\n doc.content = \"\"\n doc.save()\n messages = check_sanity()\n self.assertFalse(messages.has_error)\n self.assertFalse(messages.has_warning)\n self.assertEqual(len(messages), 1)\n self.assertRegex(messages[doc.pk][0][\"message\"], \"Document has no content.\")\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 76, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 9, "token_counts": 73, "n_ast_nodes": 123, "n_identifiers": 15, "random_cut": "def test_empty_content(self):\n doc = self.make_test_data()\n doc.content = \"\"\n doc.save()\n messag" }, { "id": 137686, "commit_id": "8e680c483ce326cefc62e44f68ab1a6948b1c3d2", "repo": "ray", "path": "rllib/algorithms/algorithm_config.py", "file_name": "algorithm_config.py", "fun_name": "_detect_atari_env", "commit_message": "[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). 
(#28369)", "code": "def _detect_atari_env(self) -> bool:\n \n # Atari envs are usually specified via a string like \"PongNoFrameskip-v4\"\n # or \"ALE/Breakout-v5\".\n # We do NOT attempt to auto-detect Atari env for other specified types like\n # a callable, to avoid running heavy logics in validate().\n # For these cases, users can explicitly set `environment(atari=True)`.\n if not type(self.env) == str:\n return False\n\n try:\n if self.env.startswith(\"ALE/\"):\n env = gym.make(\"GymV26Environment-v0\", env_id=self.env)\n else:\n env = gym.make(self.env)\n except gym.error.NameNotFound:\n # Not an Atari env if this is not a gym env.\n return False\n\n return is_atari(env)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 242, "n_words": 87, "vocab_size": 65, "complexity": 4, "nloc": 16, "token_counts": 73, "n_ast_nodes": 130, "n_identifiers": 13, "random_cut": "def _detect_atari_env(self) -> bool:\n \n # Atari envs are usually specified via a string like \"PongNoFrameskip-v4\"\n # or \"ALE/Breakout-v5\".\n # We do NOT attempt to auto-detect Atari env for o" }, { "id": 259794, "commit_id": "af5b6a100357852f4c3040ff2cb06cb8691023e9", "repo": "scikit-learn", "path": "sklearn/cluster/tests/test_optics.py", "file_name": "test_optics.py", "fun_name": "test_min_cluster_size_invalid", "commit_message": "ENH Add sparse input support to OPTICS (#22965)\n\nCo-authored-by: huntzhan \r\nCo-authored-by: Clickedbigfoot \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>\r\nCo-authored-by: Thomas J. Fan ", "code": "def test_min_cluster_size_invalid(min_cluster_size):\n clust = OPTICS(min_cluster_size=min_cluster_size)\n with pytest.raises(ValueError, match=\"must be a positive integer or a \"):\n clust.fit(X)\n\n clust = OPTICS(min_cluster_size=min_cluster_size, metric=\"euclidean\")\n with pytest.raises(ValueError, match=\"must be a positive integer or a \"):\n clust.fit(sparse.csr_matrix(X))\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 56, "n_words": 31, "vocab_size": 18, "complexity": 1, "nloc": 7, "token_counts": 66, "n_ast_nodes": 114, "n_identifiers": 13, "random_cut": "def test_min_cluster_size_invalid(min_cluster_size):\n clust = OPTICS(min_cluster_size=min_cluster_size)\n with pytest.raises(ValueError, match=\"must be a positive integer or a \"):\n clust.fit(X)\n\n clust = OPTICS(min_cluster_size=min_cluster_size, metric=\"euclidean\")\n with pytest.raises(ValueError, match=\"must be a positive integer or a \"):\n clust.fit(sparse.csr_matrix(X))\n\n" }, { "id": 272047, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/feature_column/dense_features_test.py", "file_name": "dense_features_test.py", "fun_name": "test_dense_feature_with_partitioner", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_dense_feature_with_partitioner(self):\n sparse_input = tf.SparseTensor(\n indices=((0, 0), (1, 0), (2, 0), (3, 0)),\n values=(0, 1, 3, 2),\n dense_shape=(4, 4),\n )\n\n # Create feature columns (categorical and embedding).\n categorical_column = tf.feature_column.categorical_column_with_identity(\n key=\"a\", num_buckets=4\n )\n embedding_dimension = 2\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 
0, "ast_levels": 11, "n_whitespaces": 121, "n_words": 36, "vocab_size": 31, "complexity": 1, "nloc": 29, "token_counts": 234, "n_ast_nodes": 111, "n_identifiers": 14, "random_cut": "def test_dense_feature_with_partitioner(self):\n sparse_input = tf.SparseTensor(\n indices=((0, 0), (1, 0), (2, 0), (3, 0)),\n values=(0, 1, 3, 2),\n dense_shape=(4, 4),\n )\n\n # Create feature columns (categorical and embedding).\n categorical_column = tf.feature_column.categorical_column_with_identity(\n " }, { "id": 150167, "commit_id": "62f7606d2c025e793aa24bd041a9d50a07bd0748", "repo": "freqtrade", "path": "tests/freqai/conftest.py", "file_name": "conftest.py", "fun_name": "get_patched_freqai_strategy", "commit_message": "Update tests to new variant", "code": "def get_patched_freqai_strategy(mocker, freqaiconf):\n strategy = StrategyResolver.load_strategy(freqaiconf)\n strategy.ft_bot_start()\n\n return strategy\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 17, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 22, "n_ast_nodes": 36, "n_identifiers": 7, "random_cut": "def get_patched_freqai_strategy(mocker, freqaiconf):\n strategy = StrategyResolver.load_strategy(freqaiconf)\n strategy.ft_bot_start(" }, { "id": 83243, "commit_id": "b0ce4f1bce8031881addecb1e86073483517f392", "repo": "zulip", "path": "zerver/tests/test_markdown.py", "file_name": "test_markdown.py", "fun_name": "test_alert_words_returns_user_ids_with_alert_words_with_huge_alert_words", "commit_message": "docs: Fix many spelling mistakes.\n\nSigned-off-by: Anders Kaseorg ", "code": "def test_alert_words_returns_user_ids_with_alert_words_with_huge_alert_words(self) -> None:\n\n alert_words_for_users: Dict[str, List[str]] = {\n \"hamlet\": [\"issue124\"],\n \"cordelia\": self.get_mock_alert_words(500, 10),\n \"iago\": self.get_mock_alert_words(500, 10),\n \"othello\": self.get_mock_alert_words(500, 10),\n }\n user_profiles: Dict[str, UserProfile] = {}\n for (username, alert_words) in alert_words_for_users.items():\n user_profile = self.example_user(username)\n user_profiles.update({username: user_profile})\n do_add_alert_words(user_profile, alert_words)\n sender_user_profile = self.example_user(\"polonius\")\n msg = Message(sender=sender_user_profile, sending_client=get_client(\"test\"))\n realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 173, "n_words": 48, "vocab_size": 37, "complexity": 2, "nloc": 26, "token_counts": 177, "n_ast_nodes": 221, "n_identifiers": 25, "random_cut": "def test_alert_words_returns_user_ids_with_alert_words_with_huge_alert_words(self) -> None:\n\n alert_words_f" }, { "id": 188743, "commit_id": "e0fdfa52b94d889e8ab9c1184f901cb3d89df3d0", "repo": "jumpserver", "path": "apps/users/models/user.py", "file_name": "user.py", "fun_name": "add_role_system_component", "commit_message": "feat: 支持 magnus (#7965)\n\n* feat: 支持 magnus\r\n\r\n* perf: 添加 setting 到 api\r\n\r\n* perf: 放出 mongodb\r\n\r\nCo-authored-by: ibuler ", "code": "def add_role_system_component(self):\n role = self.builtin_role.system_component.get_role()\n self.add(role)\n\n", "url": "https://github.com/jumpserver/jumpserver.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 19, "n_words": 6, "vocab_size": 6, 
"complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 37, "n_identifiers": 7, "random_cut": "def add_role_system_component(self):\n role = self.builtin_role.system_component.get_role()\n " }, { "id": 312487, "commit_id": "fa09cf663e759ccc94afc972e98a6bae57e8385e", "repo": "core", "path": "homeassistant/components/enocean/sensor.py", "file_name": "sensor.py", "fun_name": "value_changed", "commit_message": "Update black to 22.1.0 (#65788)", "code": "def value_changed(self, packet):\n \n if packet.rorg != 0xA5:\n return\n packet.parse_eep(0x12, 0x01)\n if packet.parsed[\"DT\"][\"raw_value\"] == 1:\n # this packet reports the current value\n raw_val = packet.parsed[\"MR\"][\"raw_value\"]\n divisor = packet.parsed[\"DIV\"][\"raw_value\"]\n self._attr_native_value = raw_val / (10**divisor)\n self.schedule_update_ha_state()\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 127, "n_words": 33, "vocab_size": 29, "complexity": 3, "nloc": 9, "token_counts": 75, "n_ast_nodes": 130, "n_identifiers": 10, "random_cut": "def value_changed(self, packet):\n \n if packet.rorg != 0xA5:\n return\n packet.parse_eep(0x12, 0x01)\n if packet.par" }, { "id": 90519, "commit_id": "ff2ed9ef861d4b4af2bcc0293f6ffe3a9a1f0439", "repo": "sentry", "path": "tests/sentry/integrations/slack/test_message_builder.py", "file_name": "test_message_builder.py", "fun_name": "test_metric_value", "commit_message": "feat(workflow): Add graph to slack incident message (#34937)\n\nAdds a metric alert graph to incident slack alerts. Changes the slack messages over to the slack \"blocks\" format. I've added what was previously part of the footer as part of the message since footers aren't supported via blocks.", "code": "def test_metric_value(self):\n alert_rule = self.create_alert_rule()\n incident = self.create_incident(alert_rule=alert_rule, status=2)\n\n # This test will use the action/method and not the incident to build status\n title = f\"Critical: {alert_rule.name}\"\n metric_value = 5000\n trigger = self.create_alert_rule_trigger(alert_rule, CRITICAL_TRIGGER_LABEL, 100)\n self.create_alert_rule_trigger_action(\n alert_rule_trigger=trigger, triggered_for_incident=incident\n )\n timestamp = \"\".format(\n to_timestamp(incident.date_started), \"{date_pretty}\", \"{time}\"\n )\n link = (\n absolute_uri(\n reverse(\n \"sentry-metric-alert-details\",\n kwargs={\n \"organization_slug\": alert_rule.organization.slug,\n \"alert_rule_id\": alert_rule.id,\n },\n )\n )\n + f\"?alert={incident.identifier}\"\n )\n # This should fail because it pulls status from `action` instead of `incident`\n assert SlackIncidentsMessageBuilder(\n incident, IncidentStatus.CRITICAL, metric_value=metric_value\n ).build() == {\n \"blocks\": [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"<{link}|*{title}*> \\n5000 events in the last 10 minutes\\n{timestamp}\",\n },\n }\n ],\n \"color\": LEVEL_TO_COLOR[\"fatal\"],\n }\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 591, "n_words": 114, "vocab_size": 91, "complexity": 1, "nloc": 38, "token_counts": 160, "n_ast_nodes": 297, "n_identifiers": 33, "random_cut": "def test_metric_value(self):\n alert_rule = self.create_alert_rule()\n incident = self.create_incident(alert_rule=alert_rule, status=2)\n\n # This test will use the action/method and not the incident to build status\n 
title = f\"Critical: {alert_rule.name}\"\n metric_value = 5000\n trigger = self.create_alert_rule_trigger(alert_rule, CRITICAL_TRIGGER_LABEL, 100)\n self.create_alert_rule_trigger_action(\n alert_rule_trigger=trigger, triggered_" }, { "id": 135547, "commit_id": "d329147ae28c57b290f6b932f9f3044523f67c4e", "repo": "ray", "path": "rllib/utils/actor_manager.py", "file_name": "actor_manager.py", "fun_name": "__next__", "commit_message": "[RLlib] Introduce FaultTolerantActorManager (#29703)\n\nSigned-off-by: Jun Gong ", "code": "def __next__(self) -> CallResult:\n if not self._call_results:\n raise StopIteration\n return self._call_results.pop(0)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 47, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 4, "token_counts": 24, "n_ast_nodes": 39, "n_identifiers": 6, "random_cut": "def __next__(self) -> CallResult:\n if not self._call_results:\n raise StopIteration\n return self._c" }, { "id": 48372, "commit_id": "71e4deb1b093b7ad9320eb5eb34eca8ea440a238", "repo": "airflow", "path": "airflow/executors/executor_loader.py", "file_name": "executor_loader.py", "fun_name": "get_default_executor", "commit_message": "Add typing for airflow/configuration.py (#23716)\n\n* Add typing for airflow/configuration.py\r\n\r\nThe configuraiton.py did not have typing information and it made\r\nit rather difficult to reason about it-especially that it went\r\na few changes in the past that made it rather complex to\r\nunderstand.\r\n\r\nThis PR adds typing information all over the configuration file", "code": "def get_default_executor(cls) -> \"BaseExecutor\":\n \n if cls._default_executor is not None:\n return cls._default_executor\n\n from airflow.configuration import conf\n\n executor_name = conf.get_mandatory_value('core', 'EXECUTOR')\n cls._default_executor = cls.load_executor(executor_name)\n\n return cls._default_executor\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 77, "n_words": 24, "vocab_size": 19, "complexity": 2, "nloc": 8, "token_counts": 50, "n_ast_nodes": 86, "n_identifiers": 9, "random_cut": "def get_default_executor(cls) -> \"BaseExecutor\":\n \n if cls._default_executor is not None:\n return cls._default_executor\n\n from airflow.c" }, { "id": 47826, "commit_id": "22ba59c5f26d1e6fa2ce59e99304628b2f0a0fd9", "repo": "airflow", "path": "airflow/api/common/mark_tasks.py", "file_name": "mark_tasks.py", "fun_name": "find_task_relatives", "commit_message": "Fix TaskInstance actions with upstream/downstream (#23153)\n\n* Fix Clear+upstream/downstream\r\n\r\nWhen we added clearing individual mapped tasks, we unfortunately broke\r\nthe up/down stream feature\r\n\r\nThis was because when passing task_id/task_id+map_index down that\r\nlimited it to _just_ that task_id.\r\n\r\nSo we need to change it to also support the up/downstream we need to add\r\nthose tasks to the list we pass on, meaning that we have to support\r\ntask_id and (task_id,map_index) tuples in the same `task_id` list.\r\n\r\n* Fix Mark Success/Failure\r\n\r\nSimilar problems as clear.\r\n\r\nNote we changed the eager loading of DagRun (which is also the default)\r\nin to an explicit lazy load to avoid a needless join on to DR for this\r\nfunction.\r\n\r\nCo-authored-by: Tzu-ping Chung ", "code": "def find_task_relatives(tasks, downstream, upstream):\n \n for item in tasks:\n if isinstance(item, 
tuple):\n task, map_index = item\n yield task.task_id, map_index\n else:\n task = item\n yield task.task_id\n if downstream:\n for relative in task.get_flat_relatives(upstream=False):\n yield relative.task_id\n if upstream:\n for relative in task.get_flat_relatives(upstream=True):\n yield relative.task_id\n\n\n@provide_session", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "@provide_session", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 170, "n_words": 41, "vocab_size": 26, "complexity": 7, "nloc": 14, "token_counts": 81, "n_ast_nodes": 134, "n_identifiers": 13, "random_cut": "def find_task_relatives(tasks, downstream, upstream):\n \n for item in tasks:\n if isinstance(item, tuple):\n task, map_index = item\n yield task.task_id, map_index\n else:\n task = item\n yield task.task_id\n if downstream:\n for relative" }, { "id": 40762, "commit_id": "bb1a521d7397339217538494972310da77a03d93", "repo": "seaborn", "path": "seaborn/tests/_core/test_mappings.py", "file_name": "test_mappings.py", "fun_name": "test_numeric_dict_palette_with_norm", "commit_message": "Implement most of the framework for semantic mapping using Plot\n\nSquashed commit of the following:\n\ncommit 597cd89d9ffddc67ef3b92ceb94b2c4810412cfe\nAuthor: Michael Waskom \nDate: Sat Oct 16 15:50:15 2021 -0400\n\n Satisfy linter\n\ncommit f62d8740f08a31b07c34566dd4e89c98b5fa75b5\nAuthor: Michael Waskom \nDate: Sat Oct 16 14:12:45 2021 -0400\n\n Simplify color transform and tests\n\ncommit 42020a0dda4c537a5360c7dcecbb15ffa51844d2\nAuthor: Michael Waskom \nDate: Sat Oct 16 12:42:32 2021 -0400\n\n Initialize default semantics with relevant variable names\n\ncommit c7777d9b71a561afd75199c40d71c815ddce9a46\nAuthor: Michael Waskom \nDate: Tue Oct 12 20:34:03 2021 -0400\n\n Make scale a required parameter of mapping setup\n\ncommit 81482fd4c452fec254f2c1d5907311760a2313b9\nAuthor: Michael Waskom \nDate: Mon Oct 11 21:32:01 2021 -0400\n\n Add from_inferred_type alternate constructor for ScaleWrapper\n\ncommit c3ea2a875c0c672bec73ded24283323e9f554eaf\nAuthor: Michael Waskom \nDate: Sun Oct 10 20:13:50 2021 -0400\n\n Add basic datetime mapping tests\n\ncommit b32633ca0d5057749d32c5461a53954c9e815ba3\nAuthor: Michael Waskom \nDate: Sat Oct 9 17:59:53 2021 -0400\n\n Very messy prototype of mapping datetime data\n\ncommit 8c51ab7d9de549fe556b0eeb3e8c621afde9d610\nAuthor: Michael Waskom \nDate: Sat Oct 9 13:47:46 2021 -0400\n\n Use linestyle rather than dash\n\ncommit 6cb547063887e89a3e7746e0a821479fa4d99639\nAuthor: Michael Waskom \nDate: Sat Oct 9 13:39:25 2021 -0400\n\n Clear out some TODOs\n\ncommit 636f8681c07c95fbfb07c7965fd5912a75ae0f59\nAuthor: Michael Waskom \nDate: Fri Oct 8 20:08:24 2021 -0400\n\n Matplotlib compatability\n\ncommit 30eadfb4450f8139f60c5aea98f3fa8ea8d2c8f5\nAuthor: Michael Waskom \nDate: Fri Oct 8 20:00:52 2021 -0400\n\n Move norm->rgb transform into class and fix typing\n\ncommit 58660ffd962433bb1433b65ec6bfce377c0b1ad3\nAuthor: Michael Waskom \nDate: Thu Oct 7 20:59:01 2021 -0400\n\n Build out continuous semantic tests\n\ncommit 72f60d7df708f14e2b6f65c6c7748defaaf563be\nAuthor: Michael Waskom \nDate: Tue Oct 5 20:47:05 2021 -0400\n\n Start building out boolean and continuous mapping tests\n\ncommit a8408ab57048db3e9e480f478d974d8a9356524f\nAuthor: Michael Waskom \nDate: Mon Oct 4 20:57:11 2021 -0400\n\n Add abstraction in discrete semantic tests\n\ncommit 966218f065aa54a0af159394d7458bbbd4031868\nAuthor: Michael Waskom \nDate: Mon Oct 4 20:37:31 2021 -0400\n\n Name 
bikeshedding\n\ncommit 7e4a62b1107f21a3f29d3e04725f607c16fe291d\nAuthor: Michael Waskom \nDate: Mon Oct 4 20:30:22 2021 -0400\n\n Move default semantics out of Plot\n\ncommit 51729363a1d35695e677c5c5c9bb01d44ad95ec6\nAuthor: Michael Waskom \nDate: Sun Oct 3 22:23:21 2021 -0400\n\n Add linewidth to prototype out continuous semantic\n\ncommit fc8f466f2cb2c55dcfc58e566c5a94a06473bab1\nAuthor: Michael Waskom \nDate: Sun Oct 3 17:33:28 2021 -0400\n\n Attempt (unsuccessfully) to clean up Point draw logic\n\ncommit af8d37758ea6490b26753798067ae8291c2fc07c\nAuthor: Michael Waskom \nDate: Thu Sep 30 21:19:35 2021 -0400\n\n Fix base attribute typing on Semantic.variable\n\ncommit d861fda490608bfa25810c24c0461236830c3b53\nAuthor: Michael Waskom \nDate: Thu Sep 30 20:44:40 2021 -0400\n\n Change test for too-short palette reaction to warning\n\ncommit 4761b092233c1b2c99dd0fd57d7506f9e1956e5b\nAuthor: Michael Waskom \nDate: Wed Sep 29 20:54:21 2021 -0400\n\n Add prototype of ContinuousSemantic\n\ncommit 8519b5b61ead0701481795c7698778ba330ffe86\nAuthor: Michael Waskom \nDate: Tue Sep 28 20:51:11 2021 -0400\n\n Spec out a BooleanSemantic\n\ncommit 83604c6c271d17839c97136c34002ad34513bfff\nAuthor: Michael Waskom \nDate: Tue Sep 28 19:21:47 2021 -0400\n\n Fix more complex positional variables\n\ncommit cc8f73a548e6337dace4b372873583a8b02b6b39\nAuthor: Michael Waskom \nDate: Tue Sep 28 08:20:10 2021 -0400\n\n Clear mypy failures\n\ncommit 82828708fd9a4529043ea0a887aa67f3946ecdad\nAuthor: Michael Waskom \nDate: Mon Sep 27 07:01:19 2021 -0400\n\n MPL compat\n\ncommit 0b69940a164059dbfec834e029af51a369f70901\nAuthor: Michael Waskom \nDate: Sun Sep 26 22:42:02 2021 -0400\n\n PEP8\n\ncommit a7bfca26e7ce095f6ed8cba5878250efaf4bcd6a\nAuthor: Michael Waskom \nDate: Sun Sep 26 22:24:25 2021 -0400\n\n Add numeric ColorMapping\n\ncommit 06116145750a75b20faece231ea153caca15f40d\nAuthor: Michael Waskom \nDate: Sun Sep 26 20:17:54 2021 -0400\n\n Rename objects in mapping tests\n\ncommit aa8bbd53eb195649e5e1d309527247a770c525fc\nAuthor: Michael Waskom \nDate: Sun Sep 26 20:15:09 2021 -0400\n\n Remove vestigial code\n\ncommit b527b5767e929c3f741d6ed612eab96dca3013d5\nAuthor: Michael Waskom \nDate: Sun Sep 26 17:53:03 2021 -0400\n\n Have map_ methods call scale_ method when appropriate\n\ncommit a8194b4e3c1dade124e16e680a930cfe199b9634\nAuthor: Michael Waskom \nDate: Sun Sep 26 14:43:27 2021 -0400\n\n Begin exposing order in map methods\n\ncommit 708391b1eff34db93798722a93cd921ed66eac6e\nAuthor: Michael Waskom \nDate: Sun Sep 26 14:27:05 2021 -0400\n\n More consistency in argument order\n\ncommit e0be5ff82abe52fbd0facc9482bd5b7950d5f88f\nAuthor: Michael Waskom \nDate: Sun Sep 26 12:41:05 2021 -0400\n\n Partial fix to scale transformation logic\n\ncommit b706c89c30c425ba1ce148c5d5a69fb96a2613e5\nAuthor: Michael Waskom \nDate: Sun Sep 26 08:26:32 2021 -0400\n\n Make it optional to have x/y scale defined\n\ncommit 7e758f8a04c39142dc5b43e4924cda3744c72eba\nAuthor: Michael Waskom \nDate: Sat Sep 25 20:42:02 2021 -0400\n\n Refactor _setup_mappings\n\ncommit 42b2481962630c634d5e00c55f181fa454e198c8\nAuthor: Michael Waskom \nDate: Sat Sep 25 20:21:32 2021 -0400\n\n Begin refactoring setup pipeline\n\ncommit edf272961db0f60d4a7c7aec2e6eae868d62468e\nAuthor: Michael Waskom \nDate: Thu Sep 23 21:02:51 2021 -0400\n\n Partial rearrangement of mapping code into new organization\n\ncommit 7417eb70997e7cd0be5a82fd3773187290e39b48\nAuthor: Michael Waskom \nDate: Mon Sep 20 19:36:39 2021 -0400\n\n Consistent sorting of missing keys\n\ncommit 
a179cdcd129c2e0f7c963b92a7b2ca07c4a8dce4\nAuthor: Michael Waskom \nDate: Mon Sep 20 19:36:31 2021 -0400\n\n Add compat layer for MarkerStyle\n\ncommit 917600d522844193318be7fe37e52ca5b3a320c1\nAuthor: Michael Waskom \nDate: Sun Sep 19 20:52:12 2021 -0400\n\n Add tests for MarkerMapping and DashMapping\n\ncommit 4ece96368c2f78f6e84bc55bdfa481c4f01dc0c0\nAuthor: Michael Waskom \nDate: Mon Sep 13 20:51:16 2021 -0400\n\n Refactor DictionaryMapping and add DashMapping\n\ncommit 0bf214d24e767fbfc39e4c9557abc292c329b707\nAuthor: Michael Waskom \nDate: Sun Sep 12 18:51:13 2021 -0400\n\n Add (untested/incomplete) prototype of marker mapping\n\ncommit 4ef6d612e9bc62a55159ef04156ed8687e7ab367\nAuthor: Michael Waskom \nDate: Sat Sep 11 21:18:46 2021 -0400\n\n Rename 'hue' -> 'color' in the rest of the new code\n\ncommit d357b3fcad99b384de5ffee5983b3c564c62ea8e\nAuthor: Michael Waskom \nDate: Sat Sep 11 19:01:41 2021 -0400\n\n Add facecolor and edgecolor mappings\n\ncommit 8e87e2857cd39bf02b8d7a9b6d56fb95df95756e\nAuthor: Michael Waskom \nDate: Sat Sep 11 18:07:54 2021 -0400\n\n Rename hue -> color in semantic mapping code", "code": "def test_numeric_dict_palette_with_norm(self, num_vector, num_order, num_scale):\n\n palette = dict(zip(num_order, color_palette()))\n m = ColorSemantic(palette=palette).setup(num_vector, num_scale)\n for level, color in palette.items():\n assert same_color(m(level), color)\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 52, "n_words": 21, "vocab_size": 20, "complexity": 2, "nloc": 5, "token_counts": 60, "n_ast_nodes": 91, "n_identifiers": 16, "random_cut": "def test_numeric_dict_palette_with_norm(self, num_vector, num_order, num_scale):\n\n palette = dict(zip(num_order, color_palette()))\n m = ColorSemantic(palette=palette).setup(num_vector, num_scale)\n for level, color in palette.items():\n " }, { "id": 214328, "commit_id": "74a8c2bbe952a846a41cf71d9539ae466415ba6a", "repo": "flair", "path": "flair/models/text_classification_model.py", "file_name": "text_classification_model.py", "fun_name": "_get_state_dict", "commit_message": "Adapted models _get_state_dict and _init_model_with_state_dict methods to allow for inheritance.", "code": "def _get_state_dict(self):\n model_state = {\n **super()._get_state_dict(),\n \"document_embeddings\": self.document_embeddings,\n \"label_dictionary\": self.label_dictionary,\n \"label_type\": self.label_type,\n \"multi_label\": self.multi_label,\n \"multi_label_threshold\": self.multi_label_threshold,\n \"weight_dict\": self.weight_dict,\n }\n return model_state\n", "url": "https://github.com/flairNLP/flair.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 118, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 11, "token_counts": 56, "n_ast_nodes": 94, "n_identifiers": 10, "random_cut": "def _get_state_dict(self):\n model_state = {\n **super()._get_state_dict(),\n \"document_embeddings\": self.document_embeddings,\n \"label_dictionary\": self.label_dictionary,\n \"label_type\": self.label_type,\n \"multi_label\": self.multi_label,\n \"multi_label_threshold\": se" }, { "id": 256102, "commit_id": "d470b9d0bd7989d391bd0d8664fdf9d5b627dcf8", "repo": "haystack", "path": "test/test_file_converter.py", "file_name": "test_file_converter.py", "fun_name": "test_language_validation", "commit_message": "Improve dependency management (#1994)\n\n* Fist attempt at using setup.cfg for dependency 
management\r\n\r\n* Trying the new package on the CI and in Docker too\r\n\r\n* Add composite extras_require\r\n\r\n* Add the safe_import function for document store imports and add some try-catch statements on rest_api and ui imports\r\n\r\n* Fix bug on class import and rephrase error message\r\n\r\n* Introduce typing for optional modules and add type: ignore in sparse.py\r\n\r\n* Include importlib_metadata backport for py3.7\r\n\r\n* Add colab group to extra_requires\r\n\r\n* Fix pillow version\r\n\r\n* Fix grpcio\r\n\r\n* Separate out the crawler as another extra\r\n\r\n* Make paths relative in rest_api and ui\r\n\r\n* Update the test matrix in the CI\r\n\r\n* Add try catch statements around the optional imports too to account for direct imports\r\n\r\n* Never mix direct deps with self-references and add ES deps to the base install\r\n\r\n* Refactor several paths in tests to make them insensitive to the execution path\r\n\r\n* Include tstadel review and re-introduce Milvus1 in the tests suite, to fix\r\n\r\n* Wrap pdf conversion utils into safe_import\r\n\r\n* Update some tutorials and rever Milvus1 as default for now, see #2067\r\n\r\n* Fix mypy config\r\n\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def test_language_validation(Converter, caplog):\n converter = Converter(valid_languages=[\"en\"])\n converter.convert(file_path=SAMPLES_PATH/\"pdf\"/\"sample_pdf_1.pdf\")\n assert (\n \"samples/pdf/sample_pdf_1.pdf is not one of ['en'].\"\n not in caplog.text\n )\n\n converter = Converter(valid_languages=[\"de\"])\n converter.convert(file_path=SAMPLES_PATH/\"pdf\"/\"sample_pdf_1.pdf\")\n assert (\n \"samples/pdf/sample_pdf_1.pdf is not one of ['de'].\"\n in caplog.text\n )\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 85, "n_words": 34, "vocab_size": 20, "complexity": 1, "nloc": 13, "token_counts": 68, "n_ast_nodes": 122, "n_identifiers": 9, "random_cut": "def test_language_validation(Converter, caplog):\n converter = Converter(valid_languages=[\"en\"])\n converter.convert(file_path=SAMPLES_PATH/\"pdf\"/\"sample_pdf_1.pdf\")\n assert (\n \"samples/pdf/sample_pdf_1.pdf is not one of ['en'].\"\n not in caplog.text\n )\n\n converter = Converter(valid_languages=[\"de\"])\n " }, { "id": 91746, "commit_id": "3ffb14a47d868956ef759a0cd837066629676774", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_data.py", "file_name": "test_organization_metric_data.py", "fun_name": "test_orderby_percentile_with_pagination", "commit_message": "Revert \"feat(metrics): make indexer more configurable (#35604)\" (#35862)\n\nThis reverts commit 7f60db924ea37f34e0cfe6856777239e2a2ffe13.", "code": "def test_orderby_percentile_with_pagination(self):\n org_id = self.organization.id\n tag1 = indexer.record(org_id, \"tag1\")\n value1 = indexer.record(org_id, \"value1\")\n value2 = indexer.record(org_id, \"value2\")\n\n self._send_buckets(\n [\n {\n \"org_id\": org_id,\n \"project_id\": self.project.id,\n \"metric_id\": self.transaction_lcp_metric,\n \"timestamp\": int(time.time()),\n \"type\": \"d\",\n \"value\": numbers,\n \"tags\": {tag: value},\n \"retention_days\": 90,\n }\n for tag, value, numbers in (\n (tag1, value1, [4, 5, 6]),\n (tag1, value2, [1, 2, 3]),\n )\n ],\n entity=\"metrics_distributions\",\n )\n\n response = self.get_success_response(\n self.organization.slug,\n 
field=f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\",\n statsPeriod=\"1h\",\n interval=\"1h\",\n groupBy=\"tag1\",\n orderBy=f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\",\n per_page=1,\n )\n groups = response.data[\"groups\"]\n assert len(groups) == 1\n assert groups[0][\"by\"] == {\"tag1\": \"value2\"}\n assert groups[0][\"totals\"] == {f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\": 2}\n\n response = self.get_success_response(\n self.organization.slug,\n field=f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\",\n statsPeriod=\"1h\",\n interval=\"1h\",\n groupBy=\"tag1\",\n orderBy=f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\",\n per_page=1,\n cursor=Cursor(0, 1),\n )\n groups = response.data[\"groups\"]\n assert len(groups) == 1\n assert groups[0][\"by\"] == {\"tag1\": \"value1\"}\n assert groups[0][\"totals\"] == {f\"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})\": 5}\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 689, "n_words": 116, "vocab_size": 76, "complexity": 2, "nloc": 51, "token_counts": 317, "n_ast_nodes": 565, "n_identifiers": 35, "random_cut": "def test_orderby_percentile_with_pagination(self):\n org_id = self.organization.id\n tag1 = indexer.record(org_id, \"tag1\")\n value1 = indexer.record(org_id, \"value1\")\n value2 = indexer.record(org_id, \"value2\")\n\n self._send_buckets(\n [\n {\n \"org_id\": org_id,\n \"project" }, { "id": 71371, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/api/test_pages.py", "file_name": "test_pages.py", "fun_name": "test_unpublished_pages_appear_in_list", "commit_message": "Reformat with black", "code": "def test_unpublished_pages_appear_in_list(self):\n total_count = get_total_page_count()\n\n page = models.BlogEntryPage.objects.get(id=16)\n page.unpublish()\n\n response = self.get_response()\n content = json.loads(response.content.decode(\"UTF-8\"))\n self.assertEqual(content[\"meta\"][\"total_count\"], total_count)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 58, "n_words": 17, "vocab_size": 14, "complexity": 1, "nloc": 7, "token_counts": 65, "n_ast_nodes": 111, "n_identifiers": 18, "random_cut": "def test_unpublished_pages_appear_in_list(self):\n total_count = get_total_page_count()\n\n page = models.BlogEntryPage.objects.get(id=16)\n page.unpublish()\n\n response = self.get_response()\n content = json.loads(response.content.decode(\"UTF-8\"))\n self.assertEqual(content" }, { "id": 312786, "commit_id": "8b38fa58aa45d1809f6900729b4046d6c02c2230", "repo": "core", "path": "tests/test_config.py", "file_name": "test_config.py", "fun_name": "teardown", "commit_message": "Bump pytest to 7.0.0 (#65981)", "code": "def teardown():\n \n yield\n\n dt_util.DEFAULT_TIME_ZONE = ORIG_TIMEZONE\n\n if os.path.isfile(YAML_PATH):\n os.remove(YAML_PATH)\n\n if os.path.isfile(SECRET_PATH):\n os.remove(SECRET_PATH)\n\n if os.path.isfile(VERSION_PATH):\n os.remove(VERSION_PATH)\n\n if os.path.isfile(AUTOMATIONS_PATH):\n os.remove(AUTOMATIONS_PATH)\n\n if os.path.isfile(SCRIPTS_PATH):\n os.remove(SCRIPTS_PATH)\n\n if os.path.isfile(SCENES_PATH):\n os.remove(SCENES_PATH)\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 93, "n_words": 24, "vocab_size": 19, 
"complexity": 7, "nloc": 15, "token_counts": 107, "n_ast_nodes": 179, "n_identifiers": 14, "random_cut": "def teardown():\n \n yield\n\n dt_util.DEFAULT_TIME_ZONE = ORIG_TIMEZONE\n\n if os.path.isfile(YAML_PATH):\n os.remove(YAML_PATH)\n\n if os.path.isfile(SECRET_PATH)" }, { "id": 136727, "commit_id": "b8b32f3e1b4f878ab784cc331aa595fea483f8b5", "repo": "ray", "path": "rllib/algorithms/a2c/a2c.py", "file_name": "a2c.py", "fun_name": "validate", "commit_message": "[RLlib] Introduce `rollout_fragment_length` vs `train_batch_size` generic check for different on-policy algos to use. (#30361)", "code": "def validate(self) -> None:\n # Call super's validation method.\n super().validate()\n\n # Synchronous sampling, on-policy PG algo -> Check mismatches between\n # `rollout_fragment_length` and `train_batch_size` to avoid user confusion.\n self.validate_train_batch_size_vs_rollout_fragment_length()\n\n if self.microbatch_size:\n if self.num_gpus > 1:\n raise AttributeError(\n \"A2C does not support multiple GPUs when micro-batching is set.\"\n )\n\n # Train batch size needs to be significantly larger than microbatch\n # size.\n if self.train_batch_size / self.microbatch_size < 3:\n logger.warning(\n \"`train_batch_size` should be considerably larger (at least 3x)\"\n \" than `microbatch_size` for a microbatching setup to make \"\n \"sense!\"\n )\n # Rollout fragment length needs to be less than microbatch_size.\n if (\n self.rollout_fragment_length != \"auto\"\n and self.rollout_fragment_length > self.microbatch_size\n ):\n logger.warning(\n \"`rollout_fragment_length` should not be larger than \"\n \"`microbatch_size` (try setting them to the same value)! \"\n \"Otherwise, microbatches of desired size won't be achievable.\"\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 505, "n_words": 134, "vocab_size": 98, "complexity": 6, "nloc": 23, "token_counts": 80, "n_ast_nodes": 152, "n_identifiers": 11, "random_cut": "def validate(self) -> None:\n # Call super's validation method.\n super().validate()\n\n # Synchronous sampling, on-policy PG algo -> Check mismatches between\n # `rollout_fragment_length` and `train_batch_size` to avoid user confusion.\n self.validate_train_batch_size_vs_rollout_fragment_length()\n\n if self.microbatch_size:\n if self.num_gpus > 1:\n raise AttributeError(\n \"A2C does not support multiple GPUs when micro-batching is set.\"\n )\n\n # Train batch size needs to be significantly larger than microbatch\n # size.\n if self.train_batch_size / self.microbatch_size < 3:\n logger.warning(\n \"`train_batch_size` should be considerably larger (at least 3x)\"\n \" than `microbatch_size` for a microbatching setup to make \"\n \"sense!\"\n )\n # Rollout fragment length ne" }, { "id": 17959, "commit_id": "a485e7e15458907349ee510684112af2430e39e4", "repo": "ccxt", "path": "python/ccxt/latoken.py", "file_name": "latoken.py", "fun_name": "fetch_markets", "commit_message": "1.72.67\n\n[ci skip]", "code": "def fetch_markets(self, params={}):\n currencies = self.fetch_currencies_from_cache(params)\n #\n # [\n # {\n # \"id\":\"1a075819-9e0b-48fc-8784-4dab1d186d6d\",\n # \"status\":\"CURRENCY_STATUS_ACTIVE\",\n # \"type\":\"CURRENCY_TYPE_ALTERNATIVE\", # CURRENCY_TYPE_CRYPTO, CURRENCY_TYPE_IEO\n # \"name\":\"MyCryptoBank\",\n # \"tag\":\"MCB\",\n # \"description\":\"\",\n # \"logo\":\"\",\n # \"decimals\":18,\n # \"created\":1572912000000,\n # \"tier\":1,\n # 
\"assetClass\":\"ASSET_CLASS_UNKNOWN\",\n # \"minTransferAmount\":0\n # },\n # {\n # \"id\":\"db02758e-2507-46a5-a805-7bc60355b3eb\",\n # \"status\":\"CURRENCY_STATUS_ACTIVE\",\n # \"type\":\"CURRENCY_TYPE_FUTURES_CONTRACT\",\n # \"name\":\"BTC USDT Futures Contract\",\n # \"tag\":\"BTCUSDT\",\n # \"description\":\"\",\n # \"logo\":\"\",\n # \"decimals\":8,\n # \"created\":1589459984395,\n # \"tier\":1,\n # \"assetClass\":\"ASSET_CLASS_UNKNOWN\",\n # \"minTransferAmount\":0\n # },\n # ]\n #\n response = self.publicGetPair(params)\n #\n # [\n # {\n # \"id\":\"dba4289b-6b46-4d94-bf55-49eec9a163ad\",\n # \"status\":\"PAIR_STATUS_ACTIVE\", # CURRENCY_STATUS_INACTIVE\n # \"baseCurrency\":\"fb9b53d6-bbf6-472f-b6ba-73cc0d606c9b\",\n # \"quoteCurrency\":\"620f2019-33c0-423b-8a9d-cde4d7f8ef7f\",\n # \"priceTick\":\"0.000000100000000000\",\n # \"priceDecimals\":7,\n # \"quantityTick\":\"0.010000000\",\n # \"quantityDecimals\":2,\n # \"costDisplayDecimals\":7,\n # \"created\":1572957210501,\n # \"minOrderQuantity\":\"0\",\n # \"maxOrderCostUsd\":\"999999999999999999\",\n # \"minOrderCostUsd\":\"0\",\n # \"externalSymbol\":\"\"\n # }\n # ]\n #\n if self.safe_value(self.options, 'adjustForTimeDifference', True):\n self.load_time_difference()\n currenciesById = self.index_by(currencies, 'id')\n result = []\n for i in range(0, len(response)):\n market = response[i]\n id = self.safe_string(market, 'id')\n # the exchange shows them inverted\n baseId = self.safe_string(market, 'baseCurrency')\n quoteId = self.safe_string(market, 'quoteCurrency')\n baseCurrency = self.safe_value(currenciesById, baseId)\n quoteCurrency = self.safe_value(currenciesById, quoteId)\n if baseCurrency is not None and quoteCurrency is not None:\n base = self.safe_currency_code(self.safe_string(baseCurrency, 'tag'))\n quote = self.safe_currency_code(self.safe_string(quoteCurrency, 'tag'))\n lowercaseQuote = quote.lower()\n capitalizedQuote = self.capitalize(lowercaseQuote)\n status = self.safe_string(market, 'status')\n result.append({\n 'id': id,\n 'symbol': base + '/' + quote,\n 'base': base,\n 'quote': quote,\n 'settle': None,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': None,\n 'type': 'spot',\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'active': (status == 'PAIR_STATUS_ACTIVE'), # assuming True\n 'contract': False,\n 'linear': None,\n 'inverse': None,\n 'contractSize': None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'amount': self.safe_number(market, 'quantityTick'),\n 'price': self.safe_number(market, 'priceTick'),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': self.safe_number(market, 'minOrderQuantity'),\n 'max': None,\n },\n 'price': {\n 'min': None,\n 'max': None,\n },\n 'cost': {\n 'min': self.safe_number(market, 'minOrderCost' + capitalizedQuote),\n 'max': self.safe_number(market, 'maxOrderCost' + capitalizedQuote),\n },\n },\n 'info': market,\n })\n return result\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 2416, "n_words": 299, "vocab_size": 160, "complexity": 5, "nloc": 69, "token_counts": 416, "n_ast_nodes": 759, "n_identifiers": 33, "random_cut": "def fetch_markets(self, params={}):\n currencies = self.fetch_currencies_from_cache(params)\n #\n # [\n # {\n # \"id\":\"1a075819-9e0b-48fc-8784-4dab1d186d6d\",\n # \"status\":\"CURRENCY_STATUS_ACTIVE\",\n # 
\"type\":\"CURRENCY_TYPE_ALTERNATIVE\", # CURRENCY_TYPE_CRYPTO, CURRENCY_TYPE_IEO\n # \"name\":\"MyCryptoBank\",\n # \"tag\":\"MCB\",\n # \"description\":\"\",\n # \"logo\":\"\",\n # " }, { "id": 111707, "commit_id": "39d9a8d97c3c253904dbb098c5ad745e7f9409c5", "repo": "nni", "path": "nni/compression/pytorch/utils/counter.py", "file_name": "counter.py", "fun_name": "format_results", "commit_message": "add input and output shape information (#4539)", "code": "def format_results(self):\n table = PrettyTable()\n name_counter = Counter([s['name'] for s in self.results])\n has_multi_use = any(map(lambda v: v > 1, name_counter.values()))\n name_counter = Counter() # clear the counter to count from 0\n\n headers = [\n 'Index',\n 'Name',\n 'Type',\n 'Weight Shape',\n 'Input Size',\n 'Output Size',\n 'FLOPs',\n '#Params',\n ]\n if has_multi_use:\n headers.append('#Call')\n\n table.field_names = headers\n for i, result in enumerate(self.results):\n flops_count = int(result['flops'].item()) if isinstance(result['flops'], torch.Tensor) else int(result['flops'])\n row_values = [\n i,\n result['name'],\n result['module_type'],\n str(result['weight_shape']),\n result['input_size'],\n result['output_size'],\n flops_count,\n result['params'],\n ]\n name_counter[result['name']] += 1\n if has_multi_use:\n row_values.append(name_counter[result['name']])\n table.add_row(row_values)\n table.align[\"Name\"] = \"l\"\n return table\n\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 466, "n_words": 89, "vocab_size": 69, "complexity": 6, "nloc": 36, "token_counts": 212, "n_ast_nodes": 357, "n_identifiers": 29, "random_cut": "def format_results(self):\n table = PrettyTable()\n name_counter = Counter([s['name'] for s in self.results])\n has_multi_use = any(map(lambda v: v > 1, name_counter.values()))\n name_counter = Counter() # clear the counter to count from 0\n\n headers = [\n 'Index',\n 'Name',\n 'Type',\n 'Weight Shape',\n 'Input Size',\n 'Output Size',\n 'FLOPs',\n '#Params',\n ]\n if has_multi_use:\n headers.append('#Call')\n\n table.field_names = headers\n for i, result in enumerate(self.results):\n flops_count = int(result['flops'].item()) if isinstance(result['flops'], torch.Tensor) else int(result['flops'])\n row_values = [\n i,\n result['name'],\n result['module_type'],\n str(result['weight_shape']),\n result['input_size'],\n result['outp" }, { "id": 88517, "commit_id": "fef9c695a1a7d3384fb3ce7ec6c264632e77061d", "repo": "sentry", "path": "tests/sentry/auth/test_access.py", "file_name": "test_access.py", "fun_name": "test_member_role_in_organization_closed_membership", "commit_message": "feature(hybrid-cloud): Access with silo tests (#41305)\n\nGoal of this PR is implement a secondary interface for creating `Access`\r\nobjects that work on service dataclasses only. It validates that\r\nsecondary interface by running the access test suite against both\r\nimplementations *in all silo modes* ensuring full compatibility.\r\n\r\nNotably, while most of the org member access logic is left untouched,\r\nsome parts of existing logic have been slightly refactored:\r\n\r\n1. Organizationless Access objects no longer need the DB, and act on\r\nshared logic from the service layer.\r\n2. 
sso state and permissions querying is now extracted into the service\r\nlayer, and even the existing access uses that.", "code": "def test_member_role_in_organization_closed_membership(self):\n # disable default allow_joinleave\n with exempt_from_silo_limits():\n self.org.update(flags=0)\n member_user = self.create_user(is_superuser=False)\n self.create_member(\n user=member_user, organization=self.org, role=\"member\", teams=[self.team1]\n )\n\n request = self.make_request(member_user, is_superuser=False)\n result = self.from_request(request, self.org)\n\n assert result.role == \"member\"\n assert result.team_ids_with_membership == frozenset({self.team1.id})\n assert result.has_team_access(self.team1)\n assert result.project_ids_with_team_membership == frozenset({self.project1.id})\n assert result.has_project_access(self.project1)\n assert result.has_project_membership(self.project1)\n assert not result.has_project_membership(self.project2)\n\n # member_user should not have visibility to other teams or projects\n assert not result.has_global_access\n assert not result.has_team_access(self.team2)\n assert not result.has_project_access(self.project2)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 214, "n_words": 67, "vocab_size": 48, "complexity": 1, "nloc": 19, "token_counts": 178, "n_ast_nodes": 284, "n_identifiers": 30, "random_cut": "def test_member_role_in_organization_closed_membership(self):\n # disable default allow_joinleave\n with exempt_from_silo_limits():\n self.org.update(flags=0)\n member_user = self.create_user(is_superuser=False)\n self.create_member(\n user=member_user, organization=self.org, role=\"member\", teams=[self.team1]\n )\n\n request = self.make_request(member_user, is_superuser=False)\n result = self.from_request(request, self.org)\n\n assert result.role == \"member\"\n assert result.team_ids_with_membership == frozenset({self.team1" }, { "id": 317328, "commit_id": "b3ef6f4d047358048ecf12a4b4e07379cc90bcba", "repo": "core", "path": "homeassistant/components/guardian/binary_sensor.py", "file_name": "binary_sensor.py", "fun_name": "_async_update_from_latest_data", "commit_message": "Simplify Guardian entity inheritance hierarchy (#75274)", "code": "def _async_update_from_latest_data(self) -> None:\n \n if self.entity_description.key == SENSOR_KIND_AP_INFO:\n self._attr_is_on = self.coordinator.data[\"station_connected\"]\n self._attr_extra_state_attributes[\n ATTR_CONNECTED_CLIENTS\n ] = self.coordinator.data.get(\"ap_clients\")\n elif self.entity_description.key == SENSOR_KIND_LEAK_DETECTED:\n self._attr_is_on = self.coordinator.data[\"wet\"]\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 103, "n_words": 23, "vocab_size": 18, "complexity": 3, "nloc": 9, "token_counts": 67, "n_ast_nodes": 112, "n_identifiers": 12, "random_cut": "def _async_update_from_latest_data(self) -> None:\n \n if self.entity_description.key == SENSOR_KIND_AP_INFO:\n self._attr_is_on = self.coordinator.data[\"station_connected\"]\n self._attr_extra_state_attributes[\n ATTR_CONNECTED_CLIENTS\n ] = self.coordinator.data.get(\"ap_clients\")\n elif self.entity_description.key == SENSOR_KIND_LEAK_DETECTED:\n self._attr_is_on = self.coordinator.data[\"wet\"]\n" }, { "id": 299023, "commit_id": "24b4690e5d855be362613583a3ba6fd6f60e9929", "repo": "core", "path": 
"homeassistant/components/zwave_js/climate.py", "file_name": "climate.py", "fun_name": "_set_modes_and_presets", "commit_message": "Use climate enums in zwave_js (#70757)", "code": "def _set_modes_and_presets(self) -> None:\n \n all_modes: dict[HVACMode, int | None] = {}\n all_presets: dict[str, int | None] = {PRESET_NONE: None}\n\n # Z-Wave uses one list for both modes and presets.\n # Iterate over all Z-Wave ThermostatModes and extract the hvac modes and presets.\n if self._current_mode is None:\n self._hvac_modes = {\n ZW_HVAC_MODE_MAP[ThermostatMode.HEAT]: ThermostatMode.HEAT\n }\n return\n for mode_id, mode_name in self._current_mode.metadata.states.items():\n mode_id = int(mode_id)\n if mode_id in THERMOSTAT_MODES:\n # treat value as hvac mode\n if hass_mode := ZW_HVAC_MODE_MAP.get(mode_id):\n all_modes[hass_mode] = mode_id\n else:\n # treat value as hvac preset\n all_presets[mode_name] = mode_id\n self._hvac_modes = all_modes\n self._hvac_presets = all_presets\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 317, "n_words": 94, "vocab_size": 62, "complexity": 5, "nloc": 18, "token_counts": 123, "n_ast_nodes": 197, "n_identifiers": 23, "random_cut": "def _set_modes_and_presets(self) -> None:\n \n all_modes: dict[HVACMode, int | None] = {}\n all_presets: dict[str, int | None] = {PRESET_NONE: None}\n\n # Z-Wave uses one list for both modes and presets.\n # Iterate over all Z-Wave ThermostatModes and extract the hvac modes and presets.\n if self._current" }, { "id": 22301, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "Assembler/assembler.py", "file_name": "assembler.py", "fun_name": "loadFile", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def loadFile(fileName):\n \n global lines\n fo = open(fileName)\n for line in fo:\n lines.append(line)\n fo.close()\n\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 35, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 6, "token_counts": 30, "n_ast_nodes": 52, "n_identifiers": 8, "random_cut": "def loadFile(fileName):\n " }, { "id": 126169, "commit_id": "66ed41b22a50deec7e7d6952dc811c14b3f1063c", "repo": "ray", "path": "python/ray/air/tests/test_checkpoints.py", "file_name": "test_checkpoints.py", "fun_name": "test_repr", "commit_message": "[AIR] Add `__repr__` to AIR classes (#27006)", "code": "def test_repr():\n checkpoint = Checkpoint(data_dict={\"foo\": \"bar\"})\n\n representation = repr(checkpoint)\n\n assert len(representation) < MAX_REPR_LENGTH\n pattern = re.compile(\"^Checkpoint\\\\((.*)\\\\)$\")\n assert pattern.match(representation)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 32, "n_words": 18, "vocab_size": 15, "complexity": 1, "nloc": 6, "token_counts": 44, "n_ast_nodes": 80, "n_identifiers": 12, "random_cut": "def test_repr():\n checkpoint = Checkpoint(data_dict={\"foo\": \"bar\"})\n\n repre" }, { "id": 272122, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/feature_column/sequence_feature_column_test.py", "file_name": "sequence_feature_column_test.py", "fun_name": "test_from_config", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_from_config(self, 
trainable, name):\n cols = [tf.feature_column.sequence_numeric_column(\"a\")]\n orig_layer = ksfc.SequenceFeatures(cols, trainable=trainable, name=name)\n config = orig_layer.get_config()\n\n new_layer = ksfc.SequenceFeatures.from_config(config)\n\n self.assertEqual(new_layer.name, orig_layer.name)\n self.assertEqual(new_layer.trainable, trainable)\n self.assertLen(new_layer._feature_columns, 1)\n self.assertEqual(new_layer._feature_columns[0].name, \"a\")\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 81, "n_words": 26, "vocab_size": 23, "complexity": 1, "nloc": 9, "token_counts": 101, "n_ast_nodes": 157, "n_identifiers": 18, "random_cut": "def test_from_config(self, trainable, name):\n cols = [tf.feature_column.sequence_numeric_column(\"a\")]\n orig_layer = ksfc.SequenceFeatures(cols, trainable=trainable, name=name)\n config = orig_layer.get_confi" }, { "id": 125546, "commit_id": "042450d319eaae4e845e46c4f8adf8775b21c8a9", "repo": "ray", "path": "python/ray/data/tests/test_dataset_formats.py", "file_name": "test_dataset_formats.py", "fun_name": "test_numpy_roundtrip", "commit_message": "Revert \"[Datasets] Automatically cast tensor columns when building Pandas blocks. (#26684)\" (#26921)\n\nThis reverts commit 0c139914bbb3e3557f13738b5f3f9fe8d2d428b4.", "code": "def test_numpy_roundtrip(ray_start_regular_shared, fs, data_path):\n ds = ray.data.range_tensor(10, parallelism=2)\n ds.write_numpy(data_path, filesystem=fs)\n ds = ray.data.read_numpy(data_path, filesystem=fs)\n assert str(ds) == (\n \"Dataset(num_blocks=2, num_rows=None, \"\n \"schema={__value__: })\"\n )\n np.testing.assert_equal(ds.take(2), [np.array([0]), np.array([1])])\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 60, "n_words": 29, "vocab_size": 26, "complexity": 1, "nloc": 9, "token_counts": 90, "n_ast_nodes": 140, "n_identifiers": 18, "random_cut": "def test_numpy_roundtrip(ray_start_regular_shared, fs, data_path):\n ds = ray.data.range_tensor(10, parallelism=2)\n ds.write_numpy(data_path, filesystem=fs)\n ds = ray.data" }, { "id": 55671, "commit_id": "b201f7b5fef84a2aa641e6ba1fa6e10c397fcda5", "repo": "prefect", "path": "tests/blocks/test_core.py", "file_name": "test_core.py", "fun_name": "test_create_block_from_nonexistant_name", "commit_message": "Adds missings tests and Block from_name method", "code": "async def test_create_block_from_nonexistant_name(self, block_class):\n with pytest.raises(\n ValueError,\n match=\"Unable to find block document named blocky for block type x\",\n ):\n await block_class.from_name(\"blocky\")\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 67, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 6, "token_counts": 27, "n_ast_nodes": 49, "n_identifiers": 8, "random_cut": "async def test_create_block_from_nonexistant_name(self, block_class):\n with pytest.raises(\n ValueError,\n match=\"Unable to find block document named blocky for block type x\",\n ):\n await block_class.from_" }, { "id": 208667, "commit_id": "e5d78c598bcd615b1007c41508a2486a97598a97", "repo": "ipython", "path": "IPython/core/tests/test_interactiveshell.py", "file_name": "test_interactiveshell.py", "fun_name": "test_run_cell", "commit_message": "Increase test coverage", "code": "def test_run_cell(self):\n with 
tt.AssertPrints(\"-34\"):\n ip.run_cell(\"print(12 + 22)\")\n\n # A named reference to a number shouldn't be transformed.\n ip.user_ns[\"n\"] = 55\n with tt.AssertNotPrints(\"-55\"):\n ip.run_cell(\"print(n)\")\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 72, "n_words": 23, "vocab_size": 22, "complexity": 1, "nloc": 6, "token_counts": 41, "n_ast_nodes": 82, "n_identifiers": 8, "random_cut": "def test_run_cell(self):\n with tt.AssertPrin" }, { "id": 97705, "commit_id": "9e0a1d7f6e82eff650bc76677236bb1697f151b8", "repo": "sentry", "path": "tests/sentry/snuba/metrics/fields/test_histogram.py", "file_name": "test_histogram.py", "fun_name": "_call", "commit_message": "feat(metrics): Implement basic histogram functions on metrics_v2 API + QueryBuilder (#32992)\n\nCo-authored-by: Joris Bayer \r\nCo-authored-by: Ahmed Etefy ", "code": "def _call(data, histogram_from, histogram_to, histogram_buckets, output):\n return pytest.param(\n dict(\n data=data,\n histogram_from=histogram_from,\n histogram_to=histogram_to,\n histogram_buckets=histogram_buckets,\n ),\n output,\n id=f\"f({data}, {histogram_from}, {histogram_to}, {histogram_buckets})\",\n )\n\n\n@pytest.mark.parametrize(\n \"kwargs,output\",\n [\n _call([], None, None, 0, output=[]),\n _call([(1, 2, 3)], None, None, 0, output=[]),\n _call([(1, 2, 3)], 5, 6, 1, output=[(5.0, 6.0, 0.0)]),\n _call([(1, 2, 3)], 0, 0, 1, output=[(0.0, 0.0, 0.0)]),\n _call([(1, 2, 3)], 0, 1, 1, output=[(0.0, 1.0, 0.0)]),\n _call([(1, 2, 3)], 0, 1.5, 1, output=[(0.0, 1.5, 1.5)]),\n _call([(1, 2, 3)], 0, 2, 1, output=[(0.0, 2.0, 3.0)]),\n ],\n)", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"kwargs,output\",\n [\n _call([], None, None, 0, output=[]),\n _call([(1, 2, 3)], None, None, 0, output=[]),\n _call([(1, 2, 3)], 5, 6, 1, output=[(5.0, 6.0, 0.0)]),\n _call([(1, 2, 3)], 0, 0, 1, output=[(0.0, 0.0, 0.0)]),\n _call([(1, 2, 3)], 0, 1, 1, output=[(0.0, 1.0, 0.0)]),\n _call([(1, 2, 3)], 0, 1.5, 1, output=[(0.0, 1.5, 1.5)]),\n _call([(1, 2, 3)], 0, 2, 1, output=[(0.0, 2.0, 3.0)]),\n ],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 215, "n_words": 82, "vocab_size": 44, "complexity": 1, "nloc": 11, "token_counts": 46, "n_ast_nodes": 342, "n_identifiers": 12, "random_cut": "def _call(data, histogram_from, histogram_to, histogram_buckets, output):\n return pytest.param(\n dict(\n data=data,\n histogram_from=histogram_from,\n histogram_to=histogram_to,\n histogram_buckets=histogram_buckets,\n ),\n output,\n id=f\"f({data}, {histogram_from}, {histogram_to}, {histogram_buckets})\",\n )\n\n\n@pytest.mark.parametrize(\n \"kwargs,output\",\n [\n _call([], None, None, 0, output=[]),\n _call([(1, 2, 3)], None, None, 0, output=[]),\n _call([(1, 2, 3)], 5, 6, 1, output=[(5.0, 6.0, 0.0)]),\n _call([(1, 2, 3)], 0, 0, 1, output=[(0.0, 0.0, 0.0)]),\n _call([(1, 2, 3)], 0, 1, 1, output=[(0.0, 1.0, 0.0)]),\n _call([(1, 2, 3)], 0, 1.5, 1, output=[(0.0, 1.5, 1.5)]),\n " }, { "id": 83556, "commit_id": "55882fb3430d40a1b08161e06d8cfa675980f288", "repo": "zulip", "path": "zerver/tests/test_message_flags.py", "file_name": "test_message_flags.py", "fun_name": "test_huddle_messages_unread_mention", "commit_message": "python: Use modern set comprehension syntax.\n\nGenerated by pyupgrade.\n\nSigned-off-by: Anders Kaseorg ", "code": "def test_huddle_messages_unread_mention(self) -> None:\n sender = 
self.example_user(\"cordelia\")\n receiver = self.example_user(\"hamlet\")\n user1 = self.example_user(\"othello\")\n message_ids = [\n # self.send_huddle_message(sender, receiver, content=\"Hello\") for i in range(4)\n self.send_huddle_message(\n from_user=sender, to_users=[receiver, user1], content=\"@**King Hamlet**\"\n )\n for i in range(4)\n ]\n self.login(\"hamlet\")\n for message_id in message_ids:\n um = UserMessage.objects.get(\n user_profile_id=receiver.id,\n message_id=message_id,\n )\n self.assertFalse(um.flags.read)\n result = self.client_post(\n \"/json/messages/flags\",\n {\"messages\": orjson.dumps(message_ids).decode(), \"op\": \"add\", \"flag\": \"read\"},\n )\n self.assert_json_success(result)\n for message_id in message_ids:\n um = UserMessage.objects.get(\n user_profile_id=receiver.id,\n message_id=message_id,\n )\n self.assertTrue(um.flags.read)\n messages_to_unread = message_ids[2:]\n messages_still_read = message_ids[:2]\n\n params = {\n \"messages\": orjson.dumps(messages_to_unread).decode(),\n \"op\": \"remove\",\n \"flag\": \"read\",\n }\n\n events: List[Mapping[str, Any]] = []\n\n # Use the tornado_redirected_to_list context manager to capture\n # events.\n with self.tornado_redirected_to_list(events, expected_num_events=1):\n result = self.api_post(receiver, \"/api/v1/messages/flags\", params)\n\n self.assert_json_success(result)\n event = events[0][\"event\"]\n self.assertEqual(event[\"messages\"], messages_to_unread)\n unread_message_ids = {str(message_id) for message_id in messages_to_unread}\n self.assertSetEqual(set(event[\"message_details\"].keys()), unread_message_ids)\n for message_id in event[\"message_details\"]:\n self.assertEqual(event[\"message_details\"][message_id][\"mentioned\"], True),\n\n for message_id in messages_to_unread:\n um = UserMessage.objects.get(\n user_profile_id=receiver.id,\n message_id=message_id,\n )\n self.assertFalse(um.flags.read)\n for message_id in messages_still_read:\n um = UserMessage.objects.get(\n user_profile_id=receiver.id,\n message_id=message_id,\n )\n self.assertTrue(um.flags.read)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 729, "n_words": 153, "vocab_size": 91, "complexity": 8, "nloc": 57, "token_counts": 401, "n_ast_nodes": 652, "n_identifiers": 48, "random_cut": "def test_huddle_messages_unread_mention(self) -> None:\n sender = self.example_user(\"cordelia\")\n receiver = self.example_user(\"hamlet\")\n user1 = self.example_user(\"othello\")\n message_ids = [\n # self.send_huddle_message(sender, receiver, content=\"Hello\") for i in range(4)\n self.send_huddle_message(\n from_user=sender, to_users=[receiver, user1], content=\"@**King Hamlet**\"\n )\n for i in range(4)\n ]\n self.login(\"hamlet\")\n for message_id in message_ids:\n um = UserMessage.objects.get(\n user_profile_id=receiver.id,\n message_id=message_id,\n )\n self.assertFalse(um.flags.read)\n result = self.client_post(\n \"/json/messages/flags\",\n {\"messages\": orjson.dumps(message_ids).decode(), \"op\": \"add\", \"flag\": \"read\"},\n )\n self.assert_json_success(result)\n for message_id in message_ids:\n um = UserMessage.objects.get(\n user_profile_id=receiver.id,\n message_id=message_id,\n )\n self.assertTrue(um.flags.read)\n messages_to_unread = message_ids[2:]\n messages_still_read = message_ids[:2]\n\n params = {\n \"messages\": orjson.dumps(messages_to_unread).decode(),\n \"op\": \"remove\",\n \"flag\": \"read\",\n }\n\n events: 
List[Mapping[str, Any]] = []\n\n # Use the tornado_redirected_to_list context manager to capture\n # events.\n with self.tornado_redirected_to_list(events, expected_num_events=1):\n result = self.api_post(receiver, \"/api/v1/messages/flags\", params)\n\n self.assert_json_success(result)\n event = events[0][\"event\"]\n self.assertEqual(event[\"messages\"], messages_to_unread)\n unread_message_ids = {str(message_id) for message_id in messages_to_unread}\n self.assertSetEqual(set(event[\"message_details\"].keys()), unread_message_ids)\n for message_id in event[\"message_details\"]:\n self.assertEqual(event[\"message_details\"][message_id][\"mentioned\"], True),\n\n for message_id in messages_to_unread:\n um = UserMess" }, { "id": 136228, "commit_id": "ef628023539cea354fbff94561ba8d91fd27a275", "repo": "ray", "path": "rllib/algorithms/tests/test_algorithm.py", "file_name": "test_algorithm.py", "fun_name": "test_add_delete_policy", "commit_message": "[RLlib] Unify policy mapping function usage (#30216)\n\nSigned-off-by: Artur Niederfahrenhorst ", "code": "def test_add_delete_policy(self):\n config = pg.PGConfig()\n config.environment(\n env=MultiAgentCartPole,\n env_config={\n \"config\": {\n \"num_agents\": 4,\n },\n },\n ).rollouts(num_rollout_workers=2, rollout_fragment_length=50).resources(\n num_cpus_per_worker=0.1\n ).training(\n train_batch_size=100,\n ).multi_agent(\n # Start with a single policy.\n policies={\"p0\"},\n policy_mapping_fn=lambda agent_id, episode, worker, **kwargs: \"p0\",\n # And only two policies that can be stored in memory at a\n # time.\n policy_map_capacity=2,\n ).evaluation(\n evaluation_num_workers=1,\n evaluation_config={\n \"num_cpus_per_worker\": 0.1,\n },\n )\n # Don't override existing model settings.\n config.model.update(\n {\n \"fcnet_hiddens\": [5],\n \"fcnet_activation\": \"linear\",\n }\n )\n\n obs_space = gym.spaces.Box(-2.0, 2.0, (4,))\n act_space = gym.spaces.Discrete(2)\n\n for fw in framework_iterator(config):\n # Pre-generate a policy instance to test adding these directly to an\n # existing algorithm.\n if fw == \"tf\":\n policy_obj = pg.PGTF1Policy(obs_space, act_space, config.to_dict())\n elif fw == \"tf2\":\n policy_obj = pg.PGTF2Policy(obs_space, act_space, config.to_dict())\n else:\n policy_obj = pg.PGTorchPolicy(obs_space, act_space, config.to_dict())\n\n # Construct the Algorithm with a single policy in it.\n algo = config.build()\n pol0 = algo.get_policy(\"p0\")\n r = algo.train()\n self.assertTrue(\"p0\" in r[\"info\"][LEARNER_INFO])\n for i in range(1, 3):\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 23, "n_whitespaces": 674, "n_words": 148, "vocab_size": 108, "complexity": 9, "nloc": 139, "token_counts": 848, "n_ast_nodes": 434, "n_identifiers": 52, "random_cut": "def test_add_delete_policy(self):\n config = pg.PGConfig()\n config.environment(\n env=MultiAgentCartPole,\n env_config={\n \"config\": {\n \"num_agents\": 4,\n },\n },\n ).rollouts(num_rollout_workers=2, rollout_fragment_length=50).resources(\n num_cpus_per_worker=0.1\n ).training(\n train_batch_size=10" }, { "id": 162515, "commit_id": "d5a398988bb4db5ea610e3cb2548f0e084a1137e", "repo": "yt-dlp", "path": "test/test_youtube_lists.py", "file_name": "test_youtube_lists.py", "fun_name": "test_youtube_playlist_noplaylist", "commit_message": "Update to ytdl-commit-78ce962\n\n[youtube] Support channel search\nhttps://github.com/ytdl-org/youtube-dl/commit/78ce962f4fe020994c216dd2671546fbe58a5c67", 
"code": "def test_youtube_playlist_noplaylist(self):\n dl = FakeYDL()\n dl.params['noplaylist'] = True\n ie = YoutubeTabIE(dl)\n result = ie.extract('https://www.youtube.com/watch?v=OmJ-4B-mS-Y&list=PLydZ2Hrp_gPRJViZjLFKaBMgCQOYEEkyp&index=2')\n self.assertEqual(result['_type'], 'url')\n self.assertEqual(result['ie_key'], YoutubeIE.ie_key())\n self.assertEqual(YoutubeIE.extract_id(result['url']), 'OmJ-4B-mS-Y')\n", "url": "https://github.com/yt-dlp/yt-dlp.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 68, "n_words": 20, "vocab_size": 17, "complexity": 1, "nloc": 8, "token_counts": 74, "n_ast_nodes": 130, "n_identifiers": 13, "random_cut": "def test_youtube_playlist_noplaylist(self):\n dl = FakeYDL()\n dl.params['noplaylist'] = True\n ie = YoutubeTabIE(dl)\n result = ie.extract('https://www.youtube.com/watch?v=OmJ-4B-mS-Y&list=PLydZ2Hrp_gPRJViZjLFKaBMgCQOYEEkyp&index" }, { "id": 154709, "commit_id": "0a2c0de4451f7e2e8f337a9478d7595473aa348e", "repo": "modin", "path": "modin/pandas/test/test_series.py", "file_name": "test_series.py", "fun_name": "test_cumsum", "commit_message": "REFACTOR-#5026: Change exception names to simplify grepping (#5027)\n\nSigned-off-by: Myachev ", "code": "def test_cumsum(data, skipna):\n modin_series, pandas_series = create_test_series(data)\n try:\n pandas_result = pandas_series.cumsum(skipna=skipna)\n except Exception as err:\n with pytest.raises(type(err)):\n modin_series.cumsum(skipna=skipna)\n else:\n df_equals(modin_series.cumsum(skipna=skipna), pandas_result)\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 66, "n_words": 24, "vocab_size": 23, "complexity": 2, "nloc": 9, "token_counts": 66, "n_ast_nodes": 134, "n_identifiers": 19, "random_cut": "def test_cumsum(data, skipna):\n modin_series, pandas_series = create_test_series(data)\n try:\n pandas_result = pandas_series.cumsum(skipna=skipna)\n except Exception as err:\n with pytest.raises(type(err)):\n " }, { "id": 259205, "commit_id": "7f0006c8aad1a09621ad19c3db19c3ff0555a183", "repo": "scikit-learn", "path": "sklearn/preprocessing/_encoders.py", "file_name": "_encoders.py", "fun_name": "get_feature_names", "commit_message": "ENH Adds infrequent categories to OneHotEncoder (#16018)\n\n* ENH Completely adds infrequent categories\r\n\r\n* STY Linting\r\n\r\n* STY Linting\r\n\r\n* DOC Improves wording\r\n\r\n* DOC Lint\r\n\r\n* BUG Fixes\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* DOC Uses math to description float min_frequency\r\n\r\n* DOC Adds comment regarding drop\r\n\r\n* BUG Fixes method name\r\n\r\n* DOC Clearer docstring\r\n\r\n* TST Adds more tests\r\n\r\n* FIX Fixes mege\r\n\r\n* CLN More pythonic\r\n\r\n* CLN Address comments\r\n\r\n* STY Flake8\r\n\r\n* CLN Address comments\r\n\r\n* DOC Fix\r\n\r\n* MRG\r\n\r\n* WIP\r\n\r\n* ENH Address comments\r\n\r\n* STY Fix\r\n\r\n* ENH Use functiion call instead of property\r\n\r\n* ENH Adds counts feature\r\n\r\n* CLN Rename variables\r\n\r\n* DOC More details\r\n\r\n* CLN Remove unneeded line\r\n\r\n* CLN Less lines is less complicated\r\n\r\n* CLN Less diffs\r\n\r\n* CLN Improves readiabilty\r\n\r\n* BUG Fix\r\n\r\n* CLN Address comments\r\n\r\n* TST Fix\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* CLN Move 
docstring to userguide\r\n\r\n* DOC Better wrapping\r\n\r\n* TST Adds test to handle_unknown='error'\r\n\r\n* ENH Spelling error in docstring\r\n\r\n* BUG Fixes counter with nan values\r\n\r\n* BUG Removes unneeded test\r\n\r\n* BUG Fixes issue\r\n\r\n* ENH Sync with main\r\n\r\n* DOC Correct settings\r\n\r\n* DOC Adds docstring\r\n\r\n* DOC Immprove user guide\r\n\r\n* DOC Move to 1.0\r\n\r\n* DOC Update docs\r\n\r\n* TST Remove test\r\n\r\n* DOC Update docstring\r\n\r\n* STY Linting\r\n\r\n* DOC Address comments\r\n\r\n* ENH Neater code\r\n\r\n* DOC Update explaination for auto\r\n\r\n* Update sklearn/preprocessing/_encoders.py\r\n\r\nCo-authored-by: Roman Yurchak \r\n\r\n* TST Uses docstring instead of comments\r\n\r\n* TST Remove call to fit\r\n\r\n* TST Spelling error\r\n\r\n* ENH Adds support for drop + infrequent categories\r\n\r\n* ENH Adds infrequent_if_exist option\r\n\r\n* DOC Address comments for user guide\r\n\r\n* DOC Address comments for whats_new\r\n\r\n* DOC Update docstring based on comments\r\n\r\n* CLN Update test with suggestions\r\n\r\n* ENH Adds computed property infrequent_categories_\r\n\r\n* DOC Adds where the infrequent column is located\r\n\r\n* TST Adds more test for infrequent_categories_\r\n\r\n* DOC Adds docstring for _compute_drop_idx\r\n\r\n* CLN Moves _convert_to_infrequent_idx into its own method\r\n\r\n* TST Increases test coverage\r\n\r\n* TST Adds failing test\r\n\r\n* CLN Careful consideration of dropped and inverse_transform\r\n\r\n* STY Linting\r\n\r\n* DOC Adds docstrinb about dropping infrequent\r\n\r\n* DOC Uses only\r\n\r\n* DOC Numpydoc\r\n\r\n* TST Includes test for get_feature_names_out\r\n\r\n* DOC Move whats new\r\n\r\n* DOC Address docstring comments\r\n\r\n* DOC Docstring changes\r\n\r\n* TST Better comments\r\n\r\n* TST Adds check for handle_unknown='ignore' for infrequent\r\n\r\n* CLN Make _infrequent_indices private\r\n\r\n* CLN Change min_frequency default to None\r\n\r\n* DOC Adds comments\r\n\r\n* ENH adds support for max_categories=1\r\n\r\n* ENH Describe lexicon ordering for ties\r\n\r\n* DOC Better docstring\r\n\r\n* STY Fix\r\n\r\n* CLN Error when explicity dropping an infrequent category\r\n\r\n* STY Grammar\r\n\r\nCo-authored-by: Joel Nothman \r\nCo-authored-by: Roman Yurchak \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def get_feature_names(self, input_features=None):\n \n check_is_fitted(self)\n cats = [\n self._compute_transformed_categories(i)\n for i, _ in enumerate(self.categories_)\n ]\n if input_features is None:\n input_features = [\"x%d\" % i for i in range(len(cats))]\n elif len(input_features) != len(cats):\n raise ValueError(\n \"input_features should have length equal to number of \"\n \"features ({}), got {}\".format(len(cats), len(input_features))\n )\n\n feature_names = []\n for i in range(len(cats)):\n names = [input_features[i] + \"_\" + str(t) for t in cats[i]]\n feature_names.extend(names)\n\n return np.array(feature_names, dtype=object)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 240, "n_words": 70, "vocab_size": 57, "complexity": 7, "nloc": 18, "token_counts": 141, "n_ast_nodes": 229, "n_identifiers": 23, "random_cut": "def get_feature_names(self, input_features=None):\n \n check_is_fi" }, { "id": 241790, "commit_id": "effaf8c82c822a24754b77890e2c394c5b8eaeca", "repo": "scipy", "path": "scipy/sparse/csgraph/tests/test_graph_laplacian.py", "file_name": "test_graph_laplacian.py", 
"fun_name": "test_laplacian_value_error", "commit_message": "ENH: Update `laplacian` function introducing the new `copy` option (#15251)\n\nCo-authored-by: Pamphile Roy ", "code": "def test_laplacian_value_error():\n for t in int, float, complex:\n for m in ([1, 1],\n [[[1]]],\n [[1, 2, 3], [4, 5, 6]],\n [[1, 2], [3, 4], [5, 5]]):\n A = np.array(m, dtype=t)\n assert_raises(ValueError, csgraph.laplacian, A)\n\n", "url": "https://github.com/scipy/scipy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 115, "n_words": 33, "vocab_size": 30, "complexity": 3, "nloc": 8, "token_counts": 92, "n_ast_nodes": 123, "n_identifiers": 14, "random_cut": "def test_laplacian_value_error():\n for t in int, float, complex:\n for m in ([1, 1],\n [[[1]]],\n [[1, 2, 3], [4, 5, 6]],\n [[1, 2], [3, 4], [5, 5]]):\n " }, { "id": 276327, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/testing_infra/test_combinations.py", "file_name": "test_combinations.py", "fun_name": "_v2_eager_test", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _v2_eager_test(f, test_or_class, *args, **kwargs):\n with tf.__internal__.eager_context.eager_mode():\n with test_utils.run_eagerly_scope(True):\n f(test_or_class, *args, **kwargs)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 32, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 42, "n_ast_nodes": 69, "n_identifiers": 11, "random_cut": "def _v2_eager_test(f, test_or_class, *args, **kwargs):\n with tf.__internal__.eager_context.eager_mode():\n with test_utils.run_eagerly_scope(True):\n f(test_or_class, *args, **kwargs)\n\n" }, { "id": 282543, "commit_id": "e1b6022b9cf156ffc0697d0d25a5ed2772ea8d68", "repo": "OpenBBTerminal", "path": "tests/gamestonk_terminal/stocks/options/test_tradier_model.py", "file_name": "test_tradier_model.py", "fun_name": "test_get_option_chains_invalid_status", "commit_message": "Global plot styles (#1228)\n\n* Add default stylesheets\r\n\r\n* Add terminal style helper class and global style initialization in cfg\r\n\r\n* Style comments and docstrings\r\n\r\n* Load rich terminal theme from config file\r\n\r\n* Add application chart styles to candle charts\r\n\r\n* Add todos\r\n\r\n* Remove explicit color setting for some ta charts\r\n\r\n* Add user styles folder to gitignore\r\n\r\n* Update default stylesheets\r\n\r\n* Add matplotlib font manager support\r\n\r\n* Add matplotlib font manager support\r\n\r\n* Update docstrings and default style\r\n\r\n* Update stocks candle chart formatting (return fig to style title)\r\n\r\n* Style common ta overlap view\r\n\r\n* Make up and down market colors a part of the style helper\r\n\r\n* Update stylesheets\r\n\r\n* Style common ta volume view\r\n\r\n* Style common ta momentum view\r\n\r\n* Style common ta trend indicators view\r\n\r\n* Style common ta volatility view\r\n\r\n* Style common ta volume view\r\n\r\n* Style common ta custom indicators view\r\n\r\n* Fix styling bugs and remove the obvious time x lablel\r\n\r\n* Style charts in the covid menu\r\n\r\n* Set legend position to upper left in the mpl stylesheet\r\n\r\n* Add mpl_rcparams configs for parameters not covered by stylesheets\r\n\r\n* Remove font configuration files\r\n\r\n* Update style class utility functions\r\n\r\n* Implement passing external axes and style utility usage in ema & 
stoch\r\n\r\n* Add theme watermark and output helpers\r\n\r\n* Rename style to theme\r\n\r\n* Update helper usage in ta/ma and ta/stoch\r\n\r\n* Update style to theme in sample menus\r\n\r\n* Style forex (#1305)\r\n\r\n* Make tight layout optional 'cause mplfinance doesn't support it\r\n\r\n* Apply global style to the forex menu\r\n\r\n* Update code layout in oanda view and black\r\n\r\n* Style common TA (#1315)\r\n\r\n* Make tight layout optional 'cause mplfinance doesn't support it\r\n\r\n* Apply global style to the forex menu\r\n\r\n* Add linewidth to theme for use in mpf's addplots\r\n\r\n* Add vwap to the stocks notebook api\r\n\r\n* Update common/ta overlap to follow charting style\r\n\r\n* Apply style on TerminalStyle init\r\n\r\n* Enable infrastructure for excluding non-trading days from plots\r\n\r\n* Update notebook api to include there and resolve bandit warning\r\n\r\n* Update ta/common/overlap to exclude non-trading days\r\n\r\n* Enable external ax, style and non-trading days in common/ta/momentum\r\n\r\n* Enable external ax, style and non-trading days in common/ta/trend\r\n\r\n* Update vwap to the argument naming convention\r\n\r\n* Enable external ax, style and non-trading days in common/ta/volatility\r\n\r\n* Enable external ax, style and non-trading days in common/ta/volume\r\n\r\n* Enable external ax, style and non-trading days in common/ta/custom\r\n\r\n* Fix controller tests\r\n\r\n* Forgot to disable rewriting of the cassettes ...\r\n\r\n* Fix controller errors that came up because a merge conflict\r\n\r\n* Fix price label position on fib\r\n\r\n* Fix line having wrong x values in fib\r\n\r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\n\r\n* Style economy (#1308)\r\n\r\n* Began converting\r\n\r\n* Added alphavan_view\r\n\r\n* Added CNN View\r\n\r\n* Updated nasdaq view, fixed glitch\r\n\r\n* Added fred\r\n\r\n* Refactored URL\r\n\r\n* Theo's requested changes\r\n\r\n* Updated docstrings\r\n\r\n* Updated tests\r\n\r\n* Fixed pylint\r\n\r\n* Fixed tests\r\n\r\n* Theo changes\r\n\r\n* Econ Fix\r\n\r\n* Refactor chart style for Crypto context (#1306)\r\n\r\n* Remove mock for gff\r\n\r\n* Mock visualize_output helper function\r\n\r\n* Refactor\r\n\r\n* Fix plot helper\r\n\r\n* Update legend loc\r\n\r\n* Refactor mplfinance candle plot\r\n\r\n* Fix errors in the helper function\r\n\r\n* Fix binbook having the wrong call_ function name\r\n\r\n* Remove hardcoded style params\r\n\r\n* Resolve kwargs future warning from pandas\r\n\r\n* Remove warnings import\r\n\r\nCo-authored-by: Theodore Aptekarev \r\n\r\n* funds + custom (#1311)\r\n\r\n* funds + custom\r\n\r\n* cleanup cleanup everybody everywhere\r\n\r\n* Fix external axes conditional and a typo\r\n\r\nCo-authored-by: Theodore Aptekarev \r\n\r\n* Add external axes mode to covid charts (#1328)\r\n\r\n* Add portfolio menu plots (#1318)\r\n\r\n* Portfolio view plots (commenting out report stuff)\r\n\r\n* PA Menu broken. 
Commenting out and fix tests\r\n\r\n* portfolio optimization\r\n\r\n* comment out commented api line\r\n\r\n* Add notes on disabling the pa submenu\r\n\r\nCo-authored-by: Theodore Aptekarev \r\n\r\n* Plot updates in common BA (#1335)\r\n\r\n* Add external axes support to common/ba/finbrain\r\n\r\n* Add external axes support to common/ba/twitter\r\n\r\n* Add external axes support to common/ba/google\r\n\r\n* Add external axes support to common/ba/sentimentinvestor\r\n\r\n* Add sentimentinvestor to the notebooks API\r\n\r\n* Fix tests\r\n\r\n* Etf refactor (#1323)\r\n\r\n* Refactored no ETF\r\n\r\n* Fixed gtff import\r\n\r\n* Fixed tests\r\n\r\n* Fix pie chart style\r\n\r\n* Refactored etf/candle\r\n\r\n* Added pylint fix\r\n\r\n* Fixed tests\r\n\r\n* Update candle chart layout\r\n\r\n* Update etf controller test\r\n\r\n* Remove strange binary file\r\n\r\nCo-authored-by: Theodore Aptekarev \r\n\r\n* Expose ETF candle function in the notebooks API\r\n\r\n* Common BA and Common QA charts update (#1342)\r\n\r\n* Add external axes support to common/ba/finbrain\r\n\r\n* Add external axes support to common/ba/twitter\r\n\r\n* Add external axes support to common/ba/google\r\n\r\n* Add external axes support to common/ba/sentimentinvestor\r\n\r\n* Add sentimentinvestor to the notebooks API\r\n\r\n* Fix tests\r\n\r\n* Update stylesheet files\r\n\r\n* Refactor charts for common/qa\r\n\r\n* Update the forgotten line plot\r\n\r\n* Update tests\r\n\r\n* Add missing arg to a docstring\r\n\r\n* Remove scientific notation\r\n\r\n* Black imports\r\n\r\nCo-authored-by: Minh Hoang \r\n\r\n* Options refactor (#1324)\r\n\r\n* Fixed alphaquery_view\r\n\r\n* finished options\r\n\r\n* Fixed pylint\r\n\r\n* Fixed tests\r\n\r\n* Fixed tests\r\n\r\n* Fixed tests\r\n\r\n* update yfinance\r\n\r\n* Tradier + Chartexchange\r\n\r\n* change mocks from gtff to theme.visualize output\r\n\r\n* tests\r\n\r\nCo-authored-by: Theodore Aptekarev \r\nCo-authored-by: james \r\n\r\n* Refactor Stocks menu (#1325)\r\n\r\n* Fix backtesting menu\r\n\r\n* Refactor comparison analysis\r\n\r\n* Refactor Dark pool shorts\r\n\r\n* Refactor rest of menu\r\n\r\n* Fix test\r\n\r\n* Fix tests failing\r\n\r\n* Fix tests fail\r\n\r\n* Fix test failing\r\n\r\n* Remove record mode=none to record new output\r\n\r\n* Rewrite test output\r\n\r\n* Rewrite test outputs\r\n\r\n* Adding more rewritten test output\r\n\r\n* Mock plt.show\r\n\r\n* Mock missing plt.show\r\n\r\n* Missing @pytest.mark.vcr\r\n\r\n* Updating tests : common/behavioural_analysis/finbrain\r\n\r\n* Improve notebooks API coverage for CA and DPS\r\n\r\n* Silence annoying flake8 warning\r\n\r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: Theodore Aptekarev \r\n\r\n* Charts update for common/pred (#1344)\r\n\r\n* Add external axes support to common/ba/finbrain\r\n\r\n* Add external axes support to common/ba/twitter\r\n\r\n* Add external axes support to common/ba/google\r\n\r\n* Add external axes support to common/ba/sentimentinvestor\r\n\r\n* Add sentimentinvestor to the notebooks API\r\n\r\n* Fix tests\r\n\r\n* Update stylesheet files\r\n\r\n* Refactor charts for common/qa\r\n\r\n* Update the forgotten line plot\r\n\r\n* Update tests\r\n\r\n* Add missing arg to a docstring\r\n\r\n* Style pred helper and controllers\r\n\r\n* Update ETS plot\r\n\r\n* Update plots in KNN and pred helper\r\n\r\n* Update plot and pretty table for arima\r\n\r\n* Update plot for common/pred/regression\r\n\r\n* Refactor mc_view\r\n\r\n* Fix linting\r\n\r\n* Fix mypy\r\n\r\n* Move plot title to the axis 
level to make more vertical space\r\n\r\nCo-authored-by: Minh Hoang \r\nCo-authored-by: jmaslek \r\n\r\n* linter\r\n\r\n* Update common/ba test data\r\n\r\n* Change etf candle to match stock candle\r\n\r\n* try updating sia test\r\n\r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: jmaslek \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: Minh Hoang \r\nCo-authored-by: Chavithra PARANA ", "code": "def test_get_option_chains_invalid_status(mocker):\n mock_response = requests.Response()\n mock_response.status_code = 400\n mocker.patch(target=\"requests.get\", new=mocker.Mock(return_value=mock_response))\n\n result_df = tradier_model.get_option_chains(symbol=\"AAPL\", expiry=\"2022-02-25\")\n\n assert result_df.empty\n\n\n@pytest.mark.vcr", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 30, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 6, "token_counts": 54, "n_ast_nodes": 101, "n_identifiers": 20, "random_cut": "def test_get_option_chains_invalid_status(mocker):\n mock_response = requests.Response()\n mock_response.status_code = 400\n mocker.patch(target=\"requests.get\", new=mocker.Mock(return_value=mock_response))\n\n result_df = tradier_model.get_option_chains(symbol=\"AAPL\", expiry=\"2022-02-25\")\n\n as" }, { "id": 28554, "commit_id": "c464bf733d0d29ff79fbf7d3e22c4450c0eb45bb", "repo": "saleor", "path": "saleor/warehouse/tests/test_stock_management.py", "file_name": "test_stock_management.py", "fun_name": "test_allocate_stocks", "commit_message": "[RFC] Sorting warehouses within channel (#10416)\n\n* Add spec for sorting warehouses within shipping zone\r\n\r\n* Alter warehouse-channel relation through model\r\n\r\n* Add ChannelReorderWarehouses mutation\r\n\r\n* Improve channelReorderWarehouses mutation and extend tests\r\n\r\n* Clean up\r\n\r\n* Add preview label to ChannelReorderWarehouses mutation\r\n\r\n* Add ADDED_IN_37 label in ChannelReorderWarehouses mutation\r\n\r\n* Add spec for sorting warehouses within shipping zone\r\n\r\n* Extend Channel model with allocation_strategy field\r\n\r\n* Add temporay default value for channel.allocation_strategy\r\n\r\n* Extend the Channel type with allocationSettings field\r\n\r\n* Allow defining allocationSettings in channelCreate and channelUpdate mutation\r\n\r\n* Remove default value for Channel.allocation_strategy model\r\n\r\n* Add ADDED_IN_37 label\r\n\r\n* Set PRIORITIZE_SORTING_ORDER as default channel.allocation_strategy\r\n\r\n* Do not require allocationSettings in channelCreate mutation\r\n\r\n* Update saleor/graphql/channel/types.py\r\n\r\nCo-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>\r\n\r\n* Small refactor of allocate_stocks method\r\n\r\n* Change channel_slug to channel parameter in allocate_stocks method\r\n\r\n* Handle allocation strategies in stock allocation\r\n\r\n* Make sort_stocks public and add typing\r\n\r\n* Replace channel_slug parameter with channel in reserve_stocks method\r\n\r\n* Handle allocation strategies in stock reservation\r\n\r\n* Rename allocationSettings to stockSettings\r\n\r\n* Update changelog\r\n\r\n* Update StockSettings and AllocationStrategyEnum descriptions\r\n\r\n* Update moves description in ChannelReorderWarehouses mutation\r\n\r\n* Fix sorting stocks by PRIORITIZE_SORTING_ORDER strategy\r\n\r\nCo-authored-by: Marcin Gębala 
<5421321+maarcingebala@users.noreply.github.com>", "code": "def test_allocate_stocks(order_line, stock, channel_USD):\n stock.quantity = 100\n stock.save(update_fields=[\"quantity\"])\n\n line_data = OrderLineInfo(line=order_line, variant=order_line.variant, quantity=50)\n\n allocate_stocks(\n [line_data], COUNTRY_CODE, channel_USD, manager=get_plugins_manager()\n )\n\n stock.refresh_from_db()\n assert stock.quantity == 100\n assert stock.quantity == 100\n allocation = Allocation.objects.get(order_line=order_line, stock=stock)\n assert allocation.quantity_allocated == stock.quantity_allocated == 50\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 74, "n_words": 38, "vocab_size": 27, "complexity": 1, "nloc": 12, "token_counts": 101, "n_ast_nodes": 153, "n_identifiers": 21, "random_cut": "def test_allocate_stocks(order_line, stock, channel_USD):\n stock.quantity = 100\n stock.save(update_fields=[\"quantity\"])\n\n line_data = OrderLineInfo(line=order_line, variant=order_line.var" }, { "id": 420, "commit_id": "cb633cfba63ae52fa28d3d48350fd1da8c22ed8b", "repo": "PySyft", "path": "packages/syft/src/syft/core/node/common/node_service/generic_payload/syft_message.py", "file_name": "syft_message.py", "fun_name": "payload", "commit_message": "ADD Pydantic Payload, RequestPayload and Response payload", "code": "def payload(self) -> Payload:\n kwargs_dict = {}\n\n if hasattr(self.kwargs, \"upcast\"):\n kwargs_dict = self.kwargs.upcast() # type: ignore\n else:\n kwargs_dict = self.kwargs # type: ignore\n\n # If it's not a reply message then load kwargs as a proper request payload.\n if not self.reply:\n return self.request_payload_type(**kwargs_dict)\n # If it's a reply message, then load kwargs as a proper reply payload.\n else:\n return self.reply_payload_type(**kwargs_dict)\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 154, "n_words": 60, "vocab_size": 34, "complexity": 3, "nloc": 10, "token_counts": 61, "n_ast_nodes": 107, "n_identifiers": 10, "random_cut": "def payload(self) -> Payload:\n kwargs_dict = {}\n\n if hasattr(self.kwargs, \"upcast\"):\n kwargs_dict = self.kwargs.upcast() # type: ignore\n else:\n" }, { "id": 64744, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/account/chart_of_accounts/chart_of_accounts.py", "file_name": "chart_of_accounts.py", "fun_name": "get_chart", "commit_message": "style: format code with black", "code": "def get_chart(chart_template, existing_company=None):\n\tchart = {}\n\tif existing_company:\n\t\treturn get_account_tree_from_existing_company(existing_company)\n\n\telif chart_template == \"Standard\":\n\t\tfrom erpnext.accounts.doctype.account.chart_of_accounts.verified import (\n\t\t\tstandard_chart_of_accounts,\n\t\t)\n\n\t\treturn standard_chart_of_accounts.get()\n\telif chart_template == \"Standard with Numbers\":\n\t\tfrom erpnext.accounts.doctype.account.chart_of_accounts.verified import (\n\t\t\tstandard_chart_of_accounts_with_account_number,\n\t\t)\n\n\t\treturn standard_chart_of_accounts_with_account_number.get()\n\telse:\n\t\tfolders = (\"verified\",)\n\t\tif frappe.local.flags.allow_unverified_charts:\n\t\t\tfolders = (\"verified\", \"unverified\")\n\t\tfor folder in folders:\n\t\t\tpath = os.path.join(os.path.dirname(__file__), folder)\n\t\t\tfor fname in os.listdir(path):\n\t\t\t\tfname = 
frappe.as_unicode(fname)\n\t\t\t\tif fname.endswith(\".json\"):\n\t\t\t\t\twith open(os.path.join(path, fname), \"r\") as f:\n\t\t\t\t\t\tchart = f.read()\n\t\t\t\t\t\tif chart and json.loads(chart).get(\"name\") == chart_template:\n\t\t\t\t\t\t\treturn json.loads(chart).get(\"tree\")\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 22, "n_whitespaces": 53, "n_words": 81, "vocab_size": 54, "complexity": 10, "nloc": 27, "token_counts": 206, "n_ast_nodes": 342, "n_identifiers": 35, "random_cut": "def get_chart(chart_template, existing_company=None):\n\tchart = {}\n\tif existing_company:\n\t\treturn get_account_tree_from_existing_company(existing_company)\n\n\telif chart_template == \"Standard\":\n\t\tfrom erpnext.accounts.doctype.account.chart_of_accounts.verified import (\n\t\t\tstandard_chart_of_accounts,\n\t\t)\n\n\t\treturn standard_chart_of_accounts.get()\n\telif chart_template == \"Standard with Numbers\":\n\t\tfrom erpnext.accounts.doctype.account.chart_of_accounts.verified import (\n\t\t\tstandard_chart_of_accounts_with_account_number,\n\t\t)\n\n\t\treturn standard_chart_of_accounts_with_account_number.get()\n\telse:\n\t\tfolders = (\"verified\",)\n\t\tif frappe.local.flags.allow_unverified_charts:\n\t\t\tfolders = (\"verified\", \"unverified\")\n\t\tfor folder in folders:\n\t\t\tpath = os.path.join(os.path.dirname(__file__), folder)\n\t\t\tfor fname in os.listdir(path):\n\t\t\t\tfname = frappe.as_unicode(fname)\n\t\t\t\tif fname.endswith(\".json\"):\n\t\t\t\t\twith open(os.path.join(path, fname), \"r\") as f:\n\t\t\t\t\t\tchart = f.read()\n\t\t\t\t\t\tif chart and json.loads(chart).get(\"name\") == chart_template:\n\t\t\t\t\t\t\treturn json.loads(chart).get(\"tree\")\n\n\n@frappe.whiteli" }, { "id": 295820, "commit_id": "e6d8aa34fa34d7f8e45280e4cc545d2ba15fd117", "repo": "core", "path": "homeassistant/components/iaqualink/light.py", "file_name": "light.py", "fun_name": "supported_features", "commit_message": "Use EntityFeature enum in components (i**) (#69409)", "code": "def supported_features(self) -> int:\n \n if self.dev.is_color:\n return LightEntityFeature.EFFECT\n\n return 0\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 42, "n_words": 10, "vocab_size": 9, "complexity": 2, "nloc": 5, "token_counts": 21, "n_ast_nodes": 36, "n_identifiers": 7, "random_cut": "def supported_features(self) -> int:\n \n if self.dev.is_color:\n return LightEntityFeature.EFFECT\n\n ret" }, { "id": 75237, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/test_image_operations.py", "file_name": "test_image_operations.py", "fun_name": "resize", "commit_message": "Reformat with black", "code": "def resize(self, size):\n \n self._check_size(size)\n clone = self.clone()\n clone.operations.append((\"resize\", size))\n clone.size = size\n return clone\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 56, "n_words": 14, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 40, "n_ast_nodes": 68, "n_identifiers": 7, "random_cut": "def resize(self, size):\n \n self._check_size(size)\n clone = self.clone()\n clone.operations.append((\"resize\", size))\n clone.size = siz" }, { "id": 111606, "commit_id": 
"c223cd7a86f460f3dabb9e7369eef136a653218e", "repo": "spaCy", "path": "spacy/tests/test_cli.py", "file_name": "test_cli.py", "fun_name": "test_applycli_user_data", "commit_message": "Add apply CLI (#11376)\n\n* annotate cli first try\r\n\r\n* add batch-size and n_process\r\n\r\n* rename to apply\r\n\r\n* typing fix\r\n\r\n* handle file suffixes\r\n\r\n* walk directories\r\n\r\n* support jsonl\r\n\r\n* typing fix\r\n\r\n* remove debug\r\n\r\n* make suffix optional for walk\r\n\r\n* revert unrelated\r\n\r\n* don't warn but raise\r\n\r\n* better error message\r\n\r\n* minor touch up\r\n\r\n* Update spacy/tests/test_cli.py\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Update spacy/cli/apply.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Update spacy/cli/apply.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* update tests and bugfix\r\n\r\n* add force_overwrite\r\n\r\n* typo\r\n\r\n* fix adding .spacy suffix\r\n\r\n* Update spacy/cli/apply.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Update spacy/cli/apply.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Update spacy/cli/apply.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* store user data and rename cmd arg\r\n\r\n* include test for user attr\r\n\r\n* rename cmd arg\r\n\r\n* better help message\r\n\r\n* documentation\r\n\r\n* prettier\r\n\r\n* black\r\n\r\n* link fix\r\n\r\n* Update spacy/cli/apply.py\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Update website/docs/api/cli.md\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Update website/docs/api/cli.md\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Update website/docs/api/cli.md\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* addressing reviews\r\n\r\n* dont quit but warn\r\n\r\n* prettier\r\n\r\nCo-authored-by: Adriane Boyd \r\nCo-authored-by: Sofie Van Landeghem \r\nCo-authored-by: Paul O'Leary McCann ", "code": "def test_applycli_user_data():\n Doc.set_extension(\"ext\", default=0)\n val = (\"ext\", 0)\n with make_tempdir() as data_path:\n output = data_path / \"testout.spacy\"\n nlp = spacy.blank(\"en\")\n doc = nlp(\"testing apply cli.\")\n doc._.ext = val\n docbin = DocBin(store_user_data=True)\n docbin.add(doc)\n docbin.to_disk(data_path / \"testin.spacy\")\n apply(data_path, output, \"blank:en\", \"\", 1, 1)\n result = list(DocBin().from_disk(output).get_docs(nlp.vocab))\n assert result[0]._.ext == val\n\n", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 126, "n_words": 48, "vocab_size": 39, "complexity": 1, "nloc": 14, "token_counts": 121, "n_ast_nodes": 208, "n_identifiers": 25, "random_cut": "def test_applycli_user_data():\n Doc.set_extension(\"ext\", default=0)\n val = (\"ext\", 0)\n with mak" }, { "id": 257398, "commit_id": "7caca41c5d3fc625af4df68e237db5df6b02b724", "repo": "haystack", "path": "test/pipelines/test_eval.py", "file_name": "test_eval.py", "fun_name": "test_extractive_qa_eval_isolated", "commit_message": "Support context matching in `pipeline.eval()` (#2482)\n\n* calculate context pred metrics\r\n\r\n* Update Documentation & Code Style\r\n\r\n* extend doc_relevance_col values\r\n\r\n* fix import order\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix mypy\r\n\r\n* fix typings literal import\r\n\r\n* add option for custom document_id_field\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix tests and dataframe col-order\r\n\r\n* Update Documentation & Code Style\r\n\r\n* rename content to context in 
eval dataframe\r\n\r\n* add backward compatibility to EvaluationResult.load()\r\n\r\n* Update Documentation & Code Style\r\n\r\n* add docstrings\r\n\r\n* Update Documentation & Code Style\r\n\r\n* support sas\r\n\r\n* Update Documentation & Code Style\r\n\r\n* add answer_scope param\r\n\r\n* Update Documentation & Code Style\r\n\r\n* rework doc_relevance_col and keep document_id col in case of custom_document_id_field\r\n\r\n* Update Documentation & Code Style\r\n\r\n* improve docstrings\r\n\r\n* Update Documentation & Code Style\r\n\r\n* rename document_relevance_criterion into document_scope\r\n\r\n* Update Documentation & Code Style\r\n\r\n* add document_scope and answer_scope to print_eval_report\r\n\r\n* support all new features in execute_eval_run()\r\n\r\n* fix imports\r\n\r\n* fix mypy\r\n\r\n* Update Documentation & Code Style\r\n\r\n* rename pred_label_sas_grid into pred_label_matrix\r\n\r\n* update dataframe schema and sorting\r\n\r\n* Update Documentation & Code Style\r\n\r\n* pass through context_matching params and extend document_scope test\r\n\r\n* Update Documentation & Code Style\r\n\r\n* add answer_scope tests\r\n\r\n* fix context_matching_threshold for document metrics\r\n\r\n* shorten dataframe apply calls\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix queries getting lost if nothing was retrieved\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Update Documentation & Code Style\r\n\r\n* use document_id scopes\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix answer_scope literal\r\n\r\n* Update Documentation & Code Style\r\n\r\n* update the docs (lg changes)\r\n\r\n* Update Documentation & Code Style\r\n\r\n* update tutorial 5\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix tests\r\n\r\n* Add minor lg updates\r\n\r\n* final docstring changes\r\n\r\n* fix single quotes in docstrings\r\n\r\n* Update Documentation & Code Style\r\n\r\n* dataframe scopes added for each column\r\n\r\n* better docstrings for context_matching params\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix summarizer eval test\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix test\r\n\r\n* Update Documentation & Code Style\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>\r\nCo-authored-by: agnieszka-m ", "code": "def test_extractive_qa_eval_isolated(reader, retriever_with_docs):\n pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)\n eval_result: EvaluationResult = pipeline.eval(\n labels=EVAL_LABELS,\n sas_model_name_or_path=\"sentence-transformers/paraphrase-MiniLM-L3-v2\",\n add_isolated_node_eval=True,\n )\n\n metrics_top_1 = eval_result.calculate_metrics(simulated_top_k_reader=1, document_scope=\"document_id\")\n\n assert metrics_top_1[\"Reader\"][\"exact_match\"] == 0.5\n assert metrics_top_1[\"Reader\"][\"f1\"] == 0.5\n assert metrics_top_1[\"Reader\"][\"sas\"] == pytest.approx(0.5833, abs=1e-4)\n assert metrics_top_1[\"Retriever\"][\"mrr\"] == 0.5\n assert metrics_top_1[\"Retriever\"][\"map\"] == 0.5\n assert metrics_top_1[\"Retriever\"][\"recall_multi_hit\"] == 0.5\n assert metrics_top_1[\"Retriever\"][\"recall_single_hit\"] == 0.5\n assert metrics_top_1[\"Retriever\"][\"precision\"] == 1.0 / 10\n assert metrics_top_1[\"Retriever\"][\"ndcg\"] == 0.5\n\n metrics_top_1 = eval_result.calculate_metrics(simulated_top_k_reader=1, eval_mode=\"isolated\")\n\n assert metrics_top_1[\"Reader\"][\"exact_match\"] == 1.0\n assert metrics_top_1[\"Reader\"][\"f1\"] == 1.0\n assert 
metrics_top_1[\"Reader\"][\"sas\"] == pytest.approx(1.0, abs=1e-4)\n\n\n@pytest.mark.parametrize(\"retriever_with_docs\", [\"tfidf\"], indirect=True)\n@pytest.mark.parametrize(\"document_store_with_docs\", [\"memory\"], indirect=True)\n@pytest.mark.parametrize(\"reader\", [\"farm\"], indirect=True)", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"retriever_with_docs\", [\"tfidf\"], indirect=True)\n@pytest.mark.parametrize(\"document_store_with_docs\", [\"memory\"], indirect=True)\n@pytest.mark.parametrize(\"reader\", [\"farm\"], indirect=True)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 152, "n_words": 84, "vocab_size": 43, "complexity": 1, "nloc": 21, "token_counts": 236, "n_ast_nodes": 437, "n_identifiers": 24, "random_cut": "def test_extractive_qa_eval_isolated(reader, retriever_with_docs):\n pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)\n eval_result: EvaluationResult = pipeline.eval(\n labels=EVAL_LABELS,\n sas_model_name_or_path=\"sentence-transformers/paraphrase-MiniLM-L3-v2\",\n add_isolated_node_eval=True,\n )\n\n metrics_top_1 = eval_result.calculate_metrics(simulated_top_k_reader=1, document_scope=\"document_id\")\n\n assert metrics_top_1[\"Reader\"][\"exact_match\"] == 0.5\n assert metrics_top_1[\"Reader\"][\"f1\"] == 0.5\n assert metrics_top_1[\"Reader\"][\"sas\"] == pytest.approx(0.5833, abs=1e-4)\n assert metrics_top_1[\"Retriever\"][\"mrr\"] == 0.5\n assert metrics_top_1[\"Retriever\"][\"map\"] == 0.5\n assert metrics_top_1[\"Retriever\"][\"recall_multi_hit\"] == 0.5\n assert metrics_top_1[\"Retriever\"][\"recall_single_hit\"] == 0.5\n assert metrics_top_1[\"Retriever\"][\"precision\"] == 1.0 / 10\n assert metrics_top_1[\"Retriever\"][\"ndcg\"] == 0.5\n\n metrics_top_1 = eval_result.calculate_metrics(simulated_top_k_reader=1, eval_mode=\"isolated\")\n\n assert metrics_top_1[\"Reader\"][\"exact_match\"] == 1.0\n assert metrics_top_1[\"Reader\"][\"f1\"] == 1.0\n assert metrics_top_1[\"Reader\"][\"sas\"] == pytest.approx(1.0, abs=1e-4)\n\n\n@pytest.mark.parametrize(\"retriever_with_docs\", [\"tfidf\"], indirect=True)\n@pytest.mark.parametrize(\"document_store_with_docs\", [\"memory\"], indirect=True)\n@pytest.mark.parametrize(\"reader\", [\"farm\"], indirect=True)" }, { "id": 100068, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_project_rules_configuration.py", "file_name": "test_project_rules_configuration.py", "fun_name": "run_mock_rules_test", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def run_mock_rules_test(self, expected_actions, querystring_params, rules=None):\n if not rules:\n rules = self.rules\n with patch(\"sentry.api.endpoints.project_rules_configuration.rules\", rules):\n response = self.get_success_response(\n self.organization.slug, self.project.slug, qs_params=querystring_params\n )\n\n assert len(response.data[\"actions\"]) == expected_actions\n assert len(response.data[\"conditions\"]) == 0\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 112, "n_words": 29, "vocab_size": 26, "complexity": 2, "nloc": 9, "token_counts": 76, "n_ast_nodes": 121, "n_identifiers": 14, "random_cut": "def run_mock_rules_test(self, expected_actions, querystring_params, rules=None):\n if not rules:\n rules = self.rules\n with 
patch(\"sentry.api.endpoints.project_rules_configuration.rules\", rules):\n response = self.get_success_response(\n self.organization.slug, self.project.slug, qs_params=" }, { "id": 114449, "commit_id": "4b49bf89ad95fcc645a249983efd764c1a73e3bb", "repo": "mindsdb", "path": "mindsdb/integrations/lightwood_handler/lightwood_handler/lightwood_handler.py", "file_name": "lightwood_handler.py", "fun_name": "check_status", "commit_message": "copy from /lightwood_handler branch: added generic serializer for dill support, lw integration functional", "code": "def check_status(self) -> Dict[str, int]:\n # noqa\n # todo: potentially nothing to do here, as we can assume user to install requirements first\n try:\n import lightwood\n year, major, minor, hotfix = lightwood.__version__.split('.')\n assert int(year) >= 22\n assert int(major) >= 2\n assert int(minor) >= 3\n print(\"Lightwood OK!\")\n return {'status': '200'}\n except AssertionError as e:\n print(\"Cannot import lightwood!\")\n return {'status': '503', 'error': e}\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 189, "n_words": 61, "vocab_size": 51, "complexity": 2, "nloc": 13, "token_counts": 83, "n_ast_nodes": 150, "n_identifiers": 15, "random_cut": "def check_status(self) -> Dict[str, int]:\n # noqa\n # " }, { "id": 216705, "commit_id": "81e08e9dd22299cfdffa5d657183cd7ea62e70f6", "repo": "Open-Assistant", "path": "bot/channel_handlers.py", "file_name": "channel_handlers.py", "fun_name": "read", "commit_message": "add teaser msgs & remaining task handling", "code": "async def read(self) -> discord.Message:\n \n if self.expired:\n raise ChannelExpiredException()\n\n msg = await self.queue.get()\n if msg is None:\n if self.expired:\n raise ChannelExpiredException()\n else:\n raise RuntimeError(\"Unexpected None message read\")\n return msg\n", "url": "https://github.com/LAION-AI/Open-Assistant.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 127, "n_words": 29, "vocab_size": 21, "complexity": 4, "nloc": 11, "token_counts": 52, "n_ast_nodes": 92, "n_identifiers": 10, "random_cut": "async def read(self) -> discord.Message:\n \n if self.expired:\n raise Chann" }, { "id": 126302, "commit_id": "c624d04842b6fc1bbd7279e18287d51fd801218b", "repo": "ray", "path": "dashboard/modules/job/tests/test_http_job_server.py", "file_name": "test_http_job_server.py", "fun_name": "test_submit_still_accepts_job_id_or_submission_id", "commit_message": "Add back job_id to submit_job API to maintain backwards-compatibility (#27110)\n\nFix for a unintentional backwards-compatibility breakage for #25902\r\njob submit api should still accept job_id as a parameter\r\n\r\nSigned-off-by: Alan Guo aguo@anyscale.com", "code": "def test_submit_still_accepts_job_id_or_submission_id(job_sdk_client):\n \n client = job_sdk_client\n\n client._do_request(\n \"POST\",\n \"/api/jobs/\",\n json_data={\"entrypoint\": \"ls\", \"job_id\": \"raysubmit_12345\"},\n )\n\n wait_for_condition(_check_job_succeeded, client=client, job_id=\"raysubmit_12345\")\n\n client._do_request(\n \"POST\",\n \"/api/jobs/\",\n json_data={\"entrypoint\": \"ls\", \"submission_id\": \"raysubmit_23456\"},\n )\n\n wait_for_condition(_check_job_succeeded, client=client, job_id=\"raysubmit_23456\")\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 93, "n_words": 27, 
"vocab_size": 19, "complexity": 1, "nloc": 14, "token_counts": 75, "n_ast_nodes": 138, "n_identifiers": 8, "random_cut": "def test_submit_still_accepts_job_id_or_submission_id(job_sdk_client):\n \n " }, { "id": 197458, "commit_id": "9a3ffc6781bd44c47cf49e128ef154389c32876a", "repo": "sympy", "path": "sympy/physics/vector/vector.py", "file_name": "vector.py", "fun_name": "diff", "commit_message": "Some pep8 cleanup of sympy.physics.vector.", "code": "def diff(self, var, frame, var_in_dcm=True):\n \n\n from sympy.physics.vector.frame import _check_frame\n\n var = sympify(var)\n _check_frame(frame)\n\n inlist = []\n\n for vector_component in self.args:\n measure_number = vector_component[0]\n component_frame = vector_component[1]\n if component_frame == frame:\n inlist += [(measure_number.diff(var), frame)]\n else:\n # If the direction cosine matrix relating the component frame\n # with the derivative frame does not contain the variable.\n if not var_in_dcm or (frame.dcm(component_frame).diff(var) ==\n zeros(3, 3)):\n inlist += [(measure_number.diff(var), component_frame)]\n else: # else express in the frame\n reexp_vec_comp = Vector([vector_component]).express(frame)\n deriv = reexp_vec_comp.args[0][0].diff(var)\n inlist += Vector([(deriv, frame)]).args\n\n return Vector(inlist)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 368, "n_words": 86, "vocab_size": 61, "complexity": 5, "nloc": 19, "token_counts": 167, "n_ast_nodes": 261, "n_identifiers": 21, "random_cut": "def diff(self, var, frame, var_in_dcm=True):\n \n\n from sympy.physics.vector.frame import _check_frame\n\n var = sympify(var)\n _check_frame(frame)\n\n inlist = []\n\n for vector_component in self.args:\n measure_number = vector_component[0]\n component_frame = vector_component[1]\n if component_frame == frame:\n inlist += [(measure_number.diff(var), frame)]\n else:\n # If the direction cosine matrix relating the component frame\n # with the derivative frame does not contain the variable.\n if not var_in_dcm or (frame.dcm(component_frame).diff(var) ==\n zeros(3, 3)):\n inlist += [(measure_number.diff(var), component_frame)]\n else: # else express in the frame\n reexp_vec_comp = Vector([vector_component]).express(frame)\n deriv = reexp_v" }, { "id": 167861, "commit_id": "f74a1866eeb3593498ae5aff8728255a49724e1c", "repo": "pandas", "path": "pandas/tests/io/excel/test_style.py", "file_name": "test_style.py", "fun_name": "test_styler_to_s3", "commit_message": "BUG: Add `storage_option` parameter to to_excel method in Styler (#46491)\n\nCo-authored-by: Jeff Reback ", "code": "def test_styler_to_s3(s3_resource, s3so):\n # GH#46381\n\n mock_bucket_name, target_file = \"pandas-test\", \"test.xlsx\"\n df = DataFrame({\"x\": [1, 2, 3], \"y\": [2, 4, 6]})\n styler = df.style.set_sticky(axis=\"index\")\n styler.to_excel(f\"s3://{mock_bucket_name}/{target_file}\", storage_options=s3so)\n timeout = 5\n while True:\n if target_file in (\n obj.key for obj in s3_resource.Bucket(\"pandas-test\").objects.all()\n ):\n break\n time.sleep(0.1)\n timeout -= 0.1\n assert timeout > 0, \"Timed out waiting for file to appear on moto\"\n result = read_excel(\n f\"s3://{mock_bucket_name}/{target_file}\", index_col=0, storage_options=s3so\n )\n tm.assert_frame_equal(result, df)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 176, "n_words": 67, "vocab_size": 58, 
"complexity": 4, "nloc": 18, "token_counts": 136, "n_ast_nodes": 230, "n_identifiers": 26, "random_cut": "def test_styler_to_s3(s3_resource, s3so):\n " }, { "id": 292935, "commit_id": "9c440d8aa64ee4df9f8c4b8594b5f55703f82445", "repo": "core", "path": "homeassistant/components/alexa/capabilities.py", "file_name": "capabilities.py", "fun_name": "get_valid_inputs", "commit_message": "Guard for non-string inputs in Alexa (#67348)", "code": "def get_valid_inputs(source_list):\n \n input_list = []\n for source in source_list:\n if not isinstance(source, str):\n continue\n formatted_source = (\n source.lower().replace(\"-\", \"\").replace(\"_\", \"\").replace(\" \", \"\")\n )\n if formatted_source in Inputs.VALID_SOURCE_NAME_MAP:\n input_list.append(\n {\"name\": Inputs.VALID_SOURCE_NAME_MAP[formatted_source]}\n )\n\n return input_list\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 184, "n_words": 33, "vocab_size": 27, "complexity": 4, "nloc": 13, "token_counts": 79, "n_ast_nodes": 139, "n_identifiers": 12, "random_cut": "def get_valid_inputs(source_list):\n \n input_list = []\n for source in source_list:\n if not isinstance(source, str" }, { "id": 64659, "commit_id": "1b2c6a5b78d4ee2e31817eb78bb1f614b672eda4", "repo": "erpnext", "path": "erpnext/loan_management/doctype/loan_repayment/loan_repayment.py", "file_name": "loan_repayment.py", "fun_name": "calculate_amounts", "commit_message": "fix: Code cleanup", "code": "def calculate_amounts(against_loan, posting_date, payment_type=''):\n\tamounts = {\n\t\t'penalty_amount': 0.0,\n\t\t'interest_amount': 0.0,\n\t\t'pending_principal_amount': 0.0,\n\t\t'payable_principal_amount': 0.0,\n\t\t'payable_amount': 0.0,\n\t\t'unaccrued_interest': 0.0,\n\t\t'due_date': ''\n\t}\n\n\tamounts = get_amounts(amounts, against_loan, posting_date)\n\n\t# update values for closure\n\tif payment_type == 'Loan Closure':\n\t\tamounts['payable_principal_amount'] = amounts['pending_principal_amount']\n\t\tamounts['interest_amount'] += amounts['unaccrued_interest']\n\t\tamounts['payable_amount'] = amounts['payable_principal_amount'] + amounts['interest_amount'] + amounts['penalty_amount']\n\n\treturn amounts\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 35, "n_words": 52, "vocab_size": 39, "complexity": 2, "nloc": 16, "token_counts": 108, "n_ast_nodes": 172, "n_identifiers": 6, "random_cut": "def calculate_amounts(against_loan, posting_date, payment_type=''):\n\tamounts = {\n\t\t'penalty_amount': 0.0,\n\t\t'interest_amount': 0.0,\n\t\t'pending_principal_amount': 0.0,\n\t\t'payable_principal_amount': 0.0,\n\t\t'payable_amount': 0.0,\n\t\t'unaccrued_interest': 0.0,\n\t\t'due_date': ''\n\t}\n\n\tamounts = get_amounts(amounts, against_loan, posting_date)\n\n\t# update values for closure\n\tif payment_type == 'Loan Closure':\n\t\tamounts['payable_principal_amount'] = amounts['pending_principal_amount']\n\t\tam" }, { "id": 283989, "commit_id": "01391c256412f922561a0ac6ae52862355f39c12", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/alternative/oss/test_oss_controller.py", "file_name": "test_oss_controller.py", "fun_name": "test_print_help", "commit_message": "New Open Source menu (#1603)\n\n* adds os menu\r\n\r\n* remove tests\r\n\r\n* fixed tests\r\n\r\n* fixed date in sh\r\n\r\n* updated alt controller tests\r\n\r\n* updated api fecthing/handling\r\n\r\n* 
lint\r\n\r\n* updated website folder name and tests\r\n\r\n* alt tests\r\n\r\n* tests\r\n\r\n* tests\r\n\r\n* keys for github\r\n\r\n* updated tests\r\n\r\nCo-authored-by: didierlopes.eth \r\nCo-authored-by: Jeroen Bouma \r\nCo-authored-by: Theodore Aptekarev ", "code": "def test_print_help():\n controller = oss_controller.OSSController(queue=None)\n controller.print_help()\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametrize(\n \"an_input, expected_queue\",\n [\n (\"\", []),\n (\"/help\", [\"home\", \"help\"]),\n (\"help/help\", [\"help\", \"help\"]),\n (\"q\", [\"quit\"]),\n (\"h\", []),\n (\n \"r\",\n [\"quit\", \"quit\", \"reset\", \"alternative\", \"oss\"],\n ),\n ],\n)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametrize(\n \"an_input, expected_queue\",\n [\n (\"\", []),\n (\"/help\", [\"home\", \"help\"]),\n (\"help/help\", [\"help\", \"help\"]),\n (\"q\", [\"quit\"]),\n (\"h\", []),\n (\n \"r\",\n [\"quit\", \"quit\", \"reset\", \"alternative\", \"oss\"],\n ),\n ],\n)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 115, "n_words": 33, "vocab_size": 31, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 177, "n_identifiers": 11, "random_cut": "def test_print_help():\n controller = oss_controller.OSSController(queue=None)\n controller.print_help()\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametriz" }, { "id": 163375, "commit_id": "5ba7d714014ae8feaccc0dd4a98890828cf2832d", "repo": "pandas", "path": "pandas/tests/arrays/interval/test_interval.py", "file_name": "test_interval.py", "fun_name": "test_shift", "commit_message": "CLN: assorted, privatize, easy issues (#45305)", "code": "def test_shift(self):\n # https://github.com/pandas-dev/pandas/issues/31495, GH#22428, GH#31502\n a = IntervalArray.from_breaks([1, 2, 3])\n result = a.shift()\n # int -> float\n expected = IntervalArray.from_tuples([(np.nan, np.nan), (1.0, 2.0)])\n tm.assert_interval_array_equal(result, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 67, "n_words": 26, "vocab_size": 23, "complexity": 1, "nloc": 5, "token_counts": 62, "n_ast_nodes": 89, "n_identifiers": 13, "random_cut": "def test_shift(self):\n # https://github.com/pandas-dev/pandas/issues/31495, GH#22428, GH#31502\n a = IntervalArray.from_breaks([1, 2, 3])\n result = a.shift()\n # int -> float\n ex" }, { "id": 295117, "commit_id": "69ee4cd978194e2074d4cd57ebbbf5a028f71f7a", "repo": "core", "path": "homeassistant/helpers/entity.py", "file_name": "entity.py", "fun_name": "_async_write_ha_state", "commit_message": "Deprecate temperature conversion in base entity class (#68978)\n\nCo-authored-by: Paulus Schoutsen ", "code": "def _async_write_ha_state(self) -> None:\n \n if self._platform_state == EntityPlatformState.REMOVED:\n # Polling returned after the entity has already been removed\n return\n\n if self.registry_entry and self.registry_entry.disabled_by:\n if not self._disabled_reported:\n self._disabled_reported = True\n assert self.platform is not None\n _LOGGER.warning(\n \"Entity %s is incorrectly being triggered for updates while it is disabled. 
This is a bug in the %s integration\",\n self.entity_id,\n self.platform.platform_name,\n )\n return\n\n start = timer()\n\n attr = self.capability_attributes\n attr = dict(attr) if attr else {}\n\n available = self.available # only call self.available once per update cycle\n state = self._stringify_state(available)\n if available:\n attr.update(self.state_attributes or {})\n attr.update(self.extra_state_attributes or {})\n\n if (unit_of_measurement := self.unit_of_measurement) is not None:\n attr[ATTR_UNIT_OF_MEASUREMENT] = unit_of_measurement\n\n entry = self.registry_entry\n\n if assumed_state := self.assumed_state:\n attr[ATTR_ASSUMED_STATE] = assumed_state\n\n if (attribution := self.attribution) is not None:\n attr[ATTR_ATTRIBUTION] = attribution\n\n if (\n device_class := (entry and entry.device_class) or self.device_class\n ) is not None:\n attr[ATTR_DEVICE_CLASS] = str(device_class)\n\n if (entity_picture := self.entity_picture) is not None:\n attr[ATTR_ENTITY_PICTURE] = entity_picture\n\n if (icon := (entry and entry.icon) or self.icon) is not None:\n attr[ATTR_ICON] = icon\n\n if (name := (entry and entry.name) or self.name) is not None:\n attr[ATTR_FRIENDLY_NAME] = name\n\n if (supported_features := self.supported_features) is not None:\n attr[ATTR_SUPPORTED_FEATURES] = supported_features\n\n end = timer()\n\n if end - start > 0.4 and not self._slow_reported:\n self._slow_reported = True\n report_issue = self._suggest_report_issue()\n _LOGGER.warning(\n \"Updating state for %s (%s) took %.3f seconds. Please %s\",\n self.entity_id,\n type(self),\n end - start,\n report_issue,\n )\n\n # Overwrite properties that have been set in the config file.\n if DATA_CUSTOMIZE in self.hass.data:\n attr.update(self.hass.data[DATA_CUSTOMIZE].get(self.entity_id))\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 814, "n_words": 240, "vocab_size": 138, "complexity": 28, "nloc": 65, "token_counts": 445, "n_ast_nodes": 587, "n_identifiers": 51, "random_cut": "def _async_write_ha_state(self) -> None:\n \n if self._platform_state == EntityPlatformState.REMOVED:\n # Polling returned after the entity has already been removed\n return\n\n if self.registry_entry and self.registry_entry.disabled_by:\n if not self._disabled_reported:\n self._disabled_reported = True\n assert self.platform is not None\n _LOGGER.warning(\n \"Entity %s is incorrectly being triggered for updates while it is disabled. 
This is a bug in the %s integration\",\n self.entity_id,\n self.platform.platform_name,\n )\n return\n\n start = timer()\n\n attr = self.capability_attributes\n attr = dict(attr) if attr else {}\n\n available = self.available # only call self.available once per update cycle\n state = self._stringify_state(available)\n if available:\n attr.update(self.state_attributes or {})\n attr.update(self.extra_state_attributes or {})\n\n if (unit_of_measurement := self.unit_of_measurement) is not None:\n attr[ATTR_UNIT_OF_MEASUREMENT] = unit_of_measurement\n\n entry = self.registry_entry\n\n if assumed_state := self.assumed_state:\n attr[ATTR_ASSUMED_STATE] = assumed_state\n\n if (attribution := self.attribution) is not None:\n attr[ATTR_ATTRIBUTION] = attribution\n\n if (\n device_class := (entry and entry.device_class) or self.device_class\n ) is not None:\n attr[ATTR_DEVICE_CLASS] = str(device_class)\n\n if (entity_picture := self.entity_picture) is not None:\n attr[ATTR_ENTITY_PICTURE] = entity_picture\n\n if (icon := (entry and entry.icon) or self.icon) is not None:\n attr[ATTR_ICON] = icon\n\n if (name := (entry and entry.name) or self.name) is not None:\n attr[ATTR_FRIENDLY_NAME] = name\n\n if (supported_features := self.supported_features) is not None:\n attr[ATTR_SUPPORTED_FEATURES] = supported_features\n\n end = timer()\n\n if end - start > 0.4 and not self._slow_reported:\n self._slow_reported = True\n report_issue = self._suggest_report_issue()\n _LOGGER.warning(\n \"Updating state for %s (%s) took %.3f seconds. Please %s\",\n self.entity_id,\n type(self),\n end - start,\n report_issue,\n )\n\n # Overwrite properties that have been set in the config file.\n if DATA_CUSTOMIZE in self.hass.data:\n attr.update(self.hass.data[DATA_CUSTOMIZE].get(self.entity_id))\n" }, { "id": 296351, "commit_id": "c932407560d828eac683460daef4d5710c63ce76", "repo": "core", "path": "homeassistant/components/senz/climate.py", "file_name": "climate.py", "fun_name": "target_temperature", "commit_message": "Add SENZ OAuth2 integration (#61233)", "code": "def target_temperature(self) -> float:\n \n return self._thermostat.setpoint_temperature\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 25, "n_identifiers": 5, "random_cut": "def target_temperature(self) -> float:\n \n " }, { "id": 16891, "commit_id": "37cf10f5c4bff230d5c72f4586516cb5a984f892", "repo": "ccxt", "path": "python/ccxt/async_support/bitstamp.py", "file_name": "bitstamp.py", "fun_name": "parse_ticker", "commit_message": "1.70.95\n\n[ci skip]", "code": "def parse_ticker(self, ticker, market=None):\n #\n # {\n # \"high\": \"37534.15\",\n # \"last\": \"36487.44\",\n # \"timestamp\":\n # \"1643370585\",\n # \"bid\": \"36475.15\",\n # \"vwap\": \"36595.67\",\n # \"volume\": \"2848.49168527\",\n # \"low\": \"35511.32\",\n # \"ask\": \"36487.44\",\n # \"open\": \"37179.62\"\n # }\n #\n symbol = self.safe_symbol(None, market)\n timestamp = self.safe_timestamp(ticker, 'timestamp')\n vwap = self.safe_string(ticker, 'vwap')\n baseVolume = self.safe_string(ticker, 'volume')\n quoteVolume = Precise.string_mul(baseVolume, vwap)\n last = self.safe_string(ticker, 'last')\n return self.safe_ticker({\n 'symbol': symbol,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'high': self.safe_string(ticker, 'high'),\n 'low': self.safe_string(ticker, 'low'),\n 
'bid': self.safe_string(ticker, 'bid'),\n 'bidVolume': None,\n 'ask': self.safe_string(ticker, 'ask'),\n 'askVolume': None,\n 'vwap': vwap,\n 'open': self.safe_string(ticker, 'open'),\n 'close': last,\n 'last': last,\n 'previousClose': None,\n 'change': None,\n 'percentage': None,\n 'average': None,\n 'baseVolume': baseVolume,\n 'quoteVolume': quoteVolume,\n 'info': ticker,\n }, market, False)\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 525, "n_words": 112, "vocab_size": 79, "complexity": 1, "nloc": 29, "token_counts": 203, "n_ast_nodes": 353, "n_identifiers": 17, "random_cut": "def parse_ticker(self, ticker, market=None):\n #\n # {\n # \"high\": \"" }, { "id": 100049, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_project_details.py", "file_name": "test_project_details.py", "fun_name": "test_digests_delay", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_digests_delay(self):\n self.get_success_response(self.org_slug, self.proj_slug, digestsMinDelay=1000)\n assert self.project.get_option(\"digests:mail:minimum_delay\") == 1000\n\n self.get_success_response(self.org_slug, self.proj_slug, digestsMaxDelay=1200)\n assert self.project.get_option(\"digests:mail:maximum_delay\") == 1200\n\n self.get_success_response(\n self.org_slug, self.proj_slug, digestsMinDelay=300, digestsMaxDelay=600\n )\n assert self.project.get_option(\"digests:mail:minimum_delay\") == 300\n assert self.project.get_option(\"digests:mail:maximum_delay\") == 600\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 96, "n_words": 30, "vocab_size": 19, "complexity": 1, "nloc": 10, "token_counts": 101, "n_ast_nodes": 159, "n_identifiers": 9, "random_cut": "def test_digests_delay(self):\n self.get_success_response(self.org_slug, self.proj_slug, digestsMinDelay=1000)\n assert self." 
}, { "id": 306694, "commit_id": "23052dc7b57de3193d56db9885b68e61d618bf87", "repo": "core", "path": "homeassistant/components/yamaha_musiccast/media_player.py", "file_name": "media_player.py", "fun_name": "async_volume_down", "commit_message": "Improve entity type hints [y] (#77888)", "code": "async def async_volume_down(self) -> None:\n \n await self.coordinator.musiccast.volume_down(self._zone_id)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 21, "n_ast_nodes": 38, "n_identifiers": 6, "random_cut": "async def async_volume_down(self) -> None:\n \n await self.coordinator.musiccast.volume_down(self._" }, { "id": 187234, "commit_id": "d09112ab1f6db6aa605650fe1ff6a3028344f90d", "repo": "streamlink", "path": "tests/test_api_validate.py", "file_name": "test_api_validate.py", "fun_name": "obj", "commit_message": "plugin.api.validate: rewrite tests\n\nCompletely rewrite tests using pytest, with full coverage", "code": "def obj(self):\n obj1 = self.Subject()\n obj2 = self.Subject()\n setattr(obj1, \"bar\", obj2)\n\n return obj1\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 40, "n_words": 13, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 29, "n_ast_nodes": 49, "n_identifiers": 6, "random_cut": "def obj(self):\n obj1 = self.Subject()\n obj2 = self.Subject()\n setattr(obj1, \"bar\"," }, { "id": 6685, "commit_id": "c71540da789433b7661382b93d71749be00cd91f", "repo": "ludwig", "path": "ludwig/data/dataset/ray.py", "file_name": "ray.py", "fun_name": "pipeline", "commit_message": "Added distributed evaluation on Ray (#1957)", "code": "def pipeline(self, shuffle=True, fully_executed=True) -> DatasetPipeline:\n if not fully_executed and not _ray112:\n raise ValueError(f\"Cannot set fully_execute=False in ray {ray.__version__}\")\n\n if fully_executed and _ray112:\n # set instance state so calls to __len__ will also use the fully_executed version\n self.ds = self.ds.fully_executed()\n\n pipe = self.ds.repeat()\n if shuffle:\n pipe = pipe.random_shuffle_each_window()\n return pipe\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 128, "n_words": 50, "vocab_size": 38, "complexity": 6, "nloc": 9, "token_counts": 65, "n_ast_nodes": 113, "n_identifiers": 13, "random_cut": "def pipeline(self, shuffle=True, fully_executed=True) -> DatasetPipeline:\n if not fully_executed and not _ray112:\n raise ValueError(f\"Cannot set fully_execute=False in ray {ray.__version__}\")\n\n if fully_executed and _ray112:\n # set instance state so calls to __len__ will also use the fully_executed version\n self.ds = self.ds.fully_executed()\n\n pipe = self.ds.repeat()\n if shuffle" }, { "id": 182278, "commit_id": "927d04d6b08ff0983d4486733ec8d728cad43bf0", "repo": "textual", "path": "tests/test_styles.py", "file_name": "test_styles.py", "fun_name": "test_render_styles_border", "commit_message": "Update tests/test_styles.py\n\nCo-authored-by: Darren Burns ", "code": "def test_render_styles_border():\n base = Styles()\n inline = Styles()\n styles_view = RenderStyles(None, base, inline)\n\n base.border_top = (\"heavy\", \"red\")\n # Base has border-top: heavy red\n assert styles_view.border_top == (\"heavy\", Color.parse(\"red\"))\n\n 
inline.border_left = (\"rounded\", \"green\")\n # Base has border-top heavy red, inline has border-left: rounded green\n assert styles_view.border_top == (\"heavy\", Color.parse(\"red\"))\n assert styles_view.border_left == (\"rounded\", Color.parse(\"green\"))\n assert styles_view.border == (\n (\"heavy\", Color.parse(\"red\")),\n (\"\", Color.default()),\n (\"\", Color.default()),\n (\"rounded\", Color.parse(\"green\")),\n )\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 129, "n_words": 66, "vocab_size": 40, "complexity": 1, "nloc": 15, "token_counts": 136, "n_ast_nodes": 235, "n_identifiers": 12, "random_cut": "def test_render_styles_border():\n base = Styles()\n inline = Styles()\n styles_view = RenderStyles(None, base, inline)\n\n base.border_top = (\"heavy\", \"red\")\n # Base has border-top: heavy red\n assert styles_view.border_top == (\"heavy\", Color.parse(\"red\"))\n\n inline.border_left = (\"rounded\", \"green\")\n # Base has border-top heavy red, inline has border-left: rounded green\n assert styles_view.border_top == (\"heavy\", Color.parse(\"red\"))\n assert styles_view.border_left == (\"rounded\", Color.parse(\"green\"))\n assert styles_" }, { "id": 71429, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/pages/test_copy_page.py", "file_name": "test_copy_page.py", "fun_name": "test_page_copy_alias_post_copy_subpages", "commit_message": "Reformat with black", "code": "def test_page_copy_alias_post_copy_subpages(self):\n post_data = {\n \"new_title\": \"Hello world 2\",\n \"new_slug\": \"hello-world-2\",\n \"new_parent_page\": str(self.root_page.id),\n \"copy_subpages\": True,\n \"publish_copies\": False,\n \"alias\": True,\n }\n response = self.client.post(\n reverse(\"wagtailadmin_pages:copy\", args=(self.test_page.id,)), post_data\n )\n\n # Check that the user was redirected to the parents explore page\n self.assertRedirects(\n response, reverse(\"wagtailadmin_explore\", args=(self.root_page.id,))\n )\n\n # Get copy\n page_copy = self.root_page.get_children().get(slug=\"hello-world-2\")\n\n # Check the copy is an alias of the original\n self.assertEqual(page_copy.alias_of, self.test_page.page_ptr)\n\n # Check that the copy is live\n # Note: publish_copies is ignored. 
Alias pages always keep the same state as their original\n self.assertTrue(page_copy.live)\n self.assertFalse(page_copy.has_unpublished_changes)\n\n # Check that the owner of the page is set correctly\n self.assertEqual(page_copy.owner, self.user)\n\n # Check that the children were copied\n self.assertEqual(page_copy.get_children().count(), 2)\n\n # Check the the child pages\n # Neither of them should be live\n child_copy = page_copy.get_children().filter(slug=\"child-page\").first()\n self.assertIsNotNone(child_copy)\n self.assertEqual(child_copy.alias_of, self.test_child_page.page_ptr)\n self.assertTrue(child_copy.live)\n self.assertFalse(child_copy.has_unpublished_changes)\n\n unpublished_child_copy = (\n page_copy.get_children().filter(slug=\"unpublished-child-page\").first()\n )\n self.assertIsNotNone(unpublished_child_copy)\n self.assertEqual(\n unpublished_child_copy.alias_of, self.test_unpublished_child_page.page_ptr\n )\n self.assertFalse(unpublished_child_copy.live)\n self.assertTrue(unpublished_child_copy.has_unpublished_changes)\n\n # treebeard should report no consistency problems with the tree\n self.assertFalse(\n any(Page.find_problems()), \"treebeard found consistency problems\"\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 530, "n_words": 158, "vocab_size": 107, "complexity": 1, "nloc": 38, "token_counts": 287, "n_ast_nodes": 483, "n_identifiers": 37, "random_cut": "def test_page_copy_alias_post_copy_subpages(self):\n post_data = {\n \"new_title\": \"Hello world 2\",\n \"new_slug\": \"hello-world-2\",\n \"new_parent_page\": str(self.root_page.id),\n \"copy_subpages\": True,\n \"publish_copies\": False,\n \"alias\": True,\n }\n response = self.client.post(\n reverse(\"wagtailadmin_pages:copy\", args=(self.test_page.id,)), post_data\n )\n\n # Check that the user was redirected to the parents explore page\n self.assertRedirects(\n response, reverse(\"wagtailadmin_explore\", args=(self.root_page.id,))\n )\n\n # Get copy\n page_copy = self.root_page.get_children().get(slug=\"hello-world-2\")\n\n # Check the copy is an alias of the original\n self.assertEqual(page_copy.alias_of, self.test_page.page_ptr)\n\n # Check that the copy is live\n # Note: publish_copies is ignored. Alias pages always keep the same state as their original\n self.assertTrue(page_copy.live)\n self.assertFalse(page_copy.has_unpublished_changes)\n\n # Check that the owner of the page is set correctly\n" }, { "id": 156617, "commit_id": "73c985c2bed5d61be6d05d67f7f381435fe8d1e2", "repo": "dask", "path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "fun_name": "test_pyarrow_schema_mismatch_error", "commit_message": "Enfore consistent schema in `to_parquet` pyarrow (#9131)\n\n* Enfore consistent schema in `to_parquet` pyarrow\r\n\r\nPreviously each partition written in `to_parquet` with\r\n`engine=\"pyarrow\"` had their schema inferred based solely on data within\r\nthat partition alone. This leads to a few common problems:\r\n\r\n- Mismatched inferred schemas between partitions aren't caught, leading\r\n to issues in downstream consumers. 
Most readers assume a uniform\r\n parquet schema across files, which wasn't enforced by default by the\r\n pyarrow engine.\r\n- Inference was supported with `schema=\"infer\"`, but:\r\n - It was expensive, requiring computing at least one partition\r\n (sometimes more) to get a sample of any `object` dtypes.\r\n - Inference here for non-string object dtypes is silently incorrect,\r\n users should really be explicit here when trying to write more\r\n complicated schemas containing structured fields.\r\n - Inference was off by default\r\n- When writing a `_metadata` file, differences in per-file schemas would\r\n result in opaque errors when merging the metadata before writing. This\r\n error is deep in the `arrow` codebase, and doesn't provide enough\r\n information for the user to debug where the schema mismatch occured.\r\n Turning on schema enforcement by default lets us generate a better\r\n error message,\r\n\r\nThis PR changes the `schema` parameter in the following way:\r\n- `schema=\"infer\"` is the default\r\n- schema inference assumes object fields are strings (common), and makes\r\n no attempt at more complicated inference. Users with more complicated\r\n schemas should specify those explicitly. This means that schema\r\n inference is now cheap.\r\n- A better error message is generated for mismatched partition dtypes in\r\n `to_parquet` for the `pyarrow` engine. The fastparquet engine already\r\n does this.", "code": "def test_pyarrow_schema_mismatch_error(tmpdir):\n df1 = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [4.5, 6, 7]})\n df2 = pd.DataFrame({\"x\": [4, 5, 6], \"y\": [\"a\", \"b\", \"c\"]})\n\n ddf = dd.from_delayed(\n [dask.delayed(df1), dask.delayed(df2)], meta=df1, verify_meta=False\n )\n\n with pytest.raises(ValueError) as rec:\n ddf.to_parquet(str(tmpdir), engine=\"pyarrow\")\n\n msg = str(rec.value)\n assert \"Failed to convert partition to expected pyarrow schema\" in msg\n assert \"y: double\" in str(rec.value)\n assert \"y: string\" in str(rec.value)\n\n\n@PYARROW_MARK", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "@PYARROW_MARK", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 100, "n_words": 61, "vocab_size": 47, "complexity": 1, "nloc": 12, "token_counts": 146, "n_ast_nodes": 241, "n_identifiers": 23, "random_cut": "def test_pyarrow_schema_mismatch_error(tmpdir):\n df1 = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [4.5, 6, 7]})\n df2 = pd.DataFrame({\"x\": [4, 5, 6], \"y\": [\"a\", \"b\", \"c\"]})\n\n ddf = dd.from_delayed(\n [dask.delayed(df1), dask.delayed(df2)], meta=df1, verify_meta=False\n )\n\n with pytest.raises(ValueError) " }, { "id": 315621, "commit_id": "6540ba623978813668a30e5822b97e076fc05a93", "repo": "core", "path": "homeassistant/components/hassio/__init__.py", "file_name": "__init__.py", "fun_name": "get_supervisor_ip", "commit_message": "Remove hassio from mypy ignore list (#74603)\n\n* Remove hassio from mypy ignore list\r\n\r\n* Avoid if TYPE_CHECKING", "code": "def get_supervisor_ip() -> str | None:\n \n if \"SUPERVISOR\" not in os.environ:\n return None\n return os.environ[\"SUPERVISOR\"].partition(\":\")[0]\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 31, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 5, "token_counts": 34, "n_ast_nodes": 61, "n_identifiers": 5, "random_cut": "def get_supervisor_ip() -> str | None:\n \n if \"SUPERVISOR\" not in os.environ:\n return None\n return 
os.environ[\"SUPERVISOR\"].partition(\":\")[0]\n\n" }, { "id": 264889, "commit_id": "3a461d02793e6f9d41c2b1a92647e691de1abaac", "repo": "netbox", "path": "netbox/dcim/tests/test_models.py", "file_name": "test_models.py", "fun_name": "test_cable_cannot_have_the_same_terminination_on_both_ends", "commit_message": "Update Cable instantiations to match new signature", "code": "def test_cable_cannot_have_the_same_terminination_on_both_ends(self):\n \n cable = Cable(a_terminations=[self.interface1], b_terminations=[self.interface1])\n with self.assertRaises(ValidationError):\n cable.clean()\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 41, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 39, "n_ast_nodes": 67, "n_identifiers": 10, "random_cut": "def test_cable_cannot_have_the_same_terminination_on_both_ends(self):\n \n cable = Cable(a_terminations=[self.interface1], b_terminations=[self.interface1])\n with self.assertRaises(ValidationError):\n cable.clea" }, { "id": 48048, "commit_id": "511d0ee256b819690ccf0f6b30d12340b1dd7f0a", "repo": "airflow", "path": "tests/providers/google/cloud/transfers/test_bigquery_to_mssql.py", "file_name": "test_bigquery_to_mssql.py", "fun_name": "test_execute_good_request_to_bq", "commit_message": "Bigquery assets (#23165)", "code": "def test_execute_good_request_to_bq(self, mock_hook):\n destination_table = 'table'\n operator = BigQueryToMsSqlOperator(\n task_id=TASK_ID,\n source_project_dataset_table=f'{TEST_PROJECT_ID}.{TEST_DATASET}.{TEST_TABLE_ID}',\n mssql_table=destination_table,\n replace=False,\n )\n\n operator.execute(context=mock.MagicMock())\n # fmt: off\n mock_hook.return_value.list_rows.assert_called_once_with(\n dataset_id=TEST_DATASET,\n table_id=TEST_TABLE_ID,\n max_results=1000,\n selected_fields=None,\n start_index=0,\n )\n # fmt: on\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 182, "n_words": 28, "vocab_size": 24, "complexity": 1, "nloc": 16, "token_counts": 73, "n_ast_nodes": 123, "n_identifiers": 26, "random_cut": "def test_execute_good_request_to_bq(self, mock_hook):\n destination_table = 'table'\n operator = " }, { "id": 291310, "commit_id": "003e4224c89a6da381960dc5347750d1521d85c9", "repo": "core", "path": "homeassistant/components/text/__init__.py", "file_name": "__init__.py", "fun_name": "pattern_cmp", "commit_message": "Add `text` platform (#79454)\n\nCo-authored-by: Franck Nijhof \r\nCo-authored-by: Franck Nijhof ", "code": "def pattern_cmp(self) -> re.Pattern | None:\n \n if self.pattern is None:\n self.__pattern_cmp = None\n return None\n if not self.__pattern_cmp or self.pattern != self.__pattern_cmp.pattern:\n self.__pattern_cmp = re.compile(self.pattern)\n return self.__pattern_cmp\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 88, "n_words": 27, "vocab_size": 18, "complexity": 4, "nloc": 8, "token_counts": 58, "n_ast_nodes": 94, "n_identifiers": 7, "random_cut": "def pattern_cmp(self) -> re.Pattern | None:\n \n if self.pattern is None:\n self.__pattern_cmp = None\n " }, { "id": 74587, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/tests/test_tests.py", "file_name": "test_tests.py", "fun_name": "test_nested_form_data", "commit_message": "Reformat 
with black", "code": "def test_nested_form_data(self):\n result = nested_form_data(\n {\n \"foo\": \"bar\",\n \"parent\": {\n \"child\": \"field\",\n },\n }\n )\n self.assertEqual(result, {\"foo\": \"bar\", \"parent-child\": \"field\"})\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 126, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 10, "token_counts": 41, "n_ast_nodes": 79, "n_identifiers": 5, "random_cut": "def test_nested_form_data(self):\n result = nested_form_data(\n {\n \"foo\": \"bar\",\n \"parent\": {\n " }, { "id": 6054, "commit_id": "0fea5903b211823b5319ec03cd5262aadf97969e", "repo": "ludwig", "path": "tests/ludwig/decoders/test_sequence_decoder.py", "file_name": "test_sequence_decoder.py", "fun_name": "test_sequence_rnn_decoder", "commit_message": "Squeeze explicilty. (#1726)", "code": "def test_sequence_rnn_decoder(cell_type, num_layers, batch_size):\n hidden_size = 256\n vocab_size = 50\n max_sequence_length = 10\n\n combiner_outputs = {HIDDEN: torch.rand([batch_size, hidden_size])}\n sequence_rnn_decoder = SequenceRNNDecoder(\n hidden_size, vocab_size, max_sequence_length, cell_type, num_layers=num_layers\n )\n\n output = sequence_rnn_decoder(combiner_outputs, target=None)\n\n assert list(output.size()) == [batch_size, max_sequence_length, vocab_size]\n\n\n@pytest.mark.parametrize(\"num_layers\", [1, 2])\n@pytest.mark.parametrize(\"batch_size\", [20, 1])", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"num_layers\", [1, 2])\n@pytest.mark.parametrize(\"batch_size\", [20, 1])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 71, "n_words": 43, "vocab_size": 37, "complexity": 1, "nloc": 10, "token_counts": 77, "n_ast_nodes": 160, "n_identifiers": 20, "random_cut": "def test_sequence_rnn_decoder(cell_type, num_layers, batch_size):\n hidden_size = 256\n vocab_size = 50\n max_sequence_length = 10\n\n combiner_outputs = {HIDDEN: torch.rand([batch_size, hidden_size])}\n sequence_rnn_decoder = SequenceRNNDecoder(\n hidden_size, vocab_size, max_sequence_length, cell_type, num_layers=num_layers\n )\n\n output = sequence_rnn_decoder(combiner_outputs, target=None)\n\n assert list(output.size()) == [batch_size, max_sequence_length, vocab_size]\n\n\n@pytest.mark.parametrize(\"num_layers\", [1, 2])\n@pytest.mark.parametrize(\"batch_size\", [20, 1])" }, { "id": 152293, "commit_id": "3b1b1444d4d90415fb42252406437b3d2ceb2110", "repo": "stable-diffusion-webui", "path": "modules/sd_hijack.py", "file_name": "sd_hijack.py", "fun_name": "nonlinearity_hijack", "commit_message": "Complete cross attention update", "code": "def nonlinearity_hijack(x):\r\n # swish\r\n t = torch.sigmoid(x)\r\n x *= t\r\n del t\r\n\r\n return x\r\n\r", "url": "https://github.com/AUTOMATIC1111/stable-diffusion-webui.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 28, "n_words": 14, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 21, "n_ast_nodes": 34, "n_identifiers": 5, "random_cut": "def nonlinearity_hijack(x):\r\n # swish\r\n t = torch.sigmoid(x)\r\n x *= t\r\n del t\r\n\r\n return x\r\n\r" }, { "id": 104670, "commit_id": "bf08ea3f95e8209a7afd2b50410ad5db51409d11", "repo": "datasets", "path": "src/datasets/builder.py", "file_name": "builder.py", "fun_name": "_save_info", "commit_message": "Avoid writing empty license files (#4090)\n\n* Avoid writing empty license 
files\r\n\r\n* Fix empty license file for MetricInfo", "code": "def _save_info(self):\n if os.path.exists(self._cache_dir):\n super()._save_info()\n else:\n import apache_beam as beam\n\n fs = beam.io.filesystems.FileSystems\n with fs.create(os.path.join(self._cache_dir, config.DATASET_INFO_FILENAME)) as f:\n self.info._dump_info(f)\n if self.info.license:\n with fs.create(os.path.join(self._cache_dir, config.LICENSE_FILENAME)) as f:\n self.info._dump_license(f)\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 144, "n_words": 27, "vocab_size": 21, "complexity": 3, "nloc": 11, "token_counts": 108, "n_ast_nodes": 179, "n_identifiers": 23, "random_cut": "def _save_info(self):\n if os.path.exists(self._cache_dir):\n super()._save_info()\n else:\n import apache_beam as beam\n\n fs = beam.io.filesystems.FileSystems\n with fs.create(os.pa" }, { "id": 198668, "commit_id": "19114acf6514bc87f5c8cfde35e0fcab88965be7", "repo": "sympy", "path": "sympy/simplify/hyperexpand.py", "file_name": "hyperexpand.py", "fun_name": "try_shifted_sum", "commit_message": "Code optimizations", "code": "def try_shifted_sum(func, z):\n \n abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1)\n if len(abuckets[S.Zero]) != 1:\n return None\n r = abuckets[S.Zero][0]\n if r <= 0:\n return None\n if S.Zero not in bbuckets:\n return None\n l = list(bbuckets[S.Zero])\n l.sort()\n k = l[0]\n if k <= 0:\n return None\n\n nap = list(func.ap)\n nap.remove(r)\n nbq = list(func.bq)\n nbq.remove(k)\n k -= 1\n nap = [x - k for x in nap]\n nbq = [x - k for x in nbq]\n\n ops = []\n for n in range(r - 1):\n ops.append(ShiftA(n + 1))\n ops.reverse()\n\n fac = factorial(k)/z**k\n fac *= Mul(*[rf(b, k) for b in nbq])\n fac /= Mul(*[rf(a, k) for a in nap])\n\n ops += [MultOperator(fac)]\n\n p = 0\n for n in range(k):\n m = z**n/factorial(n)\n m *= Mul(*[rf(a, n) for a in nap])\n m /= Mul(*[rf(b, n) for b in nbq])\n p += m\n\n return Hyper_Function(nap, nbq), ops, -p\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 287, "n_words": 143, "vocab_size": 75, "complexity": 13, "nloc": 36, "token_counts": 314, "n_ast_nodes": 481, "n_identifiers": 37, "random_cut": "def try_shifted_sum(func, z):\n \n abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1)\n if len(abuckets[S.Zero]) != 1:\n return None\n r = abuckets[S.Zero][0]\n if r <= 0:\n return None\n if S.Zero not in bbuckets:\n return None\n l = list(bbuckets[S.Zero])\n l.sort()\n k = l[0]\n if k <= 0:\n return None\n\n nap = list(func.ap)\n nap.remove(r)\n nbq = " }, { "id": 255020, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/backend/test/case/node/shape.py", "file_name": "shape.py", "fun_name": "export", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel 
\r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def export() -> None:\n x = np.array([\n [1, 2, 3],\n [4, 5, 6],\n ]).astype(np.float32)\n test_shape('_example', x) # preserve names of original test cases\n\n x = np.random.randn(3, 4, 5).astype(np.float32)\n\n test_shape('', x) # preserve names of original test cases\n\n test_shape('_start_1', x, start=1)\n\n test_shape('_end_1', x, end=1)\n\n test_shape('_start_negative_1', x, start=-1)\n\n test_shape('_end_negative_1', x, end=-1)\n\n test_shape('_start_1_end_negative_1', x, start=1, end=-1)\n\n test_shape('_start_1_end_2', x, start=1, end=2)\n\n test_shape('_clip_start', x, start=-10)\n\n test_shape('_clip_end', x, end=10)\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 177, "n_words": 63, "vocab_size": 44, "complexity": 1, "nloc": 16, "token_counts": 163, "n_ast_nodes": 256, "n_identifiers": 11, "random_cut": "def export() -> None:\n x = np.array([\n [1, 2, 3],\n [4, 5, 6],\n ]).astype(np.float32)\n test_shape('_example', x) # preserve names of original test cases\n\n x = np.random.randn(3, 4, 5).astype(np.float32)\n\n test_shape('', x) # preserve names of original test cases\n\n test_shape('_start_1', x, start=1)\n\n test_shape('_end_1', x, end=1)\n\n test_shape('_start_negative_1', x, start=-1)\n\n test_shape('_end_negative_1', x, end=-1)\n\n test_shape('_start_1_end_negative_1', x, start=1, end=-1)\n\n test_shape('_start_1_end_2', x, start=1, end=2)\n\n test_shape('_clip_" }, { "id": 93618, "commit_id": "f2fd5c3780796affcf1d7685ba6a3a0634bb6ceb", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_project_profiling_profile.py", "file_name": "test_project_profiling_profile.py", "fun_name": "test_sort_missing", "commit_message": "feat(profiling): Use new functions endpoint for suspect functions query (#36922)\n\nThis change uses the new endpoint to query for suspect functions which is backed\r\nby the functions table.", "code": "def test_sort_missing(self):\n with self.feature(PROFILING_FEATURES):\n response = self.client.get(self.url)\n assert response.status_code == 400\n assert response.data == {\n \"detail\": ErrorDetail(\n string=\"Invalid query: Missing value for sort\", code=\"parse_error\"\n )\n }\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 101, "n_words": 26, "vocab_size": 24, "complexity": 1, "nloc": 9, "token_counts": 50, "n_ast_nodes": 86, "n_identifiers": 13, "random_cut": "def test_sort_missing(self):\n with self.f" }, { "id": 200888, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/aggregation/tests.py", "file_name": "tests.py", "fun_name": "test_backwards_m2m_annotate", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_backwards_m2m_annotate(self):\n authors = (\n Author.objects.filter(name__contains=\"a\")\n .annotate(Avg(\"book__rating\"))\n .order_by(\"name\")\n )\n self.assertQuerysetEqual(\n authors,\n [\n (\"Adrian Holovaty\", 4.5),\n 
(\"Brad Dayley\", 3.0),\n (\"Jacob Kaplan-Moss\", 4.5),\n (\"James Bennett\", 4.0),\n (\"Paul Bissex\", 4.0),\n (\"Stuart Russell\", 4.0),\n ],\n lambda a: (a.name, a.book__rating__avg),\n )\n\n authors = Author.objects.annotate(num_books=Count(\"book\")).order_by(\"name\")\n self.assertQuerysetEqual(\n authors,\n [\n (\"Adrian Holovaty\", 1),\n (\"Brad Dayley\", 1),\n (\"Jacob Kaplan-Moss\", 1),\n (\"James Bennett\", 1),\n (\"Jeffrey Forcier\", 1),\n (\"Paul Bissex\", 1),\n (\"Peter Norvig\", 2),\n (\"Stuart Russell\", 1),\n (\"Wesley J. Chun\", 1),\n ],\n lambda a: (a.name, a.num_books),\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 470, "n_words": 76, "vocab_size": 43, "complexity": 1, "nloc": 34, "token_counts": 200, "n_ast_nodes": 295, "n_identifiers": 16, "random_cut": "def test_backwards_m2m_annotate(self):\n authors = (\n Author.objects.filter(name__contains=\"a\")\n .annotate(Avg(\"book__rating\"))\n .order_by(\"name\")\n )\n self.assertQuerysetEqual(\n authors,\n [\n (\"Adrian Holovaty\", 4.5),\n (\"Brad Dayley\", 3.0),\n (\"Jacob Kaplan-Moss\", 4.5),\n (\"James Bennett\", 4.0),\n " }, { "id": 181816, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tpot/base.py", "file_name": "base.py", "fun_name": "_combine_individual_stats", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def _combine_individual_stats(self, operator_count, cv_score, individual_stats):\n \n stats = deepcopy(\n individual_stats\n ) # Deepcopy, since the string reference to predecessor should be cloned\n stats[\"operator_count\"] = operator_count\n stats[\"internal_cv_score\"] = cv_score\n return stats\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 83, "n_words": 29, "vocab_size": 26, "complexity": 1, "nloc": 7, "token_counts": 32, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def _combine_individual_stats(self, operator_count, cv_score, individual_stats):\n \n stats = deepcopy(\n individual_stats\n ) # Deepcopy, since the string reference to predecessor" }, { "id": 65357, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/tax_detail/tax_detail.py", "file_name": "tax_detail.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute(filters=None):\n\tif not filters:\n\t\treturn [], []\n\n\tfieldlist = required_sql_fields\n\tfieldstr = get_fieldstr(fieldlist)\n\n\tgl_entries = frappe.db.sql(\n\t\t.format(\n\t\t\tfieldstr=fieldstr\n\t\t),\n\t\tfilters,\n\t\tas_dict=1,\n\t)\n\n\treport_data = modify_report_data(gl_entries)\n\tsummary = None\n\tif filters[\"mode\"] == \"run\" and filters[\"report_name\"] != \"Tax Detail\":\n\t\treport_data, summary = run_report(filters[\"report_name\"], report_data)\n\n\t# return columns, data, message, chart, report_summary\n\treturn get_columns(fieldlist), report_data, None, None, summary\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 38, "n_words": 56, "vocab_size": 44, "complexity": 4, "nloc": 35, "token_counts": 100, "n_ast_nodes": 165, "n_identifiers": 17, "random_cut": "def execute(filters=None):\n\tif not filters:\n\t\treturn [], 
[]\n\n\tfieldlist = required_sql_fields\n\tfieldstr = get_fieldstr(fieldlist)\n\n\tgl_entries = frappe.db.sql(\n\t\t.format(\n\t\t\tfieldstr=fieldstr\n\t\t),\n\t\tfilters,\n\t\tas_dict=1,\n\t)\n\n\treport_data = modify_report_data(gl_entries)\n\tsummary = None\n\tif filters[\"mode\"] == \"run\" and filters[\"report_name\"] != \"Tax Detail\":\n\t\treport_data, summary = run_rep" }, { "id": 17415, "commit_id": "e01461d06258d5c0956f03f4f4b858a183116276", "repo": "ccxt", "path": "python/ccxt/async_support/okcoin.py", "file_name": "okcoin.py", "fun_name": "fetch_ohlcv", "commit_message": "1.72.15\n\n[ci skip]", "code": "async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n duration = self.parse_timeframe(timeframe)\n request = {\n 'instrument_id': market['id'],\n 'granularity': self.timeframes[timeframe],\n }\n options = self.safe_value(self.options, 'fetchOHLCV', {})\n defaultType = self.safe_string(options, 'type', 'Candles') # Candles or HistoryCandles\n type = self.safe_string(params, 'type', defaultType)\n params = self.omit(params, 'type')\n method = market['type'] + 'GetInstrumentsInstrumentId' + type\n if type == 'Candles':\n if since is not None:\n if limit is not None:\n request['end'] = self.iso8601(self.sum(since, limit * duration * 1000))\n request['start'] = self.iso8601(since)\n else:\n if limit is not None:\n now = self.milliseconds()\n request['start'] = self.iso8601(now - limit * duration * 1000)\n request['end'] = self.iso8601(now)\n elif type == 'HistoryCandles':\n if market['option']:\n raise NotSupported(self.id + ' fetchOHLCV does not have ' + type + ' for ' + market['type'] + ' markets')\n if since is not None:\n if limit is None:\n limit = 300 # default\n request['start'] = self.iso8601(self.sum(since, limit * duration * 1000))\n request['end'] = self.iso8601(since)\n else:\n if limit is not None:\n now = self.milliseconds()\n request['end'] = self.iso8601(now - limit * duration * 1000)\n request['start'] = self.iso8601(now)\n response = await getattr(self, method)(self.extend(request, params))\n #\n # spot markets\n #\n # [\n # {\n # close: \"0.02683401\",\n # high: \"0.02683401\",\n # low: \"0.02683401\",\n # open: \"0.02683401\",\n # time: \"2018-12-17T23:47:00.000Z\",\n # volume: \"0\"\n # },\n # {\n # close: \"0.02684545\",\n # high: \"0.02685084\",\n # low: \"0.02683312\",\n # open: \"0.02683894\",\n # time: \"2018-12-17T20:28:00.000Z\",\n # volume: \"101.457222\"\n # }\n # ]\n #\n # futures\n #\n # [\n # [\n # 1545090660000,\n # 0.3171,\n # 0.3174,\n # 0.3171,\n # 0.3173,\n # 1648,\n # 51930.38579450868\n # ],\n # [\n # 1545072720000,\n # 0.3159,\n # 0.3161,\n # 0.3144,\n # 0.3149,\n # 22886,\n # 725179.26172331\n # ]\n # ]\n #\n return self.parse_ohlcvs(response, market, timeframe, since, limit)\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1437, "n_words": 282, "vocab_size": 123, "complexity": 10, "nloc": 38, "token_counts": 373, "n_ast_nodes": 663, "n_identifiers": 30, "random_cut": "async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n duration = self.parse_timeframe(timeframe)\n request = {\n 'instrument_id': market['id'],\n 'granularity': self.timeframes[timeframe],\n }\n options = self.safe_value(self.options, 'fetchOHLCV', {})\n defaultType = self.safe_string(options, 'type', 
'Candles') # Candles or HistoryCandles\n type = self.safe_string(params, 'type', defaultType)\n params = self.omit(params, 'type')\n method = market['type'] + 'GetInstrumentsInstrumentId' + type\n if type == 'Candles':\n if since is not None:\n if limit is not None:\n request['end'] = self.iso8601(self.sum(since, limit * duration * 1000))\n request['start'] = self.iso8601(since)\n else:\n if limit is not None:\n now = self.milliseconds()\n request['start'] = self.iso8601(now - limit * duration * 1000)\n request['end'] = self.iso8601(now)\n elif type == '" }, { "id": 283865, "commit_id": "826cd8a723d8e2b810c51bf8266c09e8e55059c4", "repo": "OpenBBTerminal", "path": "tests/bots/stocks/options/test_opt_chain.py", "file_name": "test_opt_chain.py", "fun_name": "vcr_config", "commit_message": "Add tests for bots/stocks (#1616)\n\n* Added test_quote\r\n\r\n* Added dps tests\r\n\r\n* Added more tests\r\n\r\n* Added government tests\r\n\r\n* Added insider tests\r\n\r\n* Added options tests\r\n\r\n* Added sia tests\r\n\r\n* Added ta tests\r\n\r\n* Readd coverage rc\r\n\r\n* Added test", "code": "def vcr_config():\n return {\n \"filter_headers\": [(\"User-Agent\", None)],\n \"filter_query_parameters\": [\n (\"period1\", \"MOCK_PERIOD_1\"),\n (\"period2\", \"MOCK_PERIOD_2\"),\n (\"date\", \"MOCK_DATE\"),\n ],\n }\n\n\n@pytest.mark.vcr\n@pytest.mark.bots\n@pytest.mark.parametrize(\n \"opt_type, min_sp, max_sp\", [(\"Calls\", None, None), (\"Puts\", 100.0, 1000.0)]\n)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr\n@pytest.mark.bots\n@pytest.mark.parametrize(\n \"opt_type, min_sp, max_sp\", [(\"Calls\", None, None), (\"Puts\", 100.0, 1000.0)]\n)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 88, "n_words": 30, "vocab_size": 30, "complexity": 1, "nloc": 9, "token_counts": 40, "n_ast_nodes": 132, "n_identifiers": 6, "random_cut": "def vcr_config():\n return {\n \"filter_headers\": [(\"User-Agent\", None)],\n \"filter_query_parameters\": [\n (\"period1\", \"MOCK_PERIOD_1\"),\n (\"period2\", \"MOCK_PERIOD_2" }, { "id": 259690, "commit_id": "a739f6ca7cd54bd7a8a3c1e22b54d194098d85af", "repo": "scikit-learn", "path": "sklearn/utils/tests/test_utils.py", "file_name": "test_utils.py", "fun_name": "test_get_chunk_n_rows", "commit_message": "MNT Replace pytest.warns(None) in test_utils (#23137)", "code": "def test_get_chunk_n_rows(row_bytes, max_n_rows, working_memory, expected):\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", UserWarning)\n actual = get_chunk_n_rows(\n row_bytes=row_bytes,\n max_n_rows=max_n_rows,\n working_memory=working_memory,\n )\n\n assert actual == expected\n assert type(actual) is type(expected)\n with config_context(working_memory=working_memory):\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", UserWarning)\n actual = get_chunk_n_rows(row_bytes=row_bytes, max_n_rows=max_n_rows)\n assert actual == expected\n assert type(actual) is type(expected)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 150, "n_words": 42, "vocab_size": 25, "complexity": 1, "nloc": 16, "token_counts": 106, "n_ast_nodes": 172, "n_identifiers": 13, "random_cut": "def test_get_chunk_n_rows(row_bytes, max_n_rows, working_memory, expected):\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", UserWarning)\n actual = get_chunk_n_rows(\n row_bytes=row_bytes,\n 
max_n_rows=max_n_rows,\n working_memory=working_memory,\n )\n\n assert actual == expected\n assert type(actual) is type(expected)\n with config_context(working_memory=working_memory):\n with warnings.catch_warnings():\n warnings.simplefilter(\"e" }, { "id": 303247, "commit_id": "a502a8798ff74eb6185473df7f69553fc4663634", "repo": "core", "path": "homeassistant/components/skybell/binary_sensor.py", "file_name": "binary_sensor.py", "fun_name": "extra_state_attributes", "commit_message": "Add config flow to skybell (#70887)", "code": "def extra_state_attributes(self) -> dict[str, str | int | tuple[str, str]]:\n \n attrs = super().extra_state_attributes\n if event := self._event.get(CONST.CREATED_AT):\n attrs[\"event_date\"] = event\n return attrs\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 61, "n_words": 22, "vocab_size": 18, "complexity": 2, "nloc": 6, "token_counts": 51, "n_ast_nodes": 82, "n_identifiers": 13, "random_cut": "def extra_state_attributes(self) -> dict[str, str | int | tuple[str, str]]:\n \n attrs = super(" }, { "id": 299979, "commit_id": "eb77f8db8559dba95e5e36c8a9314f89e1ae82b1", "repo": "core", "path": "homeassistant/components/recorder/core.py", "file_name": "core.py", "fun_name": "get_session", "commit_message": "Complete strict typing for recorder (#71274)\n\n* Complete strict typing for recorder\r\n\r\n* update tests\r\n\r\n* Update tests/components/recorder/test_migrate.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Update tests/components/recorder/test_migrate.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Remove the asserts\r\n\r\n* remove ignore comments\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "def get_session(self) -> Session:\n \n if self._get_session is None:\n raise RuntimeError(\"The database connection has not been established\")\n return self._get_session()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 50, "n_words": 18, "vocab_size": 18, "complexity": 2, "nloc": 5, "token_counts": 26, "n_ast_nodes": 47, "n_identifiers": 5, "random_cut": "def get_session(self) -> Session:\n \n " }, { "id": 243365, "commit_id": "38b53a9fd704570fb29abd10910ea7939b1185e1", "repo": "Pillow", "path": "Tests/test_image_thumbnail.py", "file_name": "test_image_thumbnail.py", "fun_name": "test_load_first", "commit_message": "Do not call load() before draft()", "code": "def test_load_first():\n # load() may change the size of the image\n # Test that thumbnail() is calling it before performing size calculations\n with Image.open(\"Tests/images/g4_orientation_5.tif\") as im:\n im.thumbnail((64, 64))\n assert im.size == (64, 10)\n\n # Test thumbnail(), without draft(),\n # on an image that is large enough once load() has changed the size\n with Image.open(\"Tests/images/g4_orientation_5.tif\") as im:\n im.thumbnail((590, 88), reducing_gap=None)\n assert im.size == (590, 88)\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 109, "n_words": 64, "vocab_size": 45, "complexity": 1, "nloc": 7, "token_counts": 68, "n_ast_nodes": 117, "n_identifiers": 7, "random_cut": "def test_load_first():\n # load() may change the size of the image\n # Test that thumbnail() is calling it before performing size calculations\n with Image.open(\"Tests/images/g4_orientation_5.tif\") as 
im:\n im.thumbnail((64, 64))\n assert im.si" }, { "id": 127362, "commit_id": "3590a86db0369ce8a8f9c3965cddc9e4c817c2b8", "repo": "ray", "path": "python/ray/tune/tests/test_utils.py", "file_name": "test_utils.py", "fun_name": "test_format_vars", "commit_message": "[tune] Add timeout ro retry_fn to catch hanging syncs (#28155)\n\nSyncing sometimes hangs in pyarrow for unknown reasons. We should introduce a timeout for these syncing operations.\r\n\r\nSigned-off-by: Kai Fricke ", "code": "def test_format_vars():\n\n # Format brackets correctly\n assert (\n format_vars(\n {\n (\"a\", \"b\", \"c\"): 8.1234567,\n (\"a\", \"b\", \"d\"): [7, 8],\n (\"a\", \"b\", \"e\"): [[[3, 4]]],\n }\n )\n == \"c=8.1235,d=7_8,e=3_4\"\n )\n # Sorted by full keys, but only last key is reported\n assert (\n format_vars(\n {\n (\"a\", \"c\", \"x\"): [7, 8],\n (\"a\", \"b\", \"x\"): 8.1234567,\n }\n )\n == \"x=8.1235,x=7_8\"\n )\n # Filter out invalid chars. It's ok to have empty keys or values.\n assert (\n format_vars(\n {\n (\"a c?x\",): \" <;%$ok \",\n (\"some\",): \" \",\n }\n )\n == \"a_c_x=ok,some=\"\n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 349, "n_words": 88, "vocab_size": 56, "complexity": 1, "nloc": 29, "token_counts": 118, "n_ast_nodes": 202, "n_identifiers": 2, "random_cut": "def test_format_vars():\n\n # Format brackets correctly\n assert (\n format_vars(\n {\n " }, { "id": 216382, "commit_id": "72bffdc59f2f62bbaa5e4686d4a408802cec1e89", "repo": "salt", "path": "tests/pytests/functional/utils/win_dacl/test_file.py", "file_name": "test_file.py", "fun_name": "test_applies_to_this_folder_subfolders_files", "commit_message": "Add tests", "code": "def test_applies_to_this_folder_subfolders_files(test_dir):\n \n result = win_dacl.set_permissions(\n obj_name=test_dir,\n principal=\"Backup Operators\",\n permissions=\"full_control\",\n access_mode=\"grant\",\n applies_to=\"this_folder_subfolders_files\",\n obj_type=\"file\",\n reset_perms=False,\n protected=None,\n )\n assert result is True\n\n expected = {\n \"Not Inherited\": {\n \"Backup Operators\": {\n \"grant\": {\n \"applies to\": \"This folder, subfolders and files\",\n \"permissions\": \"Full control\",\n }\n }\n }\n }\n result = win_dacl.get_permissions(\n obj_name=test_dir,\n principal=\"Backup Operators\",\n obj_type=\"file\",\n )\n assert result == expected\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 264, "n_words": 56, "vocab_size": 38, "complexity": 1, "nloc": 28, "token_counts": 96, "n_ast_nodes": 167, "n_identifiers": 15, "random_cut": "def test_applies_to_this_folder_subfolders_files(test_dir):\n \n result = win_dacl.set_permissions(\n obj_name=test_dir,\n principal=\"Backup Operators\",\n permissions=\"full_control\",\n access_mode=\"grant\",\n applies_to=\"this_folder_subfolders_files\",\n obj_type=\"file\",\n reset_perms=False,\n protected=None,\n )\n assert result is True\n\n expected = {\n \"Not Inherited\": {\n \"Backup Operators\": {\n \"grant\": {\n \"applies to\": \"This folder, subfolders and files\",\n \"permissions\": \"Full control\",\n }\n }\n }\n }\n result = win_dacl.get_permissions(\n obj_name=test_dir,\n principal=\"Backup Operators\",\n obj_type=\"file\"," }, { "id": 175355, "commit_id": "acf7403f9baea3ae1119fc6b4a3298522188bf96", "repo": "cpython", "path": "Lib/test/test_enum.py", "file_name": "test_enum.py", 
"fun_name": "test_overridden_str", "commit_message": "bpo-40066: [Enum] update str() and format() output (GH-30582)\n\nUndo rejected PEP-663 changes:\r\n\r\n- restore `repr()` to its 3.10 status\r\n- restore `str()` to its 3.10 status\r\n\r\nNew changes:\r\n\r\n- `IntEnum` and `IntFlag` now leave `__str__` as the original `int.__str__` so that str() and format() return the same result\r\n- zero-valued flags without a name have a slightly changed repr(), e.g. `repr(Color(0)) == ''`\r\n- update `dir()` for mixed-in types to return all the methods and attributes of the mixed-in type\r\n- added `_numeric_repr_` to `Flag` to control display of unnamed values\r\n- enums without doc strings have a more comprehensive doc string added\r\n- `ReprEnum` added -- inheriting from this makes it so only `__repr__` is replaced, not `__str__` nor `__format__`; `IntEnum`, `IntFlag`, and `StrEnum` all inherit from `ReprEnum`", "code": "def test_overridden_str(self):\n NS = self.NewStrEnum\n self.assertEqual(str(NS.first), NS.first.name.upper())\n self.assertEqual(format(NS.first), NS.first.name.upper())\n", "url": "https://github.com/python/cpython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 29, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 52, "n_ast_nodes": 83, "n_identifiers": 10, "random_cut": "def test_overridden_str(self):\n NS = self.NewSt" }, { "id": 91369, "commit_id": "284e980df0018f8baee659999268bdd4c7d08255", "repo": "sentry", "path": "tests/sentry/integrations/gitlab/test_issues.py", "file_name": "test_issues.py", "fun_name": "test_after_link_issue_failure", "commit_message": "ref: replace self.assertRaises with pytest.raises (#35685)\n\n* add flake8 plugin to detect assertRaises\r\n\r\n* ref: replace self.assertRaises with pytest.raises\r\n\r\n* non-sed fixes", "code": "def test_after_link_issue_failure(self):\n responses.add(\n responses.POST,\n \"https://example.gitlab.com/api/v4/projects/2/issues/321/notes\",\n status=502,\n )\n data = {\"externalIssue\": \"2#321\", \"comment\": \"This is not good.\"}\n external_issue = ExternalIssue.objects.create(\n organization_id=self.organization.id, integration_id=self.integration.id, key=\"2#321\"\n )\n with pytest.raises(IntegrationError):\n self.installation.after_link_issue(external_issue, data=data)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 123, "n_words": 27, "vocab_size": 25, "complexity": 1, "nloc": 12, "token_counts": 79, "n_ast_nodes": 132, "n_identifiers": 22, "random_cut": "def test_after_link_issue_failure(self):\n responses.add(\n responses.POST,\n \"https://example.gitlab.com/api/v4/projects/2/issues/321/notes\",\n status=502,\n )\n data = {\"externalIssue\": \"2#321\", \"comment\": \"This is not good.\"}\n external_issue = ExternalIssue.objects.create(\n organizatio" }, { "id": 196328, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/integrals/intpoly.py", "file_name": "intpoly.py", "fun_name": "left_integral2D", "commit_message": "Updated import locations", "code": "def left_integral2D(m, index, facets, x0, expr, gens):\n \n value = S.Zero\n for j in range(0, m):\n intersect = ()\n if j in ((index - 1) % m, (index + 1) % m):\n intersect = intersection(facets[index], facets[j], \"segment2D\")\n if intersect:\n distance_origin = norm(tuple(map(lambda x, y: x - y,\n intersect, x0)))\n if is_vertex(intersect):\n 
if isinstance(expr, Expr):\n if len(gens) == 3:\n expr_dict = {gens[0]: intersect[0],\n gens[1]: intersect[1],\n gens[2]: intersect[2]}\n else:\n expr_dict = {gens[0]: intersect[0],\n gens[1]: intersect[1]}\n value += distance_origin * expr.subs(expr_dict)\n else:\n value += distance_origin * expr\n return value\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 455, "n_words": 85, "vocab_size": 57, "complexity": 7, "nloc": 22, "token_counts": 197, "n_ast_nodes": 293, "n_identifiers": 26, "random_cut": "def left_integral2D(m, index, facets, x0, expr, gens):\n \n value = S.Zero\n for j in range(0, m):\n intersect = ()\n if j in ((index - 1) % m, (index + 1) % m):\n intersect = intersection(facets[index], facets[j], \"segment2D\")\n if intersect:\n distance_origin = norm(tuple(map(lambda x, y: x - y,\n intersect, x0)))\n if is_vertex(intersect):\n if isinstance(expr, Expr):\n if len(gens) == 3:\n expr_dict = {gens[0]: intersect[0],\n gens[1]: intersect[1],\n gens[2]: intersect[2]}\n else:\n expr_dict = {gens[0]: intersect[0],\n " }, { "id": 214403, "commit_id": "660b3006dfa49841c553ac952624aac5d18b634d", "repo": "flair", "path": "flair/datasets/treebanks.py", "file_name": "treebanks.py", "fun_name": "_read_next_sentence", "commit_message": "Label logic and new unit tests", "code": "def _read_next_sentence(self, file):\n line = file.readline()\n sentence: Sentence = Sentence([])\n\n # current token ID\n token_idx = 0\n\n # handling for the awful UD multiword format\n current_multiword_text = \"\"\n current_multiword_sequence = \"\"\n current_multiword_first_token = 0\n current_multiword_last_token = 0\n\n while line:\n line = line.strip()\n fields: List[str] = re.split(\"\\t+\", line)\n\n # end of sentence\n if line == \"\":\n if len(sentence) > 0:\n break\n\n # comments\n elif line.startswith(\"#\"):\n line = file.readline()\n continue\n\n # ellipsis\n elif \".\" in fields[0]:\n line = file.readline()\n continue\n\n # if token is a multi-word\n elif \"-\" in fields[0]:\n line = file.readline()\n\n current_multiword_first_token = int(fields[0].split(\"-\")[0])\n current_multiword_last_token = int(fields[0].split(\"-\")[1])\n current_multiword_text = fields[1]\n current_multiword_sequence = \"\"\n\n if self.split_multiwords:\n continue\n else:\n token = Token(fields[1])\n token.add_label(\"lemma\", str(fields[2]))\n if len(fields) > 9 and \"SpaceAfter=No\" in fields[9]:\n token.whitespace_after = False\n sentence.add_token(token)\n token_idx += 1\n\n # normal single-word tokens\n else:\n\n # if we don't split multiwords, skip over component words\n if not self.split_multiwords and token_idx < current_multiword_last_token:\n token_idx += 1\n line = file.readline()\n continue\n\n # add token\n token = Token(fields[1], head_id=int(fields[6]))\n token.add_label(\"lemma\", str(fields[2]))\n token.add_label(\"upos\", str(fields[3]))\n token.add_label(\"pos\", str(fields[4]))\n token.add_label(\"dependency\", str(fields[7]))\n\n if len(fields) > 9 and \"SpaceAfter=No\" in fields[9]:\n token.whitespace_after = False\n\n # add morphological tags\n for morph in str(fields[5]).split(\"|\"):\n if \"=\" not in morph:\n continue\n token.add_label(morph.split(\"=\")[0].lower(), morph.split(\"=\")[1])\n\n if len(fields) > 10 and str(fields[10]) == \"Y\":\n token.add_label(\"frame\", str(fields[11]))\n\n token_idx += 1\n\n # derive whitespace logic for multiwords\n if token_idx <= current_multiword_last_token:\n 
current_multiword_sequence += token.text\n\n # print(token)\n # print(current_multiword_last_token)\n # print(current_multiword_first_token)\n # if multi-word equals component tokens, there should be no whitespace\n if token_idx == current_multiword_last_token and current_multiword_sequence == current_multiword_text:\n # go through all tokens in subword and set whitespace_after information\n for i in range(current_multiword_last_token - current_multiword_first_token):\n # print(i)\n sentence[-(i + 1)].whitespace_after = False\n\n sentence.add_token(token)\n\n line = file.readline()\n return sentence\n\n", "url": "https://github.com/flairNLP/flair.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 1408, "n_words": 279, "vocab_size": 143, "complexity": 22, "nloc": 62, "token_counts": 488, "n_ast_nodes": 831, "n_identifiers": 33, "random_cut": "def _read_next_sentence(self, file):\n line = file.readline()\n sentence: Sentence = Sentence([])\n\n # current token ID\n token_idx = 0\n\n # handling for the awful UD multiword format\n current_multiword_text = \"\"\n current_multiword_sequence = \"\"\n current_multiword_first_token = 0\n current_multiword_last_token = 0\n\n while line:\n line = line.strip()\n fields: List[str] = re.split(\"\\t+\", line)\n\n # end of sentence\n if line == \"\":\n if len(sentence) > 0:\n break\n\n # comments\n elif line.startswith(\"#\"):\n line = file.readline()\n continue\n\n # ellipsis\n elif" }, { "id": 281220, "commit_id": "006b3570b795215a17c64841110b649b03db9a98", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/cryptocurrency/crypto_controller.py", "file_name": "crypto_controller.py", "fun_name": "call_disc", "commit_message": "Baseclass (#1141)\n\n* A working decorator\r\n\r\n* Basic intro\r\n\r\n* Added more\r\n\r\n* Refactor\r\n\r\n* Refactor\r\n\r\n* Cleaned code\r\n\r\n* Simplified function (thanks Chavi)\r\n\r\n* Small change\r\n\r\n* Updating tests : fix issue with mock\r\n\r\n* Updating tests : fix remaining mocks after merging\r\n\r\n* Updating tests : black\r\n\r\n* Cleaned up\r\n\r\n* Finished base cases\r\n\r\n* Notes\r\n\r\n* Slight changes\r\n\r\n* Added dynamic options handling, error persists\r\n\r\n* Fixed pylint issues\r\n\r\n* Fixed mock\r\n\r\n* fix decorator with dynamic dictionary of args\r\n\r\n* move choices from dynamic to const in crypto/ov\r\n\r\n* Updated var names\r\n\r\n* Check\r\n\r\n* Moved decorators\r\n\r\n* Fixed import issues\r\n\r\n* Fixed tests, update payoff controller\r\n\r\n* Fixed tests\r\n\r\n* Fixed pylint\r\n\r\n* Updated files\r\n\r\n* Added base class\r\n\r\n* Added reset\r\n\r\n* Improved base class\r\n\r\n* For James\r\n\r\n* More menues converted\r\n\r\n* Added contexts\r\n\r\n* 24 controllers left\r\n\r\n* 18 Controllers left\r\n\r\n* Changes choices\r\n\r\n* 9 controllers left\r\n\r\n* Added all controllers\r\n\r\n* Fixed glitch\r\n\r\n* Replaced all improper callings of class\r\n\r\n* Removed menu decorator\r\n\r\n* refactored try_except\r\n\r\n* Last commit\r\n\r\n* Black fix\r\n\r\n* Bug fix\r\n\r\n* Added James' new menus\r\n\r\n* Fixed tests\r\n\r\n* Fixed 8 tests\r\n\r\n* Fixing mypy issue\r\n\r\n* Updating tests : stocks/options\r\n\r\n* Fixed options\r\n\r\n* Fixed tests\r\n\r\n* Updating tests : stocks/options\r\n\r\n* Fixed tests\r\n\r\n* More test fixes\r\n\r\n* Updating tests : stocks/ba\r\n\r\n* Fixed options test\r\n\r\n* More bug fixes\r\n\r\n* Fixed tests\r\n\r\n* fixed pylint\r\n\r\n* Skipped test_call_load\r\n\r\n* Add typings to base 
class\r\n\r\n* Fix issue with appending auto completer options + bugfixes\r\n\r\n* Add typings to base class\r\n\r\n* Terminal throws error for bad path\r\n\r\n* sexy solution to auto completer in runtime\r\n\r\n* more sexy reset with reset_level stored\r\n\r\n* no so sexy jump between indirect menus\r\n\r\n* Removing choices argument\r\n\r\n* refactor custom_reset\r\n\r\n* Fixed tests\r\n\r\n* Theo fixes\r\n\r\n* Added back function\r\n\r\n* Fixed tests\r\n\r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: DidierRLopes ", "code": "def call_disc(self, _):\n \n from gamestonk_terminal.cryptocurrency.discovery.discovery_controller import (\n DiscoveryController,\n )\n\n self.queue = DiscoveryController(queue=self.queue).menu()\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 51, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 37, "n_ast_nodes": 57, "n_identifiers": 10, "random_cut": "def call_disc(self, _):\n \n from gamestonk_terminal.cryptocurrency.discovery.discovery_controller import (\n DiscoveryController,\n )\n\n self.queue =" }, { "id": 73999, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/tests/test_blocks.py", "file_name": "test_blocks.py", "fun_name": "test_get_searchable_content_whitespace", "commit_message": "Reformat with black", "code": "def test_get_searchable_content_whitespace(self):\n block = blocks.RichTextBlock()\n value = RichText(\"

<p>mashed</p><p>potatoes</p>\")\n result = block.get_searchable_content(value)\n self.assertEqual(result, [\"mashed potatoes\"])\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 14, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 36, "n_ast_nodes": 63, "n_identifiers": 10, "random_cut": "def test_get_searchable_content_whitespace(self):\n block = blocks.RichTextBlock()\n value = RichText(\"<p>mashed</p><p>
    potatoes", "code": "def get_invalid_runtime_envs() -> List[Dict]:\n \n\n return [\n # Local URIs in working_dir and py_modules\n {\n \"working_dir\": \".\",\n \"py_modules\": [\n \"/Desktop/my_project\",\n (\n \"https://github.com/shrekris-anyscale/\"\n \"test_deploy_group/archive/HEAD.zip\"\n ),\n ],\n }\n ]\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 172, "n_words": 26, "vocab_size": 25, "complexity": 1, "nloc": 14, "token_counts": 31, "n_ast_nodes": 61, "n_identifiers": 3, "random_cut": "def get_invalid_runtime_envs() -> List[Dict]:\n \n\n return [\n # Local URIs in working_dir and py_modules\n {\n \"working_dir\": \".\",\n \"py_modules\": [\n \"/Desktop/my_project\",\n " }, { "id": 243216, "commit_id": "0ed03d4a58d5f31d570fc9fc391298ce032ad7ce", "repo": "Pillow", "path": "Tests/test_file_mpo.py", "file_name": "test_file_mpo.py", "fun_name": "test_app", "commit_message": "Parametrize tests", "code": "def test_app(test_file):\n # Test APP/COM reader (@PIL135)\n with Image.open(test_file) as im:\n assert im.applist[0][0] == \"APP1\"\n assert im.applist[1][0] == \"APP2\"\n assert (\n im.applist[1][1][:16] == b\"MPF\\x00MM\\x00*\\x00\\x00\\x00\\x08\\x00\\x03\\xb0\\x00\"\n )\n assert len(im.applist) == 2\n\n\n@pytest.mark.parametrize(\"test_file\", test_files)", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"test_file\", test_files)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 81, "n_words": 31, "vocab_size": 25, "complexity": 1, "nloc": 8, "token_counts": 67, "n_ast_nodes": 138, "n_identifiers": 11, "random_cut": "def test_app(test_file):\n # Test APP/COM reader (@PIL135)\n with Image.open(test_file) as im:\n assert im.applist[0][0] == \"APP1\"\n assert im.applist[1][0] == \"APP2\"\n assert (\n im.applist[1][1][:16] == b\"MPF\\x00MM\\x00*\\x00\\x00\\x00\\x08\\x00\\x03\\xb0\\x00\"\n )\n assert len(im.applist) == 2\n\n\n@pytest.mark.pa" }, { "id": 198671, "commit_id": "19114acf6514bc87f5c8cfde35e0fcab88965be7", "repo": "sympy", "path": "sympy/vector/integrals.py", "file_name": "integrals.py", "fun_name": "_bounds_case", "commit_message": "Code optimizations", "code": "def _bounds_case(cls, parameters, limits):\n\n V = list(limits.keys())\n E = []\n\n for p in V:\n lower_p = limits[p][0]\n upper_p = limits[p][1]\n\n lower_p = lower_p.atoms()\n upper_p = upper_p.atoms()\n E.extend((p, q) for q in V if p != q and\n (lower_p.issuperset({q}) or upper_p.issuperset({q})))\n\n if not E:\n return parameters\n else:\n return topological_sort((V, E), key=default_sort_key)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 181, "n_words": 50, "vocab_size": 36, "complexity": 7, "nloc": 14, "token_counts": 119, "n_ast_nodes": 183, "n_identifiers": 18, "random_cut": "def _bounds_case(cls, parameters, limits):\n\n V = list(limits.keys())\n E = []\n\n for p in V:\n lower_p = limits[p][0]\n upper_p = limits[p][1]\n\n lower_p = lower_p.atoms()\n upper_p = upper_p.atoms()\n E.extend((p, q) for q in V if p != q and\n (lower_p.issuperset({q}) or upper_p.issuperset({q})))\n\n if not E:\n return parameters\n else:\n return topological_sort((V, E), key=default_so" }, { "id": 59568, "commit_id": "447f475d95da0b19b9d94e9367dee05dd248ed53", "repo": "prefect", "path": "tests/cli/test_block.py", "file_name": "test_block.py", "fun_name": 
"test_listing_blocks_after_saving_a_block", "commit_message": "Remove console, update client logs, fix tests, and add docs", "code": "def test_listing_blocks_after_saving_a_block():\n system.JSON(value=\"a casual test block\").save(\"wildblock\")\n\n expected_output = (\n \"ID\",\n \"Type\",\n \"Name\",\n \"Slug\",\n \"wildblock\",\n )\n\n invoke_and_assert(\n [\"block\", \"ls\"],\n expected_code=0,\n expected_output_contains=expected_output,\n expected_line_count=9,\n )\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 99, "n_words": 22, "vocab_size": 21, "complexity": 1, "nloc": 15, "token_counts": 52, "n_ast_nodes": 92, "n_identifiers": 10, "random_cut": "def test_listing_blocks_after_saving_a_block():\n system.JSON(value=\"a casual test block\").save(\"wildblock\")\n\n expected_output = (\n \"ID\",\n \"Type\",\n \"Name\",\n \"Slug\",\n \"wildblock\",\n )\n\n invoke_and_assert(\n [\"bl" }, { "id": 217875, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/http/server.py", "file_name": "server.py", "fun_name": "handle_one_request", "commit_message": "add python 3.10.4 for windows", "code": "def handle_one_request(self):\n \n try:\n self.raw_requestline = self.rfile.readline(65537)\n if len(self.raw_requestline) > 65536:\n self.requestline = ''\n self.request_version = ''\n self.command = ''\n self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG)\n return\n if not self.raw_requestline:\n self.close_connection = True\n return\n if not self.parse_request():\n # An error code has been sent, just exit\n return\n mname = 'do_' + self.command\n if not hasattr(self, mname):\n self.send_error(\n HTTPStatus.NOT_IMPLEMENTED,\n \"Unsupported method (%r)\" % self.command)\n return\n method = getattr(self, mname)\n method()\n self.wfile.flush() #actually send the response if not already done.\n except TimeoutError as e:\n #a read or a write timed out. 
Discard this connection\n self.log_error(\"Request timed out: %r\", e)\n self.close_connection = True\n return\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 463, "n_words": 95, "vocab_size": 70, "complexity": 6, "nloc": 27, "token_counts": 143, "n_ast_nodes": 251, "n_identifiers": 24, "random_cut": "def handle_one_request(self):\n \n try:\n self.raw_requestline = self.rfile.readline(65537)\n if len(self.raw_requestline) > 65536:\n self.requestline = ''\n self.request_version = ''\n self.command = ''\n self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG)\n return\n if not self.raw_requestline:\n self.close_connection = True\n return\n if not self.parse_request():\n # An error code has been sent, just exit\n return\n mname = 'do_' + self.command\n if not hasattr(self, mname):\n self.send_error(\n HTTPStatus.NOT_IMPLEMENTED,\n \"Unsupported method (%r)\" % self.command)\n return\n method = getattr(self, mn" }, { "id": 181326, "commit_id": "f60053d85ffb7f0fd4bb648906914370b7aa4598", "repo": "gradio", "path": "test/test_components.py", "file_name": "test_components.py", "fun_name": "test_update_visibility", "commit_message": "gr.ScatterPlot component (#2764)\n\n* Try clean install\r\n\r\n* Resolve peer dependencies?\r\n\r\n* CHANGELOG\r\n\r\n* Add outbreak_forcast notebook\r\n\r\n* generate again\r\n\r\n* CHANGELOG\r\n\r\n* Add image to changelog\r\n\r\n* Color palette\r\n\r\n* Fix colors + legend\r\n\r\n* Tooltip\r\n\r\n* Add axis titles\r\n\r\n* Clean up code a bit + quant scales\r\n\r\n* Add code\r\n\r\n* Add size, shape + rename legend title\r\n\r\n* Fix demo\r\n\r\n* Add update + demo\r\n\r\n* Handle darkmode better\r\n\r\n* Try new font\r\n\r\n* Use sans-serif\r\n\r\n* Add caption\r\n\r\n* Changelog + tests\r\n\r\n* More tests\r\n\r\n* Address comments\r\n\r\n* Make caption fontsize smaller and enable interactivity\r\n\r\n* Add docstrings + add height + width\r\n\r\n* Use normal font weight\r\n\r\n* Make last values keyword only\r\n\r\nCo-authored-by: Abubakar Abid \r\n\r\n* Fix typo\r\n\r\n* Accept value as fn\r\n\r\n* reword changelog a bit\r\n\r\nCo-authored-by: Abubakar Abid ", "code": "def test_update_visibility(self):\n output = gr.ScatterPlot.update(visible=False)\n assert not output[\"visible\"]\n assert output[\"value\"] is gr.components._Keywords.NO_VALUE\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 32, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 36, "n_ast_nodes": 60, "n_identifiers": 10, "random_cut": "def test_update_visibility(self):\n output = gr.ScatterPlot.update(visible=False)\n assert not output[\"visible\"]\n assert output[\"value\"] is " }, { "id": 135534, "commit_id": "9e3e3280e440c3ce026e29c1cc8d1f73f124e21a", "repo": "ray", "path": "dashboard/modules/node/node_head.py", "file_name": "node_head.py", "fun_name": "get_all_nodes", "commit_message": "[Dashboard] Remove the view data (#29701)\n\nView data is unnecessary in the new dashboard. Same for node detail view.\r\n\r\nWhen the node is idle at 250 nodes, CPU usage is about 20~30%, and I found most of usage is from MessageToDict. 
Since we have lots of view data, I assume most of overhead is from view data.", "code": "async def get_all_nodes(self, req) -> aiohttp.web.Response:\n view = req.query.get(\"view\")\n if view == \"summary\":\n all_node_summary = await DataOrganizer.get_all_node_summary()\n return dashboard_optional_utils.rest_response(\n success=True, message=\"Node summary fetched.\", summary=all_node_summary\n )\n elif view is not None and view.lower() == \"hostNameList\".lower():\n alive_hostnames = set()\n for node in DataSource.nodes.values():\n if node[\"state\"] == \"ALIVE\":\n alive_hostnames.add(node[\"nodeManagerHostname\"])\n return dashboard_optional_utils.rest_response(\n success=True,\n message=\"Node hostname list fetched.\",\n host_name_list=list(alive_hostnames),\n )\n else:\n return dashboard_optional_utils.rest_response(\n success=False, message=f\"Unknown view {view}\"\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 298, "n_words": 63, "vocab_size": 46, "complexity": 7, "nloc": 28, "token_counts": 172, "n_ast_nodes": 239, "n_identifiers": 27, "random_cut": "async def get_all_nodes(self, req) -> aiohttp.web.Response:\n view = req.query.get(\"view\")\n if view == \"summary\":\n all_node_summary = await DataOrganizer.get_all_node_summary()\n return dashboard_optional_utils.rest_response(\n success=True, message=\"Node summary fetched.\", summary=all_node_summary\n )\n elif view is not None and view.lower() == \"hostNameList\".lower():\n alive_hostnames = set()\n for node in DataSource.nodes.values():\n if node[\"state\"] == \"ALIVE\":\n alive_hostnames.add(node[\"nodeManagerHostname\"])\n return dashboard_optional_utils.rest_response(\n success=True,\n message=\"Node hostname list fetched.\",\n host_name_list=list(alive_hostnames),\n )\n else:\n return dashboard_optional_utils.rest_response(\n success=False, message=f\"Unknown view {view}\"\n " }, { "id": 307021, "commit_id": "8bdeb3ca5b27b5d92163a14c7dd7c5eca37cfe13", "repo": "core", "path": "homeassistant/components/vlc/media_player.py", "file_name": "media_player.py", "fun_name": "media_play", "commit_message": "Use new media player enums [u-w] (#78067)", "code": "def media_play(self) -> None:\n \n self._vlc.play()\n self._state = MediaPlayerState.PLAYING\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 22, "n_ast_nodes": 39, "n_identifiers": 7, "random_cut": "def media_play(self) -> None:\n \n self._vlc.play()\n self._state = MediaP" }, { "id": 290750, "commit_id": "607a0e7697a640e524405f5560868125781bdf0c", "repo": "core", "path": "tests/components/recorder/test_statistics.py", "file_name": "test_statistics.py", "fun_name": "test_compile_hourly_statistics", "commit_message": "Reduce size of get_statistics_during_period WS API response (#82131)", "code": "def test_compile_hourly_statistics(hass_recorder):\n \n hass = hass_recorder()\n instance = recorder.get_instance(hass)\n setup_component(hass, \"sensor\", {})\n zero, four, states = record_states(hass)\n hist = history.get_significant_states(hass, zero, four)\n assert dict(states) == dict(hist)\n\n # Should not fail if there is nothing there yet\n stats = get_latest_short_term_statistics(\n hass, [\"sensor.test1\"], {\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"}\n )\n assert stats == 
{}\n\n for kwargs in ({}, {\"statistic_ids\": [\"sensor.test1\"]}):\n stats = statistics_during_period(hass, zero, period=\"5minute\", **kwargs)\n assert stats == {}\n stats = get_last_short_term_statistics(\n hass,\n 0,\n \"sensor.test1\",\n True,\n {\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"},\n )\n assert stats == {}\n\n do_adhoc_statistics(hass, start=zero)\n do_adhoc_statistics(hass, start=four)\n wait_recording_done(hass)\n expected_1 = {\n \"start\": process_timestamp(zero),\n \"end\": process_timestamp(zero + timedelta(minutes=5)),\n \"mean\": approx(14.915254237288135),\n \"min\": approx(10.0),\n \"max\": approx(20.0),\n \"last_reset\": None,\n \"state\": None,\n \"sum\": None,\n }\n expected_2 = {\n \"start\": process_timestamp(four),\n \"end\": process_timestamp(four + timedelta(minutes=5)),\n \"mean\": approx(20.0),\n \"min\": approx(20.0),\n \"max\": approx(20.0),\n \"last_reset\": None,\n \"state\": None,\n \"sum\": None,\n }\n expected_stats1 = [expected_1, expected_2]\n expected_stats2 = [expected_1, expected_2]\n\n # Test statistics_during_period\n stats = statistics_during_period(hass, zero, period=\"5minute\")\n assert stats == {\"sensor.test1\": expected_stats1, \"sensor.test2\": expected_stats2}\n\n # Test statistics_during_period with a far future start and end date\n future = dt_util.as_utc(dt_util.parse_datetime(\"2221-11-01 00:00:00\"))\n stats = statistics_during_period(hass, future, end_time=future, period=\"5minute\")\n assert stats == {}\n\n # Test statistics_during_period with a far future end date\n stats = statistics_during_period(hass, zero, end_time=future, period=\"5minute\")\n assert stats == {\"sensor.test1\": expected_stats1, \"sensor.test2\": expected_stats2}\n\n stats = statistics_during_period(\n hass, zero, statistic_ids=[\"sensor.test2\"], period=\"5minute\"\n )\n assert stats == {\"sensor.test2\": expected_stats2}\n\n stats = statistics_during_period(\n hass, zero, statistic_ids=[\"sensor.test3\"], period=\"5minute\"\n )\n assert stats == {}\n\n # Test get_last_short_term_statistics and get_latest_short_term_statistics\n stats = get_last_short_term_statistics(\n hass,\n 0,\n \"sensor.test1\",\n True,\n {\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"},\n )\n assert stats == {}\n\n stats = get_last_short_term_statistics(\n hass,\n 1,\n \"sensor.test1\",\n True,\n {\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"},\n )\n assert stats == {\"sensor.test1\": [expected_2]}\n\n stats = get_latest_short_term_statistics(\n hass, [\"sensor.test1\"], {\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"}\n )\n assert stats == {\"sensor.test1\": [expected_2]}\n\n metadata = get_metadata(hass, statistic_ids=['sensor.test1\"'])\n\n stats = get_latest_short_term_statistics(\n hass,\n [\"sensor.test1\"],\n {\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"},\n metadata=metadata,\n )\n assert stats == {\"sensor.test1\": [expected_2]}\n\n stats = get_last_short_term_statistics(\n hass,\n 2,\n \"sensor.test1\",\n True,\n {\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"},\n )\n assert stats == {\"sensor.test1\": expected_stats1[::-1]}\n\n stats = get_last_short_term_statistics(\n hass,\n 3,\n \"sensor.test1\",\n True,\n {\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"},\n )\n assert stats == {\"sensor.test1\": expected_stats1[::-1]}\n\n stats = get_last_short_term_statistics(\n hass,\n 1,\n \"sensor.test3\",\n True,\n {\"last_reset\", \"max\", \"mean\", \"min\", 
\"state\", \"sum\"},\n )\n assert stats == {}\n\n instance.get_session().query(StatisticsShortTerm).delete()\n # Should not fail there is nothing in the table\n stats = get_latest_short_term_statistics(\n hass, [\"sensor.test1\"], {\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"}\n )\n assert stats == {}\n\n\n@pytest.fixture", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 996, "n_words": 394, "vocab_size": 126, "complexity": 2, "nloc": 119, "token_counts": 763, "n_ast_nodes": 1277, "n_identifiers": 46, "random_cut": "def test_compile_hourly_statistics(hass_recorder):\n \n hass = hass_recorder()\n instance = recorder.get_instance(hass)\n setup_component(hass, \"sensor\", {})\n zero, four, states = record_states(hass)\n hist = history.get_significant_states(hass, zero, four)\n assert dict(states) == dict(hist)\n\n # Should not fail if there is nothing there yet\n stats = get_latest_short_term_statistics(\n hass, [\"sensor.test1\"], {\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"}\n )\n assert stats == {}\n\n for kwargs in ({}, {\"statistic_ids\": [\"sensor.test1\"]}):\n stats = statistics_during_period(hass, zero, period=\"5minute\", **kwargs)\n assert stats == {}\n stats = get_last_short_term_statistics(\n hass,\n 0,\n \"sensor.test1\",\n True,\n {\"last_reset\", \"max\", \"mean\", \"min\", \"state\", \"sum\"},\n )\n assert stats == {}\n\n do_adhoc_statistics(hass, start=zero)\n do_adhoc_statistics(hass, start=four)\n wait_recording_done(hass)\n expected_1 = {\n \"start\": process_time" }, { "id": 54326, "commit_id": "763390cded874b2427017347c75f0fd35b743e1a", "repo": "prefect", "path": "tests/orion/models/test_orm.py", "file_name": "test_orm.py", "fun_name": "test_repr", "commit_message": "Add ORM repr", "code": "async def test_repr(self, db, session, flow):\n assert repr(flow) == f\"Flow(id={flow.id})\"\n assert repr(db.Flow()) == f\"Flow(id=None)\"\n flow_id = uuid4()\n assert repr(db.Flow(id=flow_id)) == f\"Flow(id={flow_id})\"\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 48, "n_words": 21, "vocab_size": 17, "complexity": 1, "nloc": 5, "token_counts": 51, "n_ast_nodes": 93, "n_identifiers": 10, "random_cut": "async def test_repr(self, db, session, flow):\n assert repr(flow) == f\"Flow(id={flow.id})\"\n assert repr(db.Flow()) == f\"Flow(id=None)\"\n flow_id = uuid4()\n assert repr(db.Flow(id=flow_id)) == f\"Flo" }, { "id": 124971, "commit_id": "fb54679a239e4c7368a72a2fe3023cac04380827", "repo": "ray", "path": "python/ray/data/tests/test_stats.py", "file_name": "test_stats.py", "fun_name": "test_dataset_split_stats", "commit_message": "[Datasets] Refactor `split_at_indices()` to minimize number of split tasks and data movement. (#26363)\n\nThe current Dataset.split_at_indices() implementation suffers from O(n^2) memory usage in the small-split case (see issue) due to recursive splitting of the same blocks. This PR implements a split_at_indices() algorithm that minimizes the number of split tasks and data movement while ensuring that at most one block is used in each split task, for the sake of memory stability. 
Co-authored-by: scv119 ", "code": "def test_dataset_split_stats(ray_start_regular_shared, tmp_path):\n ds = ray.data.range(100, parallelism=10).map(lambda x: x + 1)\n dses = ds.split_at_indices([49])\n dses = [ds.map(lambda x: x + 1) for ds in dses]\n for ds_ in dses:\n stats = canonicalize(ds_.stats())\n assert (\n stats\n == \n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 92, "n_words": 37, "vocab_size": 25, "complexity": 3, "nloc": 33, "token_counts": 81, "n_ast_nodes": 128, "n_identifiers": 15, "random_cut": "def test_dataset_split_stats(ray_start_regular_shared, tmp_path):\n ds = ray.data.range(100, parallelism=10).map(lambda x: x + 1)\n dses = ds.split_at_indices([49])\n dses = [ds.map(lambda x: x + 1) for ds in dses]\n for ds_ in dses:\n stats = canonicalize(ds_.stats())\n assert (\n " }, { "id": 79979, "commit_id": "5c1c2c8f531d96f4568f6dfa6ce71bc32dd9d16c", "repo": "wagtail", "path": "wagtail/images/fields.py", "file_name": "fields.py", "fun_name": "check_image_pixel_size", "commit_message": "Enforce the use of a single string formatting mechanism for translation source strings\n\nClose #9377", "code": "def check_image_pixel_size(self, f):\n # Upload pixel size checking can be disabled by setting max upload pixel to None\n if self.max_image_pixels is None:\n return\n\n # Check the pixel size\n width, height = f.image.get_size()\n frames = f.image.get_frame_count()\n num_pixels = width * height * frames\n\n if num_pixels > self.max_image_pixels:\n raise ValidationError(\n self.error_messages[\"file_too_many_pixels\"]\n % {\"num_pixels\": num_pixels, \"max_pixels_count\": self.max_image_pixels},\n code=\"file_too_many_pixels\",\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 182, "n_words": 56, "vocab_size": 45, "complexity": 3, "nloc": 12, "token_counts": 76, "n_ast_nodes": 127, "n_identifiers": 14, "random_cut": "def check_image_pixel_size(self, f):\n # Upload pixel size checking can be disabled by setting max upload pixel to None\n if self.max_image_pixels is None:\n return\n\n # Check the pixel size\n width, height = f.image.get_size()\n frames = f.image.get_frame_cou" }, { "id": 89912, "commit_id": "32f7a18046786e84cd852334178a3ecedefb12cc", "repo": "sentry", "path": "tests/sentry/models/test_group.py", "file_name": "test_group.py", "fun_name": "test_get_latest_event", "commit_message": "feat(issue-platform): Include the `IssueOccurrence` with the `GroupEvent` when fetching latest event (#42279)\n\nThis ensures that when we fetch the latest event for a `Group` that if\r\nan `IssueOccurrence` exists and is associated with the event that we\r\nfetch it and include it in the `GroupEvent`.\r\n\r\nThis also adds various other necessary work to be able to query this\r\ndataset in snuba. 
I haven't included all the columns, that can happen as\r\nneeded.", "code": "def test_get_latest_event(self):\n self.store_event(\n data={\"event_id\": \"a\" * 32, \"fingerprint\": [\"group-1\"], \"timestamp\": self.two_min_ago},\n project_id=self.project.id,\n )\n self.store_event(\n data={\"event_id\": \"b\" * 32, \"fingerprint\": [\"group-1\"], \"timestamp\": self.min_ago},\n project_id=self.project.id,\n )\n\n group = Group.objects.first()\n\n group_event = group.get_latest_event()\n\n assert group_event.event_id == \"b\" * 32\n assert group_event.occurrence is None\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 139, "n_words": 40, "vocab_size": 27, "complexity": 1, "nloc": 13, "token_counts": 105, "n_ast_nodes": 179, "n_identifiers": 17, "random_cut": "def test_get_latest_event(self):\n self.store_event(\n data={\"event_id\": \"a\" * 32, \"fingerprint\": [\"group-1\"], \"timestamp\": self.two_min_ago},\n project_id=self.project.id,\n )\n self.store_event(\n data={\"event_id\": \"b\" * 32, \"fingerprint\": [\"group-1\"], \"timestamp\": self.min_ago},\n project_id=self.project.id,\n )\n\n group = Group.objects.first()\n\n group_event = group." }, { "id": 188186, "commit_id": "e259d2a9e9167c58fa75a78d1050dd5dcfde96f4", "repo": "jumpserver", "path": "apps/common/permissions.py", "file_name": "permissions.py", "fun_name": "has_permission", "commit_message": "fix: fix rbac to dev (#7636)\n\n* feat: 添加 RBAC 应用模块\r\n\r\n* feat: 添加 RBAC Model、API\r\n\r\n* feat: 添加 RBAC Model、API 2\r\n\r\n* feat: 添加 RBAC Model、API 3\r\n\r\n* feat: 添加 RBAC Model、API 4\r\n\r\n* feat: RBAC\r\n\r\n* feat: RBAC\r\n\r\n* feat: RBAC\r\n\r\n* feat: RBAC\r\n\r\n* feat: RBAC\r\n\r\n* feat: RBAC 整理权限位\r\n\r\n* feat: RBAC 整理权限位2\r\n\r\n* feat: RBAC 整理权限位2\r\n\r\n* feat: RBAC 整理权限位\r\n\r\n* feat: RBAC 添加默认角色\r\n\r\n* feat: RBAC 添加迁移文件;迁移用户角色->用户角色绑定\r\n\r\n* feat: RBAC 添加迁移文件;迁移用户角色->用户角色绑定\r\n\r\n* feat: RBAC 修改用户模块API\r\n\r\n* feat: RBAC 添加组织模块迁移文件 & 修改组织模块API\r\n\r\n* feat: RBAC 添加组织模块迁移文件 & 修改组织模块API\r\n\r\n* feat: RBAC 修改用户角色属性的使用\r\n\r\n* feat: RBAC No.1\r\n\r\n* xxx\r\n\r\n* perf: 暂存\r\n\r\n* perf: ...\r\n\r\n* perf(rbac): 添加 perms 到 profile serializer 中\r\n\r\n* stash\r\n\r\n* perf: 使用init\r\n\r\n* perf: 修改migrations\r\n\r\n* perf: rbac\r\n\r\n* stash\r\n\r\n* stash\r\n\r\n* pref: 修改rbac\r\n\r\n* stash it\r\n\r\n* stash: 先去修复其他bug\r\n\r\n* perf: 修改 role 添加 users\r\n\r\n* pref: 修改 RBAC Model\r\n\r\n* feat: 添加权限的 tree api\r\n\r\n* stash: 暂存一下\r\n\r\n* stash: 暂存一下\r\n\r\n* perf: 修改 model verbose name\r\n\r\n* feat: 添加model各种 verbose name\r\n\r\n* perf: 生成 migrations\r\n\r\n* perf: 优化权限位\r\n\r\n* perf: 添加迁移脚本\r\n\r\n* feat: 添加组织角色迁移\r\n\r\n* perf: 添加迁移脚本\r\n\r\n* stash\r\n\r\n* perf: 添加migrateion\r\n\r\n* perf: 暂存一下\r\n\r\n* perf: 修改rbac\r\n\r\n* perf: stash it\r\n\r\n* fix: 迁移冲突\r\n\r\n* fix: 迁移冲突\r\n\r\n* perf: 暂存一下\r\n\r\n* perf: 修改 rbac 逻辑\r\n\r\n* stash: 暂存一下\r\n\r\n* perf: 修改内置角色\r\n\r\n* perf: 解决 root 组织的问题\r\n\r\n* perf: stash it\r\n\r\n* perf: 优化 rbac\r\n\r\n* perf: 优化 rolebinding 处理\r\n\r\n* perf: 完成用户离开组织的问题\r\n\r\n* perf: 暂存一下\r\n\r\n* perf: 修改翻译\r\n\r\n* perf: 去掉了 IsSuperUser\r\n\r\n* perf: IsAppUser 去掉完成\r\n\r\n* perf: 修改 connection token 的权限\r\n\r\n* perf: 去掉导入的问题\r\n\r\n* perf: perms define 格式,修改 app 用户 的全新啊\r\n\r\n* perf: 修改 permission\r\n\r\n* perf: 去掉一些 org admin\r\n\r\n* perf: 去掉部分 org admin\r\n\r\n* perf: 再去掉点 org admin role\r\n\r\n* perf: 再去掉部分 org admin\r\n\r\n* perf: user 角色搜索\r\n\r\n* perf: 去掉很多 
js\r\n\r\n* perf: 添加权限位\r\n\r\n* perf: 修改权限\r\n\r\n* perf: 去掉一个 todo\r\n\r\n* merge: with dev\r\n\r\n* fix: 修复冲突\r\n\r\nCo-authored-by: Bai \r\nCo-authored-by: Michael Bai \r\nCo-authored-by: ibuler ", "code": "def has_permission(self, request, view):\n return super().has_permission(request, view) \\\n and request.user.is_superuser\n\n", "url": "https://github.com/jumpserver/jumpserver.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 30, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 3, "token_counts": 27, "n_ast_nodes": 39, "n_identifiers": 7, "random_cut": "def has_permission(self, request, view):\n return super().has_permission(request, view) \\\n " }, { "id": 220347, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/base_events.py", "file_name": "base_events.py", "fun_name": "_do_shutdown", "commit_message": "add python 3.10.4 for windows", "code": "def _do_shutdown(self, future):\n try:\n self._default_executor.shutdown(wait=True)\n self.call_soon_threadsafe(future.set_result, None)\n except Exception as ex:\n self.call_soon_threadsafe(future.set_exception, ex)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 59, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 6, "token_counts": 44, "n_ast_nodes": 70, "n_identifiers": 11, "random_cut": "def _do_shutdown(self, future):\n try:\n self._default_executor.shutdown(wait=True)\n self.call_soon_threadsafe(future.set_result, None)\n except Exception as ex:\n self.call_soon_threadsafe(future.set_exception, ex)\n" }, { "id": 308837, "commit_id": "7872f87dd74fb4e2b610bb589facc0f763f153ae", "repo": "core", "path": "tests/components/webhook/test_init.py", "file_name": "test_init.py", "fun_name": "test_webhook_put", "commit_message": "Allow registering a webhook as local only (#63516)", "code": "async def test_webhook_put(hass, mock_client):\n \n hooks = []\n webhook_id = webhook.async_generate_id()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 19, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 11, "token_counts": 93, "n_ast_nodes": 35, "n_identifiers": 7, "random_cut": "async def test_webhook_put(hass, mock_client):\n \n hooks = []\n webhook_i" }, { "id": 64795, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/bank_statement_import/bank_statement_import.py", "file_name": "bank_statement_import.py", "fun_name": "update_mapping_db", "commit_message": "style: format code with black", "code": "def update_mapping_db(bank, template_options):\n\tbank = frappe.get_doc(\"Bank\", bank)\n\tfor d in bank.bank_transaction_mapping:\n\t\td.delete()\n\n\tfor d in json.loads(template_options)[\"column_to_field_map\"].items():\n\t\tbank.append(\"bank_transaction_mapping\", {\"bank_transaction_field\": d[1], \"file_field\": d[0]})\n\n\tbank.save()\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 15, "n_words": 22, "vocab_size": 19, "complexity": 3, "nloc": 7, "token_counts": 73, "n_ast_nodes": 121, "n_identifiers": 13, "random_cut": "def update_mapping_db(bank, template_options):\n\tbank = frappe.get" }, { "id": 264259, "commit_id": "3e3880823b6f2fb528cd64c00acb863f17e96bae", 
"repo": "netbox", "path": "netbox/extras/forms/customfields.py", "file_name": "customfields.py", "fun_name": "_append_customfield_fields", "commit_message": "Merge v3.1.6", "code": "def _append_customfield_fields(self):\n \n for customfield in self._get_custom_fields(self._get_content_type()):\n field_name = f'cf_{customfield.name}'\n self.fields[field_name] = self._get_form_field(customfield)\n\n # Annotate the field in the list of CustomField form fields\n self.custom_fields[field_name] = customfield\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 84, "n_words": 26, "vocab_size": 21, "complexity": 2, "nloc": 5, "token_counts": 45, "n_ast_nodes": 82, "n_identifiers": 10, "random_cut": "def _append_customfield_fields(self):\n \n for customfield in self._get_cu" }, { "id": 309555, "commit_id": "b17860a7dd283d54bc452e5dca23532d05822589", "repo": "core", "path": "tests/components/aws/test_init.py", "file_name": "test_init.py", "fun_name": "test_credential_skip_validate", "commit_message": "Upgrade boto3 to 1.20.24 + aiobotocore to 2.1.0 (#64045)", "code": "async def test_credential_skip_validate(hass):\n \n with async_patch(\"aiobotocore.session.AioSession\", new=MockAioSession):\n await async_setup_component(\n hass,\n \"aws\",\n {\n \"aws\": {\n \"credentials\": [\n {\n \"name\": \"key\",\n \"aws_access_key_id\": \"not-valid\",\n \"aws_secret_access_key\": \"dont-care\",\n \"validate\": False,\n }\n ]\n }\n },\n )\n await hass.async_block_till_done()\n\n sessions = hass.data[aws.DATA_SESSIONS]\n assert sessions is not None\n assert len(sessions) == 1\n session = sessions.get(\"key\")\n assert isinstance(session, MockAioSession)\n session.get_user.assert_not_awaited()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 361, "n_words": 50, "vocab_size": 42, "complexity": 1, "nloc": 25, "token_counts": 103, "n_ast_nodes": 185, "n_identifiers": 17, "random_cut": "async def test_credential_skip_validate(hass):\n \n with async_patch(\"aiobotocore.session.AioSession\", new=MockAioSession):\n await async_setup_component(\n " }, { "id": 81375, "commit_id": "782667a34ee45bfe825b29db39c67d4465391bdb", "repo": "awx", "path": "awx/sso/pipeline.py", "file_name": "pipeline.py", "fun_name": "_get_matches", "commit_message": "Allow multiple values in SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR.is_*_[value|role] settings (#12558)", "code": "def _get_matches(list1, list2):\n # Because we are just doing an intersection here we don't really care which list is in which parameter\n\n # A SAML provider could return either a string or a list of items so we need to coerce the SAML value into a list (if needed)\n if not isinstance(list1, (list, tuple)):\n list1 = [list1]\n\n # In addition, we used to allow strings in the SAML config instead of Lists. 
The migration should take case of that but just in case, we will convert our list too\n if not isinstance(list2, (list, tuple)):\n list2 = [list2]\n\n return set(list1).intersection(set(list2))\n\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 130, "n_words": 99, "vocab_size": 72, "complexity": 3, "nloc": 6, "token_counts": 56, "n_ast_nodes": 89, "n_identifiers": 8, "random_cut": "def _get_matches(list1, list2):\n # Because we are just doing an intersection here we don't really care which list is in which parameter\n\n # A SAML provider could return either a string or a list of items so we need to coerce the SA" }, { "id": 154060, "commit_id": "6ce9cf4daec7f9996038205289bce2186be87611", "repo": "modin", "path": "modin/pandas/series.py", "file_name": "series.py", "fun_name": "_between", "commit_message": "FEAT-#4147: Add partial compatibility with Python 3.6 and pandas 1.1 (#4301)\n\nSigned-off-by: Devin Petersohn \r\nSigned-off-by: Vasily Litvinov \r\nCo-authored-by: Alexey Prutskov \r\nCo-authored-by: Rehan Durrani \r\nCo-authored-by: Igoshev, Yaroslav \r\nCo-authored-by: Myachev, Anatoly ", "code": "def _between(self, left, right, inclusive): # noqa: PR01, RT01, D200\n \n return self._default_to_pandas(\n pandas.Series.between, left, right, inclusive=inclusive\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 50, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 31, "n_ast_nodes": 46, "n_identifiers": 9, "random_cut": "def _between(self, left, right, inclusive): # noqa: PR01, RT01, D200\n \n return self._default_to_pandas(\n pandas.Series.between, left, right, inclusive=inc" }, { "id": 294272, "commit_id": "dbef90654f3693401a2df88fa00afbbffbdffcd2", "repo": "core", "path": "tests/components/hue/test_light_v2.py", "file_name": "test_light_v2.py", "fun_name": "test_lights", "commit_message": "Add effects feature to Hue lights (#68567)", "code": "async def test_lights(hass, mock_bridge_v2, v2_resources_test_data):\n \n await mock_bridge_v2.api.load_test_data(v2_resources_test_data)\n\n await setup_platform(hass, mock_bridge_v2, \"light\")\n # there shouldn't have been any requests at this point\n assert len(mock_bridge_v2.mock_requests) == 0\n # 6 entities should be created from test data (grouped_lights are disabled by default)\n assert len(hass.states.async_all()) == 6\n\n # test light which supports color and color temperature\n light_1 = hass.states.get(\"light.hue_light_with_color_and_color_temperature_1\")\n assert light_1 is not None\n assert (\n light_1.attributes[\"friendly_name\"]\n == \"Hue light with color and color temperature 1\"\n )\n assert light_1.state == \"on\"\n assert light_1.attributes[\"brightness\"] == int(46.85 / 100 * 255)\n assert light_1.attributes[\"mode\"] == \"normal\"\n assert light_1.attributes[\"color_mode\"] == COLOR_MODE_XY\n assert set(light_1.attributes[\"supported_color_modes\"]) == {\n COLOR_MODE_COLOR_TEMP,\n COLOR_MODE_XY,\n }\n assert light_1.attributes[\"xy_color\"] == (0.5614, 0.4058)\n assert light_1.attributes[\"min_mireds\"] == 153\n assert light_1.attributes[\"max_mireds\"] == 500\n assert light_1.attributes[\"dynamics\"] == \"dynamic_palette\"\n assert light_1.attributes[\"effect_list\"] == [\"None\", \"candle\", \"fire\"]\n assert light_1.attributes[\"effect\"] == \"None\"\n\n # test light which supports color 
temperature only\n light_2 = hass.states.get(\"light.hue_light_with_color_temperature_only\")\n assert light_2 is not None\n assert (\n light_2.attributes[\"friendly_name\"] == \"Hue light with color temperature only\"\n )\n assert light_2.state == \"off\"\n assert light_2.attributes[\"mode\"] == \"normal\"\n assert light_2.attributes[\"supported_color_modes\"] == [COLOR_MODE_COLOR_TEMP]\n assert light_2.attributes[\"min_mireds\"] == 153\n assert light_2.attributes[\"max_mireds\"] == 454\n assert light_2.attributes[\"dynamics\"] == \"none\"\n assert light_2.attributes[\"effect_list\"] == [\"None\", \"candle\", \"sunrise\"]\n\n # test light which supports color only\n light_3 = hass.states.get(\"light.hue_light_with_color_only\")\n assert light_3 is not None\n assert light_3.attributes[\"friendly_name\"] == \"Hue light with color only\"\n assert light_3.state == \"on\"\n assert light_3.attributes[\"brightness\"] == 128\n assert light_3.attributes[\"mode\"] == \"normal\"\n assert light_3.attributes[\"supported_color_modes\"] == [COLOR_MODE_XY]\n assert light_3.attributes[\"color_mode\"] == COLOR_MODE_XY\n assert light_3.attributes[\"dynamics\"] == \"dynamic_palette\"\n\n # test light which supports on/off only\n light_4 = hass.states.get(\"light.hue_on_off_light\")\n assert light_4 is not None\n assert light_4.attributes[\"friendly_name\"] == \"Hue on/off light\"\n assert light_4.state == \"off\"\n assert light_4.attributes[\"mode\"] == \"normal\"\n assert light_4.attributes[\"supported_color_modes\"] == []\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 458, "n_words": 264, "vocab_size": 124, "complexity": 1, "nloc": 52, "token_counts": 423, "n_ast_nodes": 729, "n_identifiers": 22, "random_cut": "async def test_lights(hass, mock_bridge_v2, v2_resources_test_data):\n \n await mock_bridge_v2.api.load_test_data(v2_resources_test_data)\n\n await setup_platform(hass, mock_bridge_v2, \"light\")\n # there shouldn't have been any requests at this point\n assert len(mock_bridge_v2.mock_requests) == 0\n # 6 entities should be created from test data (grouped_lights are disabled by default)\n assert len(hass.states.async_all()) == 6\n\n # test light which supports color and color temperature\n light_1 = hass.states.get(\"light.hue_light_with_color_and_color_temperature_1\")\n assert light_1 is not None\n assert (\n light_1.attributes[\"friendly_name\"]\n == \"Hue light with color and color temperature 1\"\n )\n assert light_1.state == \"on\"\n assert light_1.attributes[\"brightness\"] == int(46.85 / 100 * 255)\n assert light_1.attributes[\"mode\"] == \"normal\"\n assert light_1.attributes[\"color_mode\"] == COLOR_MODE_XY\n assert set(light_1.attributes[\"supported_color_modes\"]) == {\n COLOR_MODE_COLOR_TEMP,\n COLOR_MODE_XY,\n }\n assert light_1.attributes[\"xy_color\"] == (0.5614, 0.4058)\n assert light_1.attributes[\"min_mireds\"] == 153\n assert light_1.attributes[\"max_mireds\"] == 500\n assert light_1.attributes[\"dynamics\"] == \"dynamic_palette\"\n assert light_1.attributes[\"effect_list\"] == [\"None\", \"candle\", \"fire\"]\n assert light_1.attributes[\"effect\"] == \"None\"\n\n # test light which supports color temperature only\n light_2 = hass.states.get(\"light.hue_light_with_color_temperature_only\")\n assert light_2 is not None\n assert (\n light_2.attributes[\"friendly_name\"] == \"Hue light with color temperature only\"\n )\n assert light_2.state == \"off\"\n assert 
light_2.attributes[\"mode\"] == \"normal\"\n assert light_2.attributes[\"supported_color_modes\"] == [COLOR_MODE_COLOR_TEMP]\n assert light_2.attributes[\"min_mireds\"] == 153\n assert light_2.attributes[\"max_mireds\"] == 454\n assert light_2.attributes[\"dynamics\"] == \"none\"\n assert light_2.attributes[\"effect_list\"] == [\"None\", \"candle\", \"sunrise\"]\n\n # test light which supports color only\n light_3 = hass.states.get(\"light.hue_light_with_color_only\")\n assert light_3 is not None\n assert light_3.attributes[\"friendly_name\"] == \"Hue light with color only\"\n assert light_3.state == \"on\"\n assert light_3.attributes[\"brightness\"] == 128\n assert light_3.attributes[\"mode\"] == \"normal\"\n assert light_3.attributes[\"supported_color_modes\"] == [COLOR_MODE_XY]\n assert light_3.attributes[\"color_mode\"] == COLOR_MODE_XY\n assert light_3.attributes[\"dynamics\"] == \"dynamic_palette\"\n\n # test light which supports on/off only\n light_4 = hass.states.get(\"light.hue_on_off_light\")\n assert light_4 is not None\n assert light_4.attributes[\"friendl" }, { "id": 190019, "commit_id": "206db54af53a87985c0d243d75304ea620dad520", "repo": "manim", "path": "tests/test_scene_rendering/opengl/test_cli_flags_opengl.py", "file_name": "test_cli_flags_opengl.py", "fun_name": "test_mov_can_be_set_as_output_format", "commit_message": "Migrate more `os.path` to `pathlib` in tests (#2991)\n\n* Migrate more `os.path` to `pathlib` in tests\r\n\r\n* Convert test fixtures to pathlib\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix mypy errors in tests\r\n\r\n* migrate another pathlib instance\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Benjamin Hackl ", "code": "def test_mov_can_be_set_as_output_format(tmp_path, manim_cfg_file, simple_scenes_path):\n \n scene_name = \"SquareToCircle\"\n command = [\n sys.executable,\n \"-m\",\n \"manim\",\n \"--renderer\",\n \"opengl\",\n \"-ql\",\n \"--media_dir\",\n str(tmp_path),\n \"--format\",\n \"mov\",\n str(simple_scenes_path),\n scene_name,\n ]\n out, err, exit_code = capture(command)\n assert exit_code == 0, err\n\n unexpected_webm_path = (\n tmp_path / \"videos\" / \"simple_scenes\" / \"480p15\" / \"SquareToCircle.webm\"\n )\n assert not unexpected_webm_path.exists(), \"unexpected webm file found at \" + str(\n unexpected_webm_path,\n )\n\n expected_mov_path = (\n tmp_path / \"videos\" / \"simple_scenes\" / \"480p15\" / \"SquareToCircle.mov\"\n )\n assert expected_mov_path.exists(), \"expected .mov file not found at \" + str(\n expected_mov_path,\n )\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 239, "n_words": 85, "vocab_size": 56, "complexity": 1, "nloc": 30, "token_counts": 120, "n_ast_nodes": 211, "n_identifiers": 16, "random_cut": "def test_mov_can_be_set_as_output_format(tmp_path, manim_cfg_file, simple_scenes_path):\n \n scene_name = \"SquareToCircle\"\n command = [\n sys.executable,\n \"-m" }, { "id": 98436, "commit_id": "6621356bbf73f25d6e0df98a78360c7db0f9ee42", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_member_details.py", "file_name": "test_organization_member_details.py", "fun_name": "test_lists_organization_roles", "commit_message": "feat(access): Add retirement flag for org roles (#33603)\n\nAdd an `is_retired` attribute to organization-level roles. 
Roles marked\r\nas retired will be hidden in the UI, and the role can no longer be\r\nassigned to members. Members who already have the role will keep it,\r\nreceiving the associated permissions and minimum team role.\r\n\r\nRole retirement is gated by the \"organizations:team-roles\" feature flag.\r\nOrganizations without the flag will see retired roles as normal.", "code": "def test_lists_organization_roles(self):\n response = self.get_success_response(self.organization.slug, \"me\")\n\n role_ids = [role[\"id\"] for role in response.data[\"roles\"]]\n assert role_ids == [\"member\", \"admin\", \"manager\", \"owner\"]\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 40, "n_words": 20, "vocab_size": 18, "complexity": 2, "nloc": 4, "token_counts": 48, "n_ast_nodes": 83, "n_identifiers": 9, "random_cut": "def test_lists_organization_roles(self):\n response = self.get_success_response(self.organization.slug, \"me\")\n\n role_ids = [role[\"id\"] for role in response.data[\"roles\"]]" }, { "id": 3668, "commit_id": "359fcd801128239b39297828d39821f631ce00c0", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-google-ads/unit_tests/test_google_ads.py", "file_name": "test_google_ads.py", "fun_name": "test_get_date_params_with_time_zone", "commit_message": "Source Google Ads: handle page token expired exception (#9812)\n\n* dynamic date range\r\n\r\n* raise exception if exites the cycle without error\r\n\r\n* if range days is 1 already do not retry\r\n\r\n* added unit tests\r\n\r\n* added comments\r\n\r\n* added comments\r\n\r\n* common mock classes are moved to common module\r\n\r\n* change read_records\r\n\r\n* refactored get_date_params\r\n\r\n* handle corner case\r\n\r\n* added parse_dates function\r\n\r\n* added test_streams\r\n\r\n* check mock calls\r\n\r\n* fix unit tests for chunk date range refactoring\r\n\r\n* removed commented codes\r\n\r\n* remove commented line\r\n\r\n* refactor test_streams\r\n\r\n* refactor CustomQuery.get_query\r\n\r\n* remove TODO\r\n\r\n* deleted unused json\r\n\r\n* format\r\n\r\n* fix chunk_date_range\r\n\r\n* added docstring\r\n\r\n* set range_days to 15 for ShoppingPerformanceReport\r\n\r\n* refactor chunk_date_range\r\n\r\n* format code 2\r\n\r\n* call parent read_records method\r\n\r\n* add return type in get_date_params\r\n\r\n* change e to exception\r\n\r\n* set start_date as end_date\r\n\r\n* log page token has expired\r\n\r\n* bump version\r\n\r\n* updated spec and def yaml\r\n\r\nCo-authored-by: auganbay ", "code": "def test_get_date_params_with_time_zone():\n time_zone_chatham = Timezone(\"Pacific/Chatham\") # UTC+12:45\n mock_start_date_chatham = pendulum.today(tz=time_zone_chatham).subtract(days=1).to_date_string()\n time_zone_honolulu = Timezone(\"Pacific/Honolulu\") # UTC-10:00\n mock_start_date_honolulu = pendulum.today(tz=time_zone_honolulu).subtract(days=1).to_date_string()\n\n mock_conversion_window_days = 14\n\n incremental_stream_config = dict(\n conversion_window_days=mock_conversion_window_days,\n start_date=mock_start_date_chatham,\n api=MockGoogleAdsClient(SAMPLE_CONFIG),\n time_zone=time_zone_chatham,\n )\n stream = IncrementalGoogleAdsStream(**incremental_stream_config)\n start_date_chatham, end_date_chatham = get_date_params(\n start_date=mock_start_date_chatham, time_zone=stream.time_zone, range_days=stream.range_days\n )\n\n incremental_stream_config.update({\"start_date\": mock_start_date_honolulu, \"time_zone\": 
time_zone_honolulu})\n stream_2 = IncrementalGoogleAdsStream(**incremental_stream_config)\n\n start_date_honolulu, end_date_honolulu = get_date_params(\n start_date=mock_start_date_honolulu, time_zone=stream_2.time_zone, range_days=stream_2.range_days\n )\n\n assert start_date_honolulu != start_date_chatham and end_date_honolulu != end_date_chatham\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 151, "n_words": 63, "vocab_size": 45, "complexity": 2, "nloc": 22, "token_counts": 165, "n_ast_nodes": 266, "n_identifiers": 31, "random_cut": "def test_get_date_params_with_time_zone():\n time_zone_chatham = Timezone(\"Pacific/Chatham\") # UTC+12:45\n mock_start_date_chatham = pendulum.today(tz=time_zone_chatham).subtract(days=1).to_date_string()\n time_zone_honolulu = Timezone(\"Pacific/Honolulu\") # UTC-10:00\n mock_start_date_honolulu = pendulum.today(tz=time_zone_honolulu).subtract(days=1).to_date_string()\n\n mock_conversion_window_days = 14\n\n incremental_stream_config = dict(\n conversion_window_days=mock_conversion_window_days,\n start_date=mock_start_date_chatham,\n api=MockGoogleAdsClient(SAMPLE_CONFIG),\n time_zone=time_zone_chatham,\n )\n stream = IncrementalGoogleAdsStream(**incremental_str" }, { "id": 31992, "commit_id": "e4d2588573f2c68eb792f2d11f092eb2c562bef5", "repo": "transformers", "path": "tests/pipelines/test_pipelines_common.py", "file_name": "test_pipelines_common.py", "fun_name": "test_load_default_pipelines_tf_table_qa", "commit_message": "[Pipelines] Add revision tag to all default pipelines (#17667)\n\n* trigger test failure\r\n\r\n* upload revision poc\r\n\r\n* Update src/transformers/pipelines/base.py\r\n\r\nCo-authored-by: Julien Chaumond \r\n\r\n* up\r\n\r\n* add test\r\n\r\n* correct some stuff\r\n\r\n* Update src/transformers/pipelines/__init__.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* correct require flag\r\n\r\nCo-authored-by: Julien Chaumond \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def test_load_default_pipelines_tf_table_qa(self):\n import tensorflow as tf\n\n set_seed_fn = lambda: tf.random.set_seed(0) # noqa: E731\n self.check_default_pipeline(\"table-question-answering\", \"tf\", set_seed_fn, self.check_models_equal_tf)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 38, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 35, "n_ast_nodes": 59, "n_identifiers": 9, "random_cut": "def test_load_default_pipelines_tf_table_qa(self):\n import tensorflow as tf\n\n set_seed_fn = lambda: tf.random.set_seed(0) # " }, { "id": 91295, "commit_id": "d6da9e3f9a72428db7de318fd6c13641dbb41825", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events_v2.py", "file_name": "test_organization_events_v2.py", "fun_name": "test_count_if", "commit_message": "fix(discover): Handle unicode values in parameters (#35272)\n\n- This handles unicode values in parameters becoming aliased and used in\r\n clickhouse which doesn't support that\r\n - Uses the ascii values instead eg. 
u716e\r\n - Which works out for the new events endpoint since we transform\r\n them back to what was passed in", "code": "def test_count_if(self):\n unicode_phrase1 = \"\\u716e\\u6211\\u66f4\\u591a\\u7684\\u98df\\u7269\\uff0c\\u6211\\u9913\\u4e86\"\n for i in range(5):\n data = load_data(\n \"transaction\",\n timestamp=before_now(minutes=(1 + i)),\n start_timestamp=before_now(minutes=(1 + i), milliseconds=100 if i < 3 else 200),\n )\n data[\"tags\"] = {\n \"sub_customer.is-Enterprise-42\": \"yes\" if i == 0 else \"no\",\n \"unicode-phrase\": unicode_phrase1 if i == 0 else \"no\",\n }\n self.store_event(data, project_id=self.project.id)\n\n query = {\n \"field\": [\n \"count_if(transaction.duration, less, 150)\",\n \"count_if(transaction.duration, greater, 150)\",\n \"count_if(sub_customer.is-Enterprise-42, equals, yes)\",\n \"count_if(sub_customer.is-Enterprise-42, notEquals, yes)\",\n f\"count_if(unicode-phrase, equals, {unicode_phrase1})\",\n ],\n \"project\": [self.project.id],\n }\n response = self.do_request(query)\n assert response.status_code == 200\n assert len(response.data[\"data\"]) == 1\n\n assert response.data[\"data\"][0][\"count_if(transaction.duration, less, 150)\"] == 3\n assert response.data[\"data\"][0][\"count_if(transaction.duration, greater, 150)\"] == 2\n\n assert response.data[\"data\"][0][\"count_if(sub_customer.is-Enterprise-42, equals, yes)\"] == 1\n assert (\n response.data[\"data\"][0][\"count_if(sub_customer.is-Enterprise-42, notEquals, yes)\"] == 4\n )\n assert response.data[\"data\"][0][f\"count_if(unicode-phrase, equals, {unicode_phrase1})\"] == 1\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 455, "n_words": 116, "vocab_size": 67, "complexity": 5, "nloc": 33, "token_counts": 234, "n_ast_nodes": 403, "n_identifiers": 21, "random_cut": "def test_count_if(self):\n unicode_phrase1 = \"\\u716e\\u6211\\u66f4\\u591a\\u7684\\u98df\\u7269\\uff0c\\u6211\\u9913\\u4e86\"\n for i in range(5):\n data = load_data(\n \"transaction\",\n timestamp=before_now(minutes=(1 + i)),\n start_timestamp=before_now(minutes=(1 + i), milliseconds=100 if i < 3 else 200),\n )\n data[\"tags\"] = {\n \"sub_customer.is-Enterprise-42\": \"yes\" if i == 0 else \"no\",\n \"unicode-phrase\": unicode_phrase1 if i == 0 else \"no\",\n }\n self.store_event(data, project_id=self.project.id)\n\n query = {\n \"field\": [\n \"count_if(transaction.duration, less, 150)\",\n \"count_if(transaction.duration, greater, 150)\",\n \"count_if(sub_customer.is-Enterprise-42, equals, yes)\",\n \"count_if(sub_customer.is-Enterprise-42, notEquals, yes)\",\n f\"count_if(unicode-phrase, equals, {unicode_phrase1})\",\n ],\n \"project\": [self.project.id],\n }\n response = self.do_request(query)\n assert response.status_code == 200\n assert len(response.data[\"data\"]) == 1\n\n assert response.data[\"data\"][0][\"count_if(transaction.duration, less, 150)\"] == 3\n assert response.data[\"data\"][0][\"count_if(transaction.duration, greater, 150)\"] == 2\n\n assert response.data[\"data\"][0][\"count_if(sub_customer.is-Enterprise-42, equals, yes)\"] == 1\n assert (\n response.data[\"data\"][0][\"count_if(sub_customer.is-Enterprise-42, notEquals, yes)" }, { "id": 321539, "commit_id": "d387b1a1084b9649009e5cffb9d71facc80bb41f", "repo": "qutebrowser", "path": "tests/helpers/fixtures.py", "file_name": "fixtures.py", "fun_name": "webengineview", "commit_message": "tests: Adjust most imports", "code": "def webengineview(qtbot, 
monkeypatch, web_tab_setup):\n \n QtWebEngineWidgets = pytest.importorskip('qutebrowser.qt.webenginewidgets')\n monkeypatch.setattr(objects, 'backend', usertypes.Backend.QtWebEngine)\n view = QtWebEngineWidgets.QWebEngineView()\n qtbot.add_widget(view)\n yield view\n view.setPage(None) # Avoid warning if using QWebEngineProfile\n\n\n@pytest.fixture", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 45, "n_words": 24, "vocab_size": 22, "complexity": 1, "nloc": 7, "token_counts": 53, "n_ast_nodes": 98, "n_identifiers": 17, "random_cut": "def webengineview(qtbot, monkeypatch, web_tab_setup):\n \n QtWebEngineWidgets = pytest.importorskip('qutebrowser.qt.webenginewidgets')\n monkeypatch.setattr(objects, 'backend', usertypes.Backend.QtWebEngine)\n view = QtWebEngineWidgets.QWebEngineView()\n qtbot.add_widget(view)\n yield view\n view.setPage(No" }, { "id": 186246, "commit_id": "e68e02405f813a5e7c4dc7b8e11bd8cc742d8055", "repo": "textual", "path": "docs/examples/styles/link_hover_color.py", "file_name": "link_hover_color.py", "fun_name": "compose", "commit_message": "Add example for link hover color.", "code": "def compose(self):\n yield Label(\n \"Visit the [link=https://textualize.io]Textualize[/link] website.\",\n id=\"lbl1\", # (1)!\n )\n yield Label(\n \"Click [@click=app.bell]here[/] for the bell sound.\",\n id=\"lbl2\", # (2)!\n )\n yield Label(\n \"You can also click [@click=app.bell]here[/] for the bell sound.\",\n id=\"lbl3\", # (3)!\n )\n yield Label(\n \"[@click=app.quit]Exit this application.[/]\",\n id=\"lbl4\", # (4)!\n )\n\n\napp = LinkHoverColorApp(css_path=\"link_hover_color.css\")\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 197, "n_words": 51, "vocab_size": 33, "complexity": 1, "nloc": 17, "token_counts": 45, "n_ast_nodes": 104, "n_identifiers": 7, "random_cut": "def compose(self):\n yield Label(\n \"Visit the [link=https://textualize.io]Textualize[/link] website.\",\n id=\"lbl1\", # (1)!\n )\n yield Label(\n \"Click [@click=app.bell]here[/] for the bell sound.\",\n id=\"lbl2\", # (2)!\n )\n yield Label(\n \"You can also click [@click=app.bell]here[/] for the bell sound.\",\n id=\"lbl3\", # (3)!\n )\n yield Label(\n \"[@click=app.quit]Exit this application.[/]\",\n id=\"lbl4\", # (4)!\n )\n\n\napp = LinkHoverColo" }, { "id": 100351, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "lib/model/layers.py", "file_name": "layers.py", "fun_name": "call", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. 
Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def call(self, inputs, *args, **kwargs):\n \n input_shape = K.int_shape(inputs)\n if len(input_shape) != 4:\n raise ValueError('Inputs should have rank ' +\n str(4) +\n '; Received input shape:', str(input_shape))\n\n if self.data_format == 'channels_first':\n batch_size, channels, height, width = input_shape\n if batch_size is None:\n batch_size = -1\n r_height, r_width = self.size\n o_height, o_width = height * r_height, width * r_width\n o_channels = channels // (r_height * r_width)\n\n out = K.reshape(inputs, (batch_size, r_height, r_width, o_channels, height, width))\n out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2))\n out = K.reshape(out, (batch_size, o_channels, o_height, o_width))\n elif self.data_format == 'channels_last':\n batch_size, height, width, channels = input_shape\n if batch_size is None:\n batch_size = -1\n r_height, r_width = self.size\n o_height, o_width = height * r_height, width * r_width\n o_channels = channels // (r_height * r_width)\n\n out = K.reshape(inputs, (batch_size, height, width, r_height, r_width, o_channels))\n out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5))\n out = K.reshape(out, (batch_size, o_height, o_width, o_channels))\n return out\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 467, "n_words": 152, "vocab_size": 71, "complexity": 6, "nloc": 27, "token_counts": 267, "n_ast_nodes": 406, "n_identifiers": 25, "random_cut": "def call(self, inputs, *args, **kwargs):\n \n input_shape = K.int_shape(inputs)\n if len(input_shape) != 4:\n raise ValueError('Inputs should have rank ' +\n str(4) +\n '; Received input shape:', str(input_shape))\n\n if self.data_format == 'channels_first':\n batch_size, channels, height, width = input_shape\n if batch_size is None:\n batch_size = -1\n r_height, r_width = self.size\n o_height, o_width = height * r_height, width * r_width\n o_channels = channels // (r_height * r_width)\n\n out = K.reshape(inputs, (batch" }, { "id": 310537, "commit_id": "a70c9802839e7e5d57230d9315dc48a8b7124590", "repo": "core", "path": "homeassistant/components/openuv/sensor.py", "file_name": "sensor.py", "fun_name": "update_from_latest_data", "commit_message": "Fix small inconsistency in OpenUV data storage (#64717)", "code": "def update_from_latest_data(self) -> None:\n \n if (data := self.openuv.data[DATA_UV]) is None:\n self._attr_available = False\n return\n\n self._attr_available = True\n\n if self.entity_description.key == TYPE_CURRENT_OZONE_LEVEL:\n self._attr_native_value = data[\"ozone\"]\n elif self.entity_description.key == TYPE_CURRENT_UV_INDEX:\n self._attr_native_value = data[\"uv\"]\n elif self.entity_description.key == TYPE_CURRENT_UV_LEVEL:\n if data[\"uv\"] >= 11:\n self._attr_native_value = UV_LEVEL_EXTREME\n elif data[\"uv\"] >= 8:\n self._attr_native_value = UV_LEVEL_VHIGH\n elif data[\"uv\"] >= 6:\n self._attr_native_value = UV_LEVEL_HIGH\n elif data[\"uv\"] >= 3:\n self._attr_native_value = UV_LEVEL_MODERATE\n else:\n self._attr_native_value = UV_LEVEL_LOW\n elif self.entity_description.key == TYPE_MAX_UV_INDEX:\n self._attr_native_value = data[\"uv_max\"]\n if uv_max_time := parse_datetime(data[\"uv_max_time\"]):\n self._attr_extra_state_attributes.update(\n {ATTR_MAX_UV_TIME: as_local(uv_max_time)}\n )\n elif 
self.entity_description.key in (\n TYPE_SAFE_EXPOSURE_TIME_1,\n TYPE_SAFE_EXPOSURE_TIME_2,\n TYPE_SAFE_EXPOSURE_TIME_3,\n TYPE_SAFE_EXPOSURE_TIME_4,\n TYPE_SAFE_EXPOSURE_TIME_5,\n TYPE_SAFE_EXPOSURE_TIME_6,\n ):\n self._attr_native_value = data[\"safe_exposure_time\"][\n EXPOSURE_TYPE_MAP[self.entity_description.key]\n ]\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 509, "n_words": 98, "vocab_size": 54, "complexity": 12, "nloc": 38, "token_counts": 220, "n_ast_nodes": 355, "n_identifiers": 31, "random_cut": "def update_from_latest_data(self) -> None:\n \n if (data := self.openuv.data[DATA_UV]) is None:\n self._attr_available = False\n return\n\n self._attr_available = True\n\n if self.entity_description.key == TYPE_CURRENT_OZONE_LEVEL:\n self._attr_native_value = data[\"ozone\"]\n elif self.entity_description.key == TYPE_CURRENT_UV_INDEX:\n self._attr_native_value = data[\"uv\"]\n elif self.entity_description.key == TYPE_CURRENT_UV_LEVEL:\n if data[\"uv\"] >= " }, { "id": 157924, "commit_id": "b0cb7bed0f74c8c263c38b2a5d9c0fca70db4d56", "repo": "d2l-zh", "path": "d2l/paddle.py", "file_name": "paddle.py", "fun_name": "evaluate_accuracy_gpu", "commit_message": "[Paddle]Add chapter_computational-performance (#1167)\n\n* [Paddle]Add chapter_computational-performance\r\n\r\n* add Residual\r\n\r\n* Fix sgd bugs\r\n\r\n* remove the number\r\n\r\n* Fix gpu config\r\n\r\n* Fix some syntax and describe issue\r\n\r\n* Update gpu configuration\r\n\r\nCo-authored-by: w5688414 ", "code": "def evaluate_accuracy_gpu(net, data_iter, device=None):\n \n if isinstance(net, nn.Layer):\n net.eval() # 设置为评估模式\n if not device:\n device = next(iter(net.parameters())).place\n paddle.set_device(\"gpu:{}\".format(str(device)[-2])) \n # 正确预测的数量,总预测的数量\n metric = d2l.Accumulator(2)\n with paddle.no_grad():\n for X, y in data_iter:\n if isinstance(X, list):\n # BERT微调所需的\n X = [paddle.to_tensor(x, place=device) for x in X]\n else:\n X = paddle.to_tensor(X, place=device)\n y = paddle.to_tensor(y, place=device)\n metric.add(d2l.accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 202, "n_words": 58, "vocab_size": 44, "complexity": 6, "nloc": 16, "token_counts": 171, "n_ast_nodes": 274, "n_identifiers": 28, "random_cut": "def evaluate_accuracy_gpu(net, data_iter, device=None):\n \n if isinstance(net, nn.Layer):\n net.eval() # 设置为评估模式\n " }, { "id": 46813, "commit_id": "f9e18472c0c228fc3de7c883c7c3d26d7ee49e81", "repo": "airflow", "path": "tests/providers/google/cloud/operators/test_bigquery.py", "file_name": "test_bigquery.py", "fun_name": "test_execute", "commit_message": "Add autodetect arg in BQCreateExternalTable Operator (#22710)\n\n* Add autodetect parameter\r\n\r\n* Update docstring\r\n\r\n* Update google provider documentation", "code": "def test_execute(self, mock_hook):\n operator = BigQueryCreateExternalTableOperator(\n task_id=TASK_ID,\n destination_project_dataset_table=f'{TEST_DATASET}.{TEST_TABLE_ID}',\n schema_fields=[],\n bucket=TEST_GCS_BUCKET,\n source_objects=TEST_GCS_DATA,\n source_format=TEST_SOURCE_FORMAT,\n autodetect=True,\n )\n\n operator.execute(None)\n mock_hook.return_value.create_external_table.assert_called_once_with(\n external_project_dataset_table=f'{TEST_DATASET}.{TEST_TABLE_ID}',\n schema_fields=[],\n 
source_uris=[f'gs://{TEST_GCS_BUCKET}/{source_object}' for source_object in TEST_GCS_DATA],\n source_format=TEST_SOURCE_FORMAT,\n autodetect=True,\n compression='NONE',\n skip_leading_rows=0,\n field_delimiter=',',\n max_bad_records=0,\n quote_character=None,\n allow_quoted_newlines=False,\n allow_jagged_rows=False,\n src_fmt_configs={},\n labels=None,\n encryption_configuration=None,\n )\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 312, "n_words": 36, "vocab_size": 32, "complexity": 2, "nloc": 28, "token_counts": 127, "n_ast_nodes": 203, "n_identifiers": 35, "random_cut": "def test_execute(self, mock_hook):\n operator = BigQueryCreateExternalTableOperator(\n task_id=TASK_ID,\n destination_project_dataset_table=f'{TEST_DATASET}.{TEST_TABLE_ID}',\n schema_fields=[],\n bucket=TEST_GCS_BUCKET,\n source_objects=TEST_GCS_DATA,\n " }, { "id": 149760, "commit_id": "fc837c4daa27a18ff0e86128f4d52089b88fa5fb", "repo": "freqtrade", "path": "freqtrade/freqai/data_handler.py", "file_name": "data_handler.py", "fun_name": "append_predictions", "commit_message": "add freqao backend machinery, user interface, documentation", "code": "def append_predictions(self, predictions, do_predict, len_dataframe):\n \n\n ones = np.ones(len_dataframe)\n s_mean, s_std = ones*self.data['s_mean'], ones*self.data['s_std']\n\n self.predictions = np.append(self.predictions,predictions)\n self.do_predict = np.append(self.do_predict,do_predict)\n self.target_mean = np.append(self.target_mean,s_mean)\n self.target_std = np.append(self.target_std,s_std)\n\n return\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 82, "n_words": 26, "vocab_size": 21, "complexity": 1, "nloc": 8, "token_counts": 98, "n_ast_nodes": 153, "n_identifiers": 13, "random_cut": "def append_predictions(self, predictions, do_predict, len_dataframe):\n \n\n ones = np.ones(len_dataframe)\n s_mean, s_std = ones*self.data['s_mean'], ones*self.data['s_std']\n\n self.predictions = np.append(self.predictions,predictions)\n self.do_predict = np.append(self.do_predict,do_predict)\n self.target_mean = np.append(self.target_mean,s_mean)\n self.target_std = np.append(self.target_std,s_std)\n\n return\n" }, { "id": 53847, "commit_id": "51da0df3edbbf2f812b34616ef2b0fa83c676e04", "repo": "prefect", "path": "tests/test_context.py", "file_name": "test_context.py", "fun_name": "test_exiting_a_context_more_than_entering_raises", "commit_message": "Fix errors with multiple tokens and ban behavior", "code": "def test_exiting_a_context_more_than_entering_raises():\n context = ExampleContext(x=1)\n\n with pytest.raises(RuntimeError, match=\"Asymmetric use of context\"):\n with context:\n context.__exit__()\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 37, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 32, "n_ast_nodes": 59, "n_identifiers": 9, "random_cut": "def test_exiting_a_context_more_than_entering_raises():\n context = ExampleContext(x=1)\n\n with pytest.raises(Run" }, { "id": 336702, "commit_id": "f7cd6b87e1ee8c7909de760f22f1a6b0c6ae0592", "repo": "diffusers", "path": "src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py", "file_name": "pipeline_stable_diffusion_img2img.py", "fun_name": "disable_attention_slicing", 
"commit_message": "Fix `disable_attention_slicing` in pipelines (#498)\n\nFix `disable_attention_slicing` in pipelines.", "code": "def disable_attention_slicing(self):\n r\n # set slice_size = `None` to disable `set_attention_slice`\n self.enable_attention_slicing(None)\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 32, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 13, "n_ast_nodes": 24, "n_identifiers": 3, "random_cut": "def disable_attention_slicing(self):\n r\n # " }, { "id": 67112, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/india/e_invoice/utils.py", "file_name": "utils.py", "fun_name": "raise_document_name_too_long_error", "commit_message": "style: format code with black", "code": "def raise_document_name_too_long_error():\n\ttitle = _(\"Document ID Too Long\")\n\tmsg = _(\"As you have E-Invoicing enabled, to be able to generate IRN for this invoice\")\n\tmsg += \", \"\n\tmsg += _(\"document id {} exceed 16 letters.\").format(bold(_(\"should not\")))\n\tmsg += \"
<br><br>
    \"\n\tmsg += _(\"You must {} your {} in order to have document id of {} length 16.\").format(\n\t\tbold(_(\"modify\")), bold(_(\"naming series\")), bold(_(\"maximum\"))\n\t)\n\tmsg += _(\"Please account for ammended documents too.\")\n\tfrappe.throw(msg, title=title)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 61, "n_words": 72, "vocab_size": 54, "complexity": 1, "nloc": 11, "token_counts": 88, "n_ast_nodes": 165, "n_identifiers": 8, "random_cut": "def raise_document_name_too_long_error():\n\ttitle = _(\"Document ID Too Long\")\n\tmsg = _(\"As you have E-Invoicing enabled, to be able to generate IRN for this invoice\")\n\tmsg += \", \"\n\tmsg += _(\"document id {} exceed 16 letters.\").format(bold(_(\"should not\")))\n\tmsg " }, { "id": 78170, "commit_id": "b301fb17a70ad6494a391f17277f8a5410313d69", "repo": "wagtail", "path": "wagtail/documents/views/chooser.py", "file_name": "chooser.py", "fun_name": "get_context_data", "commit_message": "Further reshuffles to match generic views as closely as possible", "code": "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"results\": self.documents,\n \"table\": self.table,\n \"results_url\": self.get_results_url(),\n \"is_searching\": self.is_searching,\n \"search_query\": self.search_query,\n \"can_create\": self.can_create(),\n \"collection_id\": self.collection_id,\n }\n )\n\n if context[\"can_create\"]:\n creation_form = self.get_creation_form()\n context.update(self.get_creation_form_context_data(creation_form))\n\n return context\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 215, "n_words": 32, "vocab_size": 30, "complexity": 2, "nloc": 17, "token_counts": 98, "n_ast_nodes": 166, "n_identifiers": 16, "random_cut": "def get_context_data(self, **kwargs):\n context = super().get_context_dat" }, { "id": 150204, "commit_id": "e213d0ad55db09d83a172019234398b64469de6f", "repo": "freqtrade", "path": "tests/freqai/conftest.py", "file_name": "conftest.py", "fun_name": "get_patched_data_drawer", "commit_message": "isolate data_drawer functions from data_kitchen, accommodate tests, add new test", "code": "def get_patched_data_drawer(mocker, freqaiconf):\n # dd = mocker.patch('freqtrade.freqai.data_drawer', MagicMock())\n dd = FreqaiDataDrawer(freqaiconf)\n return dd\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 13, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 15, "n_ast_nodes": 25, "n_identifiers": 5, "random_cut": "def get_patched_data_drawer(mocker, freqaiconf):\n # dd = mocker.patch('freqtrade.freqai.data_drawer', MagicMock())\n" }, { "id": 154653, "commit_id": "0a2c0de4451f7e2e8f337a9478d7595473aa348e", "repo": "modin", "path": "modin/pandas/groupby.py", "file_name": "groupby.py", "fun_name": "__getattr__", "commit_message": "REFACTOR-#5026: Change exception names to simplify grepping (#5027)\n\nSigned-off-by: Myachev ", "code": "def __getattr__(self, key):\n \n try:\n return object.__getattribute__(self, key)\n except AttributeError as err:\n if key in self._columns:\n return self.__getitem__(key)\n raise err\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, 
"n_whitespaces": 88, "n_words": 19, "vocab_size": 18, "complexity": 3, "nloc": 7, "token_counts": 40, "n_ast_nodes": 66, "n_identifiers": 9, "random_cut": "def __getattr__(self, key):\n \n try:\n return object.__getattribute__(self, key)\n except Attr" }, { "id": 94757, "commit_id": "04193742b952f3ebedfc36857771fc15489f7cd0", "repo": "sentry", "path": "tests/sentry/utils/suspect_resolutions/test_commit_correlation.py", "file_name": "test_commit_correlation.py", "fun_name": "test_no_files_changed", "commit_message": "fix(suspect-resolutions): Tweak commit correlation logic and track total events (#37891)", "code": "def test_no_files_changed(self):\n project = self.create_project()\n group1 = self.create_group(project=project, resolved_at=timezone.now())\n group2 = self.create_group(project=project, status=GroupStatus.UNRESOLVED)\n release = self.create_release(project=project, version=\"1\")\n release2 = self.create_release(project=project, version=\"2\")\n repo = self.create_repo(project=project, name=project.name)\n commit = Commit.objects.create(\n organization_id=project.organization_id, repository_id=repo.id, key=\"1\"\n )\n ReleaseCommit.objects.create(\n organization_id=project.organization_id, release=release, commit=commit, order=1\n )\n ReleaseCommit.objects.create(\n organization_id=project.organization_id, release=release2, commit=commit, order=1\n )\n GroupRelease.objects.create(\n project_id=project.id,\n group_id=group1.id,\n release_id=release.id,\n last_seen=(group1.resolved_at - timedelta(hours=2)),\n )\n GroupRelease.objects.create(\n project_id=project.id,\n group_id=group2.id,\n release_id=release2.id,\n last_seen=(group1.resolved_at - timedelta(hours=2)),\n )\n\n res1 = get_files_changed_in_releases(group1.resolved_at, group1.id, project.id)\n res2 = get_files_changed_in_releases(group1.resolved_at, group2.id, project.id)\n\n assert res1.files_changed == set()\n assert res2.files_changed == set()\n assert res1.release_ids.first().id == release.id\n assert res2.release_ids.first().id == release2.id\n assert not is_issue_commit_correlated(group1.id, group2.id, project.id).is_correlated\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 372, "n_words": 91, "vocab_size": 56, "complexity": 1, "nloc": 35, "token_counts": 336, "n_ast_nodes": 506, "n_identifiers": 46, "random_cut": "def test_no_files_changed(self):\n project = self.create_project()\n group1 = self.create_group(project=project, resolved_at=timezone.now())\n group2 = self.create_group(project=project, status=GroupStatus.UNRESOLVED)\n release = self.create_release(project=project, version=\"1\")\n release2 = self.create_release(project=project, version=\"2\")\n repo = self.create_repo(project=project, name=project.name)\n commit = Commit.objects.create(\n organization_id=project.organization_id, repository_id=repo.id, key=\"1\"\n )\n ReleaseCommit.objects.create(\n organization_id=project.organization_id, release=release, commit=commit, order=1\n )\n ReleaseCommit.objects.create(\n organization_id=project.organization_id, release=release2, commit=commit, order=1\n )\n " }, { "id": 288415, "commit_id": "d6a6d0d7548307c143fd2c44a589bd29f729f1e6", "repo": "core", "path": "homeassistant/components/lacrosse_view/sensor.py", "file_name": "sensor.py", "fun_name": "native_value", "commit_message": "Fix LaCrosse View not updating (#79474)", "code": "def native_value(self) -> float | str:\n \n return 
self.entity_description.value_fn(\n self.coordinator.data[self.index], self.entity_description.key\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 43, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 34, "n_ast_nodes": 54, "n_identifiers": 10, "random_cut": "def native_value(self) -> float | str:\n \n return " }, { "id": 268350, "commit_id": "4260b71cc77b7a44e061668d0d408d847f550156", "repo": "ansible", "path": "lib/ansible/template/__init__.py", "file_name": "__init__.py", "fun_name": "__getitem__", "commit_message": "refactor and fixes for doc parsing (#77719)\n\n* refactor and remove redundant code in documentation\r\n\r\n allow location and building api to be more accessible\r\n fix issues with displaying ansible.legacy and ansible.builtin\r\n ensure we don't x2 process tokens (some modules reference them also) fixes #77764\r\n move to constants vs hardcoded\r\n more informative errors and comments\r\n now have actual filter/test plugins, which expose the filter/test functions\r\n moved filter/test loading/finding logic into jinja2pluginloader, removed dupe implementations\r\n added tests for case in which we unique by basename when listing\r\n\r\nUpdate lib/ansible/utils/plugin_docs.py\r\nCo-authored-by: Sloane Hertel <19572925+s-hertel@users.noreply.github.com>", "code": "def __getitem__(self, key):\n\n if not isinstance(key, string_types):\n raise ValueError('key must be a string, got %s instead' % type(key))\n\n if key not in self._loaded_builtins:\n plugin = None\n try:\n plugin = self._pluginloader.get(key)\n except (AnsibleError, KeyError) as e:\n raise TemplateSyntaxError('Could not load \"%s\": %s' % (key, to_native(e)), 0)\n except Exception as e:\n display.vvvv('Unexpected plugin load (%s) exception: %s' % (key, to_native(e)))\n raise e\n\n # if a plugin was found/loaded\n if plugin:\n # set in filter cache and avoid expensive plugin load\n self._delegatee[key] = plugin.j2_function\n self._loaded_builtins.add(key)\n\n # let it trigger keyerror if we could not find ours or jinja2 one\n func = self._delegatee[key]\n\n # if i do have func and it is a filter, it nees wrapping\n if self._pluginloader.type == 'filter':\n # filter need wrapping\n if key in C.STRING_TYPE_FILTERS:\n # avoid litera_eval when you WANT strings\n func = _wrap_native_text(func)\n else:\n # conditionally unroll iterators/generators to avoid having to use `|list` after every filter\n func = _unroll_iterator(func)\n\n return func\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 477, "n_words": 154, "vocab_size": 99, "complexity": 8, "nloc": 22, "token_counts": 157, "n_ast_nodes": 262, "n_identifiers": 27, "random_cut": "def __getitem__(self, key):\n\n if not isinstance(key, string_types):\n raise ValueError('key must be a string, got %s instead' % type(key))\n\n if key not in self._loaded_builtins:\n plugin = None\n try:\n plugin = self._pluginloader.get(key)\n except (AnsibleError, KeyError) as e:\n raise TemplateSyntaxError('Could not load \"%s\": %s' % (key, to_native(e)), 0)\n except Exception as e:\n display.vvvv('Unexpected plugin load (%s) exception: %s' % (key, to_native(e)))\n raise e\n\n # if a plugin was found/loaded\n if plugin:\n # set in filter cache and avoid expensive plugin load\n self._delegatee[key] = plugin.j2_function\n self._loaded_builtins.add(key)\n\n # 
let it trigger keyerror if we could not find ours or jinja2 one\n func = self._delegatee[key]\n\n # if i do have func and it is a filter, it nees wrapping\n if self._pluginloader.type == 'filter':\n # filter need wrappin" }, { "id": 107482, "commit_id": "f156db08eee54d285ab0fb4e031e48d078ba6aa3", "repo": "matplotlib", "path": "lib/matplotlib/axis.py", "file_name": "axis.py", "fun_name": "tick_bottom", "commit_message": "DOC: More cleanup axes -> Axes", "code": "def tick_bottom(self):\n \n label = True\n if 'label1On' in self._major_tick_kw:\n label = (self._major_tick_kw['label1On']\n or self._major_tick_kw['label2On'])\n self.set_ticks_position('bottom')\n # If labels were turned off before this was called, leave them off.\n self.set_tick_params(which='both', labelbottom=label)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 103, "n_words": 30, "vocab_size": 28, "complexity": 3, "nloc": 7, "token_counts": 51, "n_ast_nodes": 93, "n_identifiers": 8, "random_cut": "def tick_bottom(self):\n \n label = True\n if 'label1On' in self._major_tick_kw:\n label = (self._major_tick_kw['label1On']\n or self._major_tick_kw['label2On'])\n " }, { "id": 214627, "commit_id": "6ffedc473b3d89bff82dc45c5fd16c9003111f86", "repo": "flair", "path": "flair/embeddings/transformer.py", "file_name": "transformer.py", "fun_name": "__getstate__", "commit_message": "move transformer embeddings to own file", "code": "def __getstate__(self):\n config_dict = self.model.config.to_dict()\n\n tokenizer_data = self._tokenizer_bytes()\n\n model_state = {\n \"model\": self.base_model_name,\n \"fine_tune\": self.fine_tune,\n \"layers\": \",\".join(map(str, self.layer_indexes)),\n \"layer_mean\": self.layer_mean,\n \"subtoken_pooling\": self.subtoken_pooling,\n \"cls_pooling\": self.cls_pooling,\n \"is_token_embedding\": self.token_embedding,\n \"is_document_embedding\": self.document_embedding,\n \"allow_long_sentences\": self.allow_long_sentences,\n \"config_state_dict\": config_dict,\n \"tokenizer_data\": tokenizer_data,\n \"name\": self.name,\n \"context_length\": self.context_length,\n \"respect_document_boundaries\": self.respect_document_boundaries,\n \"context_dropout\": self.context_dropout,\n }\n\n return model_state\n", "url": "https://github.com/flairNLP/flair.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 244, "n_words": 45, "vocab_size": 42, "complexity": 1, "nloc": 21, "token_counts": 125, "n_ast_nodes": 210, "n_identifiers": 25, "random_cut": "def __getstate__(self):\n config_dict = self.model.config.to_dict()\n\n " }, { "id": 150887, "commit_id": "b9f35cadb330763e70c52dd867ab74dc4555a94e", "repo": "freqtrade", "path": "tests/rpc/test_rpc.py", "file_name": "test_rpc.py", "fun_name": "test_rpc_stopentry", "commit_message": "add /stopentry alias for /stopbuy", "code": "def test_rpc_stopentry(mocker, default_conf) -> None:\n mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock())\n mocker.patch.multiple(\n 'freqtrade.exchange.Exchange',\n fetch_ticker=MagicMock()\n )\n\n freqtradebot = get_patched_freqtradebot(mocker, default_conf)\n patch_get_signal(freqtradebot)\n rpc = RPC(freqtradebot)\n freqtradebot.state = State.RUNNING\n\n assert freqtradebot.config['max_open_trades'] != 0\n result = rpc._rpc_stopentry()\n assert {'status': 'No more entries will occur from now. 
Run /reload_config to reset.'} == result\n assert freqtradebot.config['max_open_trades'] == 0\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 94, "n_words": 48, "vocab_size": 38, "complexity": 1, "nloc": 14, "token_counts": 91, "n_ast_nodes": 154, "n_identifiers": 18, "random_cut": "def test_rpc_stopentry(mocker, default_conf) -> None:\n mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock())\n mocker.patch.multiple(\n 'freq" }, { "id": 153226, "commit_id": "6f20abdf65515b7dd0d451259f41ccc010c1f7a4", "repo": "modin", "path": "examples/tutorial/jupyter/execution/pandas_on_ray/test/test_notebooks.py", "file_name": "test_notebooks.py", "fun_name": "test_exercise_2", "commit_message": "REFACTOR-#4213: Refactor `modin/examples/tutorial/` directory (#4214)\n\nSigned-off-by: Igoshev, Yaroslav ", "code": "def test_exercise_2():\n modified_notebook_path = (\n \"examples/tutorial/jupyter/execution/pandas_on_ray/local/exercise_2_test.ipynb\"\n )\n nb = nbformat.read(\n \"examples/tutorial/jupyter/execution/pandas_on_ray/local/exercise_2.ipynb\",\n as_version=nbformat.NO_CONVERT,\n )\n\n _replace_str(\n nb,\n 'path = \"s3://dask-data/nyc-taxi/2015/yellow_tripdata_2015-01.csv\"',\n '# path = \"s3://dask-data/nyc-taxi/2015/yellow_tripdata_2015-01.csv\"',\n )\n\n new_optional_cell = f'path = \"{test_dataset_path}\"\\n' + download_taxi_dataset\n\n optional_cell_idx = _find_code_cell_idx(nb, \"[Optional] Download data locally.\")\n nb[\"cells\"][optional_cell_idx][\"source\"] = new_optional_cell\n\n nbformat.write(nb, modified_notebook_path)\n _execute_notebook(modified_notebook_path)\n\n\n# in this notebook user should add custom mad implementation\n# to make notebook work", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 130, "n_words": 58, "vocab_size": 45, "complexity": 1, "nloc": 18, "token_counts": 71, "n_ast_nodes": 129, "n_identifiers": 15, "random_cut": "def test_exercise_2():\n modified_notebook_path = (\n \"examples/tutorial/jupyter/execution/pandas_on_ray/local/exercise_2_test.ipynb\"\n )\n nb = nbformat.read(\n \"examples/tutorial/jupyter/execution/pandas_on_ray/local/exercise_2.ipynb\",\n as_version=nbformat.NO_CONVERT,\n )\n\n _replace_str(\n nb,\n 'path = \"s3://dask-data/nyc-taxi/2015/yellow_tripdata_2015-01.csv\"',\n '# path = \"s3://dask-data/nyc-taxi/2015/" }, { "id": 26633, "commit_id": "81d02e76b22e3d3b3603e5ae27c5788033ac01b3", "repo": "saleor", "path": "saleor/tests/fixtures.py", "file_name": "fixtures.py", "fun_name": "app_with_token", "commit_message": "Refactor app tokens (#9438)\n\n* Save last_4 chars of current app tokens and store their hashes\r\n\r\n* Update app mutations, commands and tests\r\n\r\n* Update changelog\r\n\r\nCo-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>", "code": "def app_with_token(db):\n app = App.objects.create(name=\"Sample app objects\", is_active=True)\n app.tokens.create(name=\"Test\")\n return app\n\n\n@pytest.fixture", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 19, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 33, "n_ast_nodes": 63, "n_identifiers": 11, "random_cut": "def app_with_token(db):\n app = App.objects.create(name=" }, { "id": 53756, "commit_id": 
"38a964d05aba99c743802b158ffb7f16201d85aa", "repo": "prefect", "path": "tests/test_settings.py", "file_name": "test_settings.py", "fun_name": "test_nested_settings", "commit_message": "Using new settings access pattern everywhere", "code": "def test_nested_settings(monkeypatch):\n assert get_current_settings().get(PREFECT_ORION_DATABASE_ECHO) is False\n\n monkeypatch.setenv(\"PREFECT_ORION_DATABASE_ECHO\", \"1\")\n new_settings = Settings()\n assert new_settings.get(PREFECT_ORION_DATABASE_ECHO) is True\n\n\n@pytest.mark.parametrize(\n \"value,expected\",\n [\n (\"foo\", [\"foo\"]),\n (\"foo,bar\", [\"foo\", \"bar\"]),\n (\"foo, bar, foobar \", [\"foo\", \"bar\", \"foobar\"]),\n ],\n)", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"value,expected\",\n [\n (\"foo\", [\"foo\"]),\n (\"foo,bar\", [\"foo\", \"bar\"]),\n (\"foo, bar, foobar \", [\"foo\", \"bar\", \"foobar\"]),\n ],\n)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 71, "n_words": 32, "vocab_size": 29, "complexity": 1, "nloc": 5, "token_counts": 38, "n_ast_nodes": 142, "n_identifiers": 11, "random_cut": "def test_nested_settings(monkeypatch):\n assert get_current_settings().get(PREFECT_ORION_DATABASE_ECHO) is False\n\n monkeypatch.setenv(\"PREFECT_ORION_DATABASE_ECHO\", \"1\")\n new_settings = S" }, { "id": 68423, "commit_id": "3fa1c634790095bf7eabc135ed717e124efa4ff0", "repo": "erpnext", "path": "erpnext/accounts/general_ledger.py", "file_name": "general_ledger.py", "fun_name": "update_accounting_dimensions", "commit_message": "test: Unit test for round off entry dimensions", "code": "def update_accounting_dimensions(round_off_gle):\n\tdimensions = get_accounting_dimensions()\n\tmeta = frappe.get_meta(round_off_gle[\"voucher_type\"])\n\thas_all_dimensions = True\n\n\tfor dimension in dimensions:\n\t\tif not meta.has_field(dimension):\n\t\t\thas_all_dimensions = False\n\n\tif dimensions and has_all_dimensions:\n\t\tdimension_values = frappe.db.get_value(\n\t\t\tround_off_gle[\"voucher_type\"], round_off_gle[\"voucher_no\"], dimensions, as_dict=1\n\t\t)\n\n\t\tfor dimension in dimensions:\n\t\t\tround_off_gle[dimension] = dimension_values.get(dimension)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 27, "n_words": 40, "vocab_size": 28, "complexity": 6, "nloc": 13, "token_counts": 86, "n_ast_nodes": 138, "n_identifiers": 15, "random_cut": "def update_accounting_dimensions(round_off_gle):\n\tdimensions = get_accounting_dimensions()\n\tmeta = frappe.get_met" }, { "id": 24726, "commit_id": "6e89ec8d09c06453edeee3874a826e750a6947d6", "repo": "PaddleOCR", "path": "tools/infer/predict_rec.py", "file_name": "predict_rec.py", "fun_name": "__call__", "commit_message": "fix sar export", "code": "def __call__(self, img_list):\n img_num = len(img_list)\n # Calculate the aspect ratio of all text bars\n width_list = []\n for img in img_list:\n width_list.append(img.shape[1] / float(img.shape[0]))\n # Sorting can speed up the recognition process\n indices = np.argsort(np.array(width_list))\n rec_res = [['', 0.0]] * img_num\n batch_num = self.rec_batch_num\n st = time.time()\n if self.benchmark:\n self.autolog.times.start()\n for beg_img_no in range(0, img_num, batch_num):\n end_img_no = min(img_num, beg_img_no + batch_num)\n norm_img_batch = []\n imgC, imgH, imgW = self.rec_image_shape[:3]\n max_wh_ratio = imgW / imgH\n # max_wh_ratio = 0\n for ino in range(beg_img_no, 
end_img_no):\n h, w = img_list[indices[ino]].shape[0:2]\n wh_ratio = w * 1.0 / h\n max_wh_ratio = max(max_wh_ratio, wh_ratio)\n for ino in range(beg_img_no, end_img_no):\n\n if self.rec_algorithm == \"SAR\":\n norm_img, _, _, valid_ratio = self.resize_norm_img_sar(\n img_list[indices[ino]], self.rec_image_shape)\n norm_img = norm_img[np.newaxis, :]\n valid_ratio = np.expand_dims(valid_ratio, axis=0)\n valid_ratios = []\n valid_ratios.append(valid_ratio)\n norm_img_batch.append(norm_img)\n elif self.rec_algorithm == \"SRN\":\n norm_img = self.process_image_srn(\n img_list[indices[ino]], self.rec_image_shape, 8, 25)\n encoder_word_pos_list = []\n gsrm_word_pos_list = []\n gsrm_slf_attn_bias1_list = []\n gsrm_slf_attn_bias2_list = []\n encoder_word_pos_list.append(norm_img[1])\n gsrm_word_pos_list.append(norm_img[2])\n gsrm_slf_attn_bias1_list.append(norm_img[3])\n gsrm_slf_attn_bias2_list.append(norm_img[4])\n norm_img_batch.append(norm_img[0])\n elif self.rec_algorithm == \"SVTR\":\n norm_img = self.resize_norm_img_svtr(img_list[indices[ino]],\n self.rec_image_shape)\n norm_img = norm_img[np.newaxis, :]\n norm_img_batch.append(norm_img)\n elif self.rec_algorithm == \"VisionLAN\":\n norm_img = self.resize_norm_img_vl(img_list[indices[ino]],\n self.rec_image_shape)\n norm_img = norm_img[np.newaxis, :]\n norm_img_batch.append(norm_img)\n elif self.rec_algorithm == 'SPIN':\n norm_img = self.resize_norm_img_spin(img_list[indices[ino]])\n norm_img = norm_img[np.newaxis, :]\n norm_img_batch.append(norm_img)\n elif self.rec_algorithm == \"ABINet\":\n norm_img = self.resize_norm_img_abinet(\n img_list[indices[ino]], self.rec_image_shape)\n norm_img = norm_img[np.newaxis, :]\n norm_img_batch.append(norm_img)\n else:\n norm_img = self.resize_norm_img(img_list[indices[ino]],\n max_wh_ratio)\n norm_img = norm_img[np.newaxis, :]\n norm_img_batch.append(norm_img)\n norm_img_batch = np.concatenate(norm_img_batch)\n norm_img_batch = norm_img_batch.copy()\n if self.benchmark:\n self.autolog.times.stamp()\n\n if self.rec_algorithm == \"SRN\":\n encoder_word_pos_list = np.concatenate(encoder_word_pos_list)\n gsrm_word_pos_list = np.concatenate(gsrm_word_pos_list)\n gsrm_slf_attn_bias1_list = np.concatenate(\n gsrm_slf_attn_bias1_list)\n gsrm_slf_attn_bias2_list = np.concatenate(\n gsrm_slf_attn_bias2_list)\n\n inputs = [\n norm_img_batch,\n encoder_word_pos_list,\n gsrm_word_pos_list,\n gsrm_slf_attn_bias1_list,\n gsrm_slf_attn_bias2_list,\n ]\n if self.use_onnx:\n input_dict = {}\n input_dict[self.input_tensor.name] = norm_img_batch\n outputs = self.predictor.run(self.output_tensors,\n input_dict)\n preds = {\"predict\": outputs[2]}\n else:\n input_names = self.predictor.get_input_names()\n for i in range(len(input_names)):\n input_tensor = self.predictor.get_input_handle(\n input_names[i])\n input_tensor.copy_from_cpu(inputs[i])\n self.predictor.run()\n outputs = []\n for output_tensor in self.output_tensors:\n output = output_tensor.copy_to_cpu()\n outputs.append(output)\n if self.benchmark:\n self.autolog.times.stamp()\n preds = {\"predict\": outputs[2]}\n elif self.rec_algorithm == \"SAR\":\n valid_ratios = np.concatenate(valid_ratios)\n inputs = [\n norm_img_batch,\n np.array(\n [valid_ratios], dtype=np.float32),\n ]\n if self.use_onnx:\n input_dict = {}\n input_dict[self.input_tensor.name] = norm_img_batch\n outputs = self.predictor.run(self.output_tensors,\n input_dict)\n preds = outputs[0]\n else:\n input_names = self.predictor.get_input_names()\n for i in range(len(input_names)):\n input_tensor = 
self.predictor.get_input_handle(\n input_names[i])\n input_tensor.copy_from_cpu(inputs[i])\n self.predictor.run()\n outputs = []\n for output_tensor in self.output_tensors:\n output = output_tensor.copy_to_cpu()\n outputs.append(output)\n if self.benchmark:\n self.autolog.times.stamp()\n preds = outputs[0]\n else:\n if self.use_onnx:\n input_dict = {}\n input_dict[self.input_tensor.name] = norm_img_batch\n outputs = self.predictor.run(self.output_tensors,\n input_dict)\n preds = outputs[0]\n else:\n self.input_tensor.copy_from_cpu(norm_img_batch)\n self.predictor.run()\n outputs = []\n for output_tensor in self.output_tensors:\n output = output_tensor.copy_to_cpu()\n outputs.append(output)\n if self.benchmark:\n self.autolog.times.stamp()\n if len(outputs) != 1:\n preds = outputs\n else:\n preds = outputs[0]\n rec_result = self.postprocess_op(preds)\n for rno in range(len(rec_result)):\n rec_res[indices[beg_img_no + rno]] = rec_result[rno]\n if self.benchmark:\n self.autolog.times.end(stamp=True)\n return rec_res, time.time() - st\n\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 3313, "n_words": 429, "vocab_size": 173, "complexity": 29, "nloc": 156, "token_counts": 1116, "n_ast_nodes": 1751, "n_identifiers": 84, "random_cut": "def __call__(self, img_list):\n img_num = len(img_list)\n # Calculate the aspect ratio of all text bars\n width_list = []\n for img in img_list:\n width_list.append(img.shape[1] / float(img.shape[0]))\n # Sorting can speed up the recognition process\n indices = np.argsort(np.array(width_list))\n rec_res = [['', 0.0]] * img_num\n batch_num = self.rec_batch_num\n st = time.time()\n if self.benchmark:\n self.autolog.times.start()\n for beg_img_no in range(0, img_num, batch_num):\n end_img_no = min(img_num, beg_img_no + batch_num)\n norm_img_batch = []\n imgC, imgH, imgW = self.rec_image_shape[:3]\n max_wh_ratio = imgW / imgH\n # max_wh_ratio = 0\n for ino in range(beg_img_no, end_img_no):\n h, w = img_list[indices[ino]].shape[0:2]\n wh_ratio = w * 1.0 / h\n max_wh_ratio = max(max_wh_ratio, wh_ratio)\n for ino in range(beg_img_no, end_img_no):\n\n if self.rec_algorithm == \"SAR\":\n norm_img, _, _, valid_ratio = self.resize_norm_img_sar(\n img_list[indices[ino]], self.rec_image_shape)\n norm_img = norm_img[np.newaxis, :]\n valid_ratio = np.expand_dims(valid_ratio, axis=0)\n valid_ratios = []\n valid_ratios.append(valid_ratio)\n norm_img_batch.append(norm_img)\n elif self.rec_algorithm == \"SRN\":\n norm_img = self.process_image_srn(\n img_list[indices[ino]], self.rec_image_shape, 8, 25)\n encoder_word_pos_list = []\n gsrm_word_pos_list = []\n gsrm_slf_attn_bias1_list = []\n gsrm_slf_attn_bias2_list = []\n encoder_word_pos_list.append(norm_img[1])\n gsrm_word_pos_list.append(norm_img[2])\n gsrm_slf_attn_bias1_list.append(norm_img[3])\n gsrm_slf_attn_bias2_list.append(norm_img[4])\n norm_img_batch.append(norm_img[0])\n elif self.rec_algorithm == \"SVTR\":\n norm_img = self.resize_norm_img_svtr(img_list[indices[ino]],\n self.rec_image_shape)\n norm_img = norm_img[np.newaxis, :]\n norm_img_batch.append(norm_img)\n elif self.rec_algorithm == \"VisionLAN\":\n norm_img = self.resize_norm_img_vl(img_list[indices[ino]],\n self.rec_image_shape)\n norm_img = norm_img[np.newaxis, :]\n norm_img_batch.append(norm_img)\n elif self.rec_algorithm == 'SPIN':\n norm_img = self.resize_norm_img_spin(img_list[indices[ino]])\n norm_img = norm_img[np.newaxis, :]\n 
norm_img_batch.append(norm_img)\n elif self.rec_algorithm == \"ABINet\":\n norm_img = self.resize_norm_img_abinet(\n img_list[indices[ino]], self.rec_image_shape)\n norm_img = norm_img[np.newaxis, :]\n norm_img_batch.append(norm_img)\n else:\n norm_img = self.resize_norm_img(img_list[indices[ino]],\n max_wh_ratio)\n norm_img = norm_img[np.newaxis, :]\n norm_img_batch.append(norm_img)\n norm_img_batch = np.concatenate(norm_img_batch)\n norm_img_batch = norm_img_batch.copy()\n if self.benchmark:\n self.autolog.times.stamp()\n\n if self.rec_algorithm == \"SRN\":\n encoder_word_pos_list = np.concatenate(encoder_word_pos_list)\n gsrm_word_pos_list = np.concatenate(gsrm_word_pos_list)\n gsrm_slf_attn_bias1_list = np.concatenate(\n gsrm_slf_attn_bias1_list)\n gsrm_slf_attn_bias2_list = np.concatenate(\n gsrm_slf_attn_bias2_list)\n\n inputs = [\n norm_img_batch,\n encoder_word_pos_list,\n gsrm_word_pos_list,\n gsrm_slf_attn_bias1_list,\n gsrm_slf_attn_bias2_list,\n ]\n if self.use_onnx:\n input_dict = {}\n input_dict[self.input_tensor.name] = norm_img_batch\n outputs = self.predictor.run(self.output_tensors,\n input_dict)\n preds = {\"predict\": outputs[2]}\n else:\n input_names = self.predictor.get_input_names()\n for i in range(len(input_names)):\n input_tensor = self.predictor.get_input_handle(\n input_names[i])\n input_tensor.copy_from_cpu(inputs[i])\n self.predictor.run()\n outputs = []\n for output_tensor in self.output_tensors:\n output = output_tensor.copy_to_cpu()\n outputs.append(output)\n if self.benchmark:\n self.autolog.times.stamp()\n preds = {\"predict\": outputs[2]}\n elif self.rec_algorithm == \"SAR\":\n valid_ratios = np.concatenate(valid_ratios)\n inputs = [\n norm_img_batch,\n np.array(\n [valid_ratios], dtype=np.float32),\n ]\n if self.use_onnx:\n input_dict = {}\n input_dict[self.input_tensor.name] = norm_img_batch\n outputs = self.predictor.run(self.output_tensors,\n input_dict)\n preds = outputs[0]\n else:\n input_names = self.predictor.get_input_names()\n for i in range(len(input_names)):\n input_tensor = self.predictor.get_input_handle(\n input_names[i])\n input_tensor.copy_from_cpu(inputs[i])\n self.predictor.run()\n outputs = []\n for output_tensor in self.output_tensors:\n output = output_tensor.copy_to_cpu()\n outputs.append(output)\n if self.benchmark:\n self.autolog.times.stamp()\n preds = outputs[0]\n else:\n if self.use_onnx:\n input_dict = {}\n input_dict[self.input_tensor.name] = norm_img_batch\n outputs = self.predictor.run(self.output_tensors,\n input_dict)\n preds = outputs[0]\n else:\n self.input_tensor.copy_from_cpu(norm_img_batch)\n self.predictor.run()\n outputs = []\n for output_tensor in self.output_tensors:\n ou" }, { "id": 1710, "commit_id": "d171fcd8726dccfffd7d13f5188a7a91cffc5b6b", "repo": "PySyft", "path": "packages/syft/src/syft/core/adp/data_subject_list.py", "file_name": "data_subject_list.py", "fun_name": "__getitem__", "commit_message": "Implement DSL setitem properly AT LONG LAST", "code": "def __getitem__(self, item) -> DataSubjectList:\n result = self.data_subjects_indexed[item]\n return DataSubjectList(\n one_hot_lookup=self.one_hot_lookup, # np.unique(self.one_hot_lookup[result]),\n data_subjects_indexed=result,\n )\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 58, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 6, "token_counts": 31, "n_ast_nodes": 47, "n_identifiers": 7, "random_cut": "def __getitem__(self, item) -> 
DataSubjectList:\n result = self.data_subjects_indexed[item]\n return DataSubjectList(\n one_ho" }, { "id": 219312, "commit_id": "1bfb2da67c76758578eff6f3a7c3e6cf4967bc77", "repo": "XX-Net", "path": "code/default/launcher/tests/integrate_testing.py", "file_name": "integrate_testing.py", "fun_name": "xtunnel_logout", "commit_message": "Improve testing.", "code": "def xtunnel_logout(self):\n xlog.info(\"Start testing XTunnel logout\")\n res = simple_http_client.request(\"POST\", \"http://127.0.0.1:8085/module/x_tunnel/control/logout\", timeout=10)\n self.assertEqual(res.status, 200)\n self.xtunnel_login_status = False\n xlog.info(\"Finished testing XTunnel logout\")\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 54, "n_words": 20, "vocab_size": 16, "complexity": 1, "nloc": 6, "token_counts": 46, "n_ast_nodes": 80, "n_identifiers": 11, "random_cut": "def xtunnel_logout(self):\n xlog.info(\"Start testing XTunnel logout\")\n res = simple_http_client.request(\"P" }, { "id": 224214, "commit_id": "dca7cbb43fcd6ea7c677c98ba585395b070d387b", "repo": "mkdocs", "path": "mkdocs/commands/gh_deploy.py", "file_name": "gh_deploy.py", "fun_name": "_is_cwd_git_repo", "commit_message": "Format code with `black -l100 --skip-string-normalization`", "code": "def _is_cwd_git_repo():\n try:\n proc = subprocess.Popen(\n ['git', 'rev-parse', '--is-inside-work-tree'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n except FileNotFoundError:\n log.error(\"Could not find git - is it installed and on your path?\")\n raise Abort('Deployment Aborted!')\n proc.communicate()\n return proc.wait() == 0\n\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 106, "n_words": 34, "vocab_size": 34, "complexity": 2, "nloc": 12, "token_counts": 60, "n_ast_nodes": 104, "n_identifiers": 13, "random_cut": "def _is_cwd_git_repo():\n try:\n proc = subprocess.Popen(\n ['git', 'rev-parse', '--is-inside-work-tree'],\n stdout=sub" }, { "id": 191047, "commit_id": "301124c5b377fa56b940d298900dbc5816dbc24e", "repo": "thumbor", "path": "thumbor/engines/__init__.py", "file_name": "__init__.py", "fun_name": "is_multiple", "commit_message": "Reformat to 80 chars and mypy.ini", "code": "def is_multiple(self):\n return (\n hasattr(self, \"multiple_engine\")\n and self.multiple_engine is not None\n )\n", "url": "https://github.com/thumbor/thumbor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 47, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 21, "n_ast_nodes": 34, "n_identifiers": 4, "random_cut": "def is_multiple(self):\n return (\n hasattr(self, \"multiple_engine\")\n " }, { "id": 252656, "commit_id": "cba66953a303c4411a47f987170e08f30110c6ed", "repo": "mitmproxy", "path": "test/mitmproxy/utils/test_magisk.py", "file_name": "test_magisk.py", "fun_name": "test_subject_hash_old", "commit_message": "Magisk module onboarding for Android (#5547)\n\n* Added magisk module generation\r\n\r\n* Fixed typo\r\n\r\n* changelog\r\n\r\n* Fixed mypy bug\r\n\r\n* Changed action based on ubuntu 18.04 due to https://bit.ly/3QOw87Z\r\n\r\n* Workflow pinned to ubuntu 20.04\r\n\r\n* Moved magisk code to utils and gen on download\r\n\r\n* Styling\r\n\r\n* Removed magisk from git repo\r\n\r\n* Added tests\r\n\r\n* Fixed dead line\r\n\r\n* Update CHANGELOG.md\r\n\r\n* Hardcoded 
hash\r\n\r\nCo-authored-by: Joran van Apeldoorn \r\nCo-authored-by: Maximilian Hils ", "code": "def test_subject_hash_old(tdata):\n # checks if the hash is the same as that comming form openssl\n with taddons.context() as tctx:\n tctx.options.confdir = tdata.path(\"mitmproxy/data/confdir\")\n ca = magisk.get_ca_from_files()\n our_hash = magisk.subject_hash_old(ca)\n assert our_hash == \"efb15d7d\"\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 65, "n_words": 32, "vocab_size": 27, "complexity": 1, "nloc": 6, "token_counts": 45, "n_ast_nodes": 82, "n_identifiers": 13, "random_cut": "def test_subject_hash_old(tdata):\n # ch" }, { "id": 68867, "commit_id": "74a782d81d8f8c4a4d9214a9c06377e5e6e464dd", "repo": "erpnext", "path": "erpnext/stock/report/incorrect_serial_no_valuation/incorrect_serial_no_valuation.py", "file_name": "incorrect_serial_no_valuation.py", "fun_name": "get_stock_ledger_entries", "commit_message": "refactor: DB independent quoting and truthy/falsy values (#31358)\n\n* refactor: DB independent quoting and truthy/falsy values\r\n\r\n* style: reformat to black spec\r\n\r\n* fix: ifnull -> coalesce\r\n\r\n* fix: coalesce -> Coalesce\r\n\r\n* fix: revert pypika comparison\r\n\r\n* refactor: convert queries to QB\r\n\r\n* fix: incorrect value types for query\r\n\r\n`=` query makes no sense with list of values\r\n\r\n* fix: remove warehouse docstatus condition\r\n\r\n* fix: keep using base rate as rate\r\n\r\nCo-authored-by: Ankush Menat ", "code": "def get_stock_ledger_entries(report_filters):\n\tfields = [\n\t\t\"name\",\n\t\t\"voucher_type\",\n\t\t\"voucher_no\",\n\t\t\"item_code\",\n\t\t\"serial_no as serial_nos\",\n\t\t\"actual_qty\",\n\t\t\"posting_date\",\n\t\t\"posting_time\",\n\t\t\"company\",\n\t\t\"warehouse\",\n\t\t\"(stock_value_difference / actual_qty) as valuation_rate\",\n\t]\n\n\tfilters = {\"serial_no\": (\"is\", \"set\"), \"is_cancelled\": 0}\n\n\tif report_filters.get(\"item_code\"):\n\t\tfilters[\"item_code\"] = report_filters.get(\"item_code\")\n\n\tif report_filters.get(\"from_date\") and report_filters.get(\"to_date\"):\n\t\tfilters[\"posting_date\"] = (\n\t\t\t\"between\",\n\t\t\t[report_filters.get(\"from_date\"), report_filters.get(\"to_date\")],\n\t\t)\n\n\treturn frappe.get_all(\n\t\t\"Stock Ledger Entry\",\n\t\tfields=fields,\n\t\tfilters=filters,\n\t\torder_by=\"posting_date asc, posting_time asc, creation asc\",\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 32, "n_words": 60, "vocab_size": 53, "complexity": 4, "nloc": 28, "token_counts": 125, "n_ast_nodes": 228, "n_identifiers": 8, "random_cut": "def get_stock_ledger_entries(report_filters):\n\tfields = [\n\t\t\"name\",\n\t\t\"voucher_type\",\n\t\t\"voucher_no\",\n\t\t\"item_code\",\n\t\t\"serial_no as serial_nos\",\n\t\t\"actual_qty\",\n\t\t\"posting_date\",\n\t\t\"posting_time\",\n\t\t\"company\",\n\t\t\"warehouse\",\n\t\t\"(stock_value_difference / actual_qty) as valuation_rate\",\n\t]\n\n\tfilters = {\"serial_no\": (\"is\", \"set\"), \"is_cancelled\": 0}\n\n\tif report_filters.get(\"item_code\"):\n\t\tfilters[\"item_code\"] = report_filters.get(\"item_code\")\n\n\tif report_filters.get(\"from_date\") and report_filters.get(\"to_date\"):\n\t\tfilters[\"posting_date\"] = (\n\t\t\t\"between\",\n\t\t\t[report_filters.get(\"from_date\"), report_filters.get(\"to_date\")],\n\t\t)\n\n\treturn 
frappe.get_all(\n\t\t\"Stock Ledger Entry\",\n\t\tfields=fields,\n\t\tfilters=filters" }, { "id": 40176, "commit_id": "c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c", "repo": "dash", "path": "dash/_validate.py", "file_name": "_validate.py", "fun_name": "validate_callback", "commit_message": "f-strings everywhere! fffff", "code": "def validate_callback(outputs, inputs, state, extra_args, types):\n Input, Output, State = types\n if extra_args:\n if not isinstance(extra_args[0], (Output, Input, State)):\n raise exceptions.IncorrectTypeException(\n dedent(\n f\n )\n )\n\n raise exceptions.IncorrectTypeException(\n dedent(\n f\n )\n )\n\n for args in [outputs, inputs, state]:\n for arg in args:\n validate_callback_arg(arg)\n\n", "url": "https://github.com/plotly/dash.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 197, "n_words": 42, "vocab_size": 30, "complexity": 5, "nloc": 28, "token_counts": 83, "n_ast_nodes": 163, "n_identifiers": 17, "random_cut": "def validate_callback(outputs, inputs, state, extra_args, types):\n Input, Output, State = types\n if extra_args:\n if not isinstance(extra_args[0], (Output, Input, State)):\n raise exceptions.IncorrectTypeException(\n dedent(\n f\n )\n )\n\n raise exceptions.Incorr" }, { "id": 272119, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/feature_column/sequence_feature_column_test.py", "file_name": "sequence_feature_column_test.py", "fun_name": "test_shared_embedding_column_with_non_sequence_categorical", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_shared_embedding_column_with_non_sequence_categorical(self):\n \n with tf.Graph().as_default():\n vocabulary_size = 3\n sparse_input_a = tf.compat.v1.SparseTensorValue(\n # example 0, ids [2]\n # example 1, ids [0, 1]\n indices=((0, 0), (1, 0), (1, 1)),\n values=(2, 0, 1),\n dense_shape=(2, 2),\n )\n sparse_input_b = tf.compat.v1.SparseTensorValue(\n # example 0, ids [2]\n # example 1, ids [0, 1]\n indices=((0, 0), (1, 0), (1, 1)),\n values=(2, 0, 1),\n dense_shape=(2, 2),\n )\n\n categorical_column_a = (\n tf.feature_column.categorical_column_with_identity(\n key=\"aaa\", num_buckets=vocabulary_size\n )\n )\n categorical_column_b = (\n tf.feature_column.categorical_column_with_identity(\n key=\"bbb\", num_buckets=vocabulary_size\n )\n )\n shared_embedding_columns = tf.feature_column.shared_embeddings(\n [categorical_column_a, categorical_column_b], dimension=2\n )\n\n sequence_input_layer = ksfc.SequenceFeatures(\n shared_embedding_columns\n )\n with self.assertRaisesRegex(\n ValueError,\n r\"In embedding_column: aaa_shared_embedding\\. 
\"\n r\"categorical_column must \"\n r\"be of type SequenceCategoricalColumn to use SequenceFeatures\\.\",\n ):\n _, _ = sequence_input_layer(\n {\"aaa\": sparse_input_a, \"bbb\": sparse_input_b}\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 680, "n_words": 114, "vocab_size": 64, "complexity": 1, "nloc": 38, "token_counts": 216, "n_ast_nodes": 329, "n_identifiers": 29, "random_cut": "def test_shared_embedding_column_with_non_sequence_categorical(self):\n \n with tf.Graph().as_default():\n vocabulary_size = 3\n sparse_input_a = tf.compat.v1.SparseTensorValue(\n # example 0, ids [2]\n # example 1, ids [0, 1]\n indices=((0, 0), (1, 0), (1, 1)),\n values=(2, 0, 1),\n dense_shape=(2, 2),\n )\n sparse_input_b = tf.compat.v1.SparseTensorValue(\n # example 0, ids [2]\n # example 1, ids [0, 1]\n indices=((0, 0), (1, 0), (1, 1)),\n values=(2, 0, 1),\n dense_shape=(2, 2),\n )\n\n categorical_column_a = (\n tf.feature_column.categorical_column_with_identity(\n key=\"aaa\", num_buckets=vocabulary_size\n )\n )\n categorical_column_b = (\n tf.feature_column.categorical_column_with_identity(\n key=\"bbb\", num_buckets=vocabulary_size\n )\n )\n shared_embedding_columns = tf.feature_column.shared_embeddings(\n [categorical_column_" }, { "id": 89537, "commit_id": "e94d7cd092d813d88c2216fca3ca6bd48e0747a3", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_accept_organization_invite.py", "file_name": "test_accept_organization_invite.py", "fun_name": "test_cannot_accept_unapproved_invite", "commit_message": "chore(hybrid-cloud): use organization_slug in AcceptOrganizationInvite API (#42138)", "code": "def test_cannot_accept_unapproved_invite(self):\n self.login_as(self.user)\n\n om = OrganizationMember.objects.create(\n email=\"newuser@example.com\",\n role=\"member\",\n token=\"abc\",\n organization=self.organization,\n invite_status=InviteStatus.REQUESTED_TO_JOIN.value,\n )\n for path in self._get_paths([om.id, om.token]):\n resp = self.client.post(path)\n assert resp.status_code == 400\n\n om = OrganizationMember.objects.get(id=om.id)\n assert not om.invite_approved\n assert om.is_pending\n assert om.token\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 166, "n_words": 34, "vocab_size": 28, "complexity": 2, "nloc": 16, "token_counts": 109, "n_ast_nodes": 170, "n_identifiers": 26, "random_cut": "def test_cannot_accept_unapproved_invite(self):\n self.login_as(self.user)\n\n om = OrganizationMember.objects.create(\n email=\"newuser@example.com\",\n role=\"member\",\n token=\"abc\",\n organization=self.organization,\n invite_status=InviteStatus.REQUESTED_TO_JOIN.value,\n " }, { "id": 89491, "commit_id": "458900af44ec0ceb675ce8159d33c4b361847471", "repo": "sentry", "path": "src/sentry/testutils/cases.py", "file_name": "cases.py", "fun_name": "_setup_today", "commit_message": "ref: ban exam.patcher (#42222)\n\nblocked on https://github.com/getsentry/getsentry/pull/9091", "code": "def _setup_today(self):\n with mock.patch(\n \"django.utils.timezone.now\",\n return_value=(datetime(2013, 5, 18, 15, 13, 58, 132928, tzinfo=timezone.utc)),\n ):\n yield\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 61, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 6, "token_counts": 42, "n_ast_nodes": 63, 
"n_identifiers": 9, "random_cut": "def _setup_today(self):\n with mock.patch(\n \"django.utils.timezone.now\",\n return_value=(datetime(2013, 5, 18, 15, 13, 58, 132928, tzinfo=timezone.utc))," }, { "id": 305270, "commit_id": "8d94c8f74aea9a6a75dbc5ffbb8fb6b8ad4442d7", "repo": "core", "path": "tests/components/melnor/__init__.py", "file_name": "__init__.py", "fun_name": "patch_async_setup_entry", "commit_message": "Add Melnor Bluetooth valve watering Integration (#70457)", "code": "def patch_async_setup_entry(return_value=True):\n \n return patch(\n \"homeassistant.components.melnor.async_setup_entry\",\n return_value=return_value,\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 30, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 5, "token_counts": 18, "n_ast_nodes": 32, "n_identifiers": 3, "random_cut": "def patch_async_setup_entry(return_value=True):\n \n return patch(\n \"homeassistant.components.melnor.async" }, { "id": 215952, "commit_id": "f2a783643de61cac1ff3288b40241e5ce6e1ddc8", "repo": "salt", "path": "salt/modules/linux_shadow.py", "file_name": "linux_shadow.py", "fun_name": "list_users", "commit_message": "Update to latest ``pyupgrade`` hook. Stop skipping it on CI.\n\nSigned-off-by: Pedro Algarvio ", "code": "def list_users(root=None):\n \n if root is not None:\n getspall = functools.partial(_getspall, root=root)\n else:\n getspall = functools.partial(spwd.getspall)\n\n return sorted(\n user.sp_namp if hasattr(user, \"sp_namp\") else user.sp_nam for user in getspall()\n )\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 64, "n_words": 28, "vocab_size": 25, "complexity": 4, "nloc": 8, "token_counts": 62, "n_ast_nodes": 100, "n_identifiers": 12, "random_cut": "def list_users(root=None):\n \n if root is not None:\n getspall = functools.partial(_getspall, root=root)\n else:\n getspall = functools.partial(spwd.getspall)\n\n return sorted(\n user.sp_namp if hasattr(user, \"sp_namp\") else user.sp_nam for user in getspall()\n )\n\n" }, { "id": 247356, "commit_id": "7e91107be1a4287873266e588a3c5b415279f4c8", "repo": "synapse", "path": "tests/rest/media/v1/test_filepath.py", "file_name": "test_filepath.py", "fun_name": "test_url_cache_thumbnail", "commit_message": "Add type hints to `tests/rest` (#12146)\n\n* Add type hints to `tests/rest`\r\n\r\n* newsfile\r\n\r\n* change import from `SigningKey`", "code": "def test_url_cache_thumbnail(self) -> None:\n \n self.assertEqual(\n self.filepaths.url_cache_thumbnail_rel(\n \"2020-01-02_GerZNDnDZVjsOtar\", 800, 600, \"image/jpeg\", \"scale\"\n ),\n \"url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar/800-600-image-jpeg-scale\",\n )\n self.assertEqual(\n self.filepaths.url_cache_thumbnail(\n \"2020-01-02_GerZNDnDZVjsOtar\", 800, 600, \"image/jpeg\", \"scale\"\n ),\n \"/media_store/url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar/800-600-image-jpeg-scale\",\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 155, "n_words": 24, "vocab_size": 16, "complexity": 1, "nloc": 14, "token_counts": 56, "n_ast_nodes": 96, "n_identifiers": 6, "random_cut": "def test_url_cache_thumbnail(self) -> None:\n \n self.assertEqual(\n self.filepaths.url_cache_thumbnail_rel(\n \"2020-01-02_GerZNDnDZVjsOtar\", 800, 600, \"image/jpeg\", \"scale\"\n ),\n 
\"url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar/800-600-image-jpeg-scale\",\n )\n self.assertEqual(\n self.filepaths.url_cache_thumbnail(\n \"2020-01-02_GerZNDnDZVjsOtar\", 800, 600, \"image/jpeg\", \"scale\"\n ),\n \"/media_" }, { "id": 214395, "commit_id": "538a531926c36124593a0afedaf8a24f44a11c31", "repo": "flair", "path": "flair/models/tars_model.py", "file_name": "tars_model.py", "fun_name": "_print_predictions", "commit_message": "Fix TARS models", "code": "def _print_predictions(self, batch, gold_label_type):\n\n lines = []\n if self.tars_model.predict_spans:\n for datapoint in batch:\n # all labels default to \"O\"\n for token in datapoint:\n token.set_label(\"gold_bio\", \"O\")\n token.set_label(\"predicted_bio\", \"O\")\n\n # set gold token-level\n for gold_label in datapoint.get_labels(gold_label_type):\n gold_span: Span = gold_label.data_point\n prefix = \"B-\"\n for token in gold_span:\n token.set_label(\"gold_bio\", prefix + gold_label.value)\n prefix = \"I-\"\n\n # set predicted token-level\n for predicted_label in datapoint.get_labels(\"predicted\"):\n predicted_span: Span = predicted_label.data_point\n prefix = \"B-\"\n for token in predicted_span:\n token.set_label(\"predicted_bio\", prefix + predicted_label.value)\n prefix = \"I-\"\n\n # now print labels in CoNLL format\n for token in datapoint:\n eval_line = (\n f\"{token.text} \"\n f\"{token.get_label('gold_bio').value} \"\n f\"{token.get_label('predicted_bio').value}\\n\"\n )\n lines.append(eval_line)\n lines.append(\"\\n\")\n return lines\n\n", "url": "https://github.com/flairNLP/flair.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 639, "n_words": 103, "vocab_size": 57, "complexity": 9, "nloc": 28, "token_counts": 155, "n_ast_nodes": 309, "n_identifiers": 23, "random_cut": "def _print_predictions(self, batch, gold_label_type):\n\n lines = []\n if self.tars_model.predict_spans:\n for datapoint in batch:\n # all labels default to \"O\"\n for token in datapoint:\n token.set_label(\"gold_bio\", \"O\")\n token.set_label(\"predicted_bio\", \"O\")\n\n # set gold token-level\n for gold_label in datapoint.get_labels(gold_label_type):\n gold_span: Span = gold_label.data_point\n prefix = \"B-\"\n for token in gold_span:\n token.set_label(\"gold_bio\", prefix + gold_label.value)\n prefix = \"I-\"\n\n # set predicted token-level\n for predicted_label in datapoint.get_labels(\"predicted\"):\n predicted_span: Span = predicted_label.data_point\n prefix = \"B-\"\n for token in predicted_span:\n token.set_label(\"predicted_bio\", prefix + predicted_label.value)\n prefix = \"I-\"\n\n # now print labels in CoNLL format\n for token in datapoint:\n eval_line = (\n f\"{token.text} \"\n" }, { "id": 86643, "commit_id": "6c1cb91778860eeb8141f9d7df788519c5ef9319", "repo": "sentry", "path": "src/sentry/testutils/helpers/api_gateway.py", "file_name": "api_gateway.py", "fun_name": "get", "commit_message": "feat(api-gateway): Initial plumbing for api gateway (#39739)\n\nThis sets up the initial plumbing for middleware -> proxy -> region\r\nsilo. The API Gateway isn't active as long as the silo's SiloMode is set\r\nto MONOLITH. Also the middleware hasn't been added to\r\nsettings.MIDDLEWARE. 
There are various fixes scattered around to get the\r\ngateway test passing.\r\n\r\nStill to be done in future PRs:\r\n- Checking if the API needs to be proxied\r\n- Adding proxy headers to responses\r\n- Handling of other methods\r\n- Handling non-json responses", "code": "def get(self, request, organization):\n return Response({\"proxy\": False})\n\n\nurlpatterns = [\n url(\n r\"^organizations/(?P[^\\/]+)/control/$\",\n ControlEndpoint.as_view(),\n name=\"control-endpoint\",\n ),\n url(\n r\"^organizations/(?P[^\\/]+)/region/$\",\n RegionEndpoint.as_view(),\n name=\"region-endpoint\",\n ),\n]\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 79, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 88, "n_identifiers": 11, "random_cut": "def get(self, request, organization):\n return Response({\"proxy\": False})\n\n\nurlpatterns = [\n url(\n r\"^organizations/(?P[^\\/]+)/control/$\",\n ControlEndpoint.as_view(),\n n" }, { "id": 149500, "commit_id": "1347107c1e4c77daa7ddf11520d3ae020a43a5d1", "repo": "freqtrade", "path": "freqtrade/configuration/configuration.py", "file_name": "configuration.py", "fun_name": "load_config", "commit_message": "extract load_from_files to load_config", "code": "def load_config(self) -> Dict[str, Any]:\n \n # Load all configs\n config: Dict[str, Any] = load_from_files(self.args.get(\"config\", []))\n\n # Load environment variables\n env_data = enironment_vars_to_dict()\n config = deep_merge_dicts(env_data, config)\n\n # Normalize config\n if 'internals' not in config:\n config['internals'] = {}\n\n if 'pairlists' not in config:\n config['pairlists'] = []\n\n # Keep a copy of the original configuration file\n config['original_config'] = deepcopy(config)\n\n self._process_logging_options(config)\n\n self._process_runmode(config)\n\n self._process_common_options(config)\n\n self._process_trading_options(config)\n\n self._process_optimize_options(config)\n\n self._process_plot_options(config)\n\n self._process_data_options(config)\n\n # Check if the exchange set by the user is supported\n check_exchange(config, config.get('experimental', {}).get('block_bad_exchanges', True))\n\n self._resolve_pairs_list(config)\n\n process_temporary_deprecated_settings(config)\n\n return config\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 266, "n_words": 83, "vocab_size": 62, "complexity": 3, "nloc": 24, "token_counts": 159, "n_ast_nodes": 273, "n_identifiers": 23, "random_cut": "def load_config(self) -> Dict[str, Any]:\n \n # Load all configs\n config: Dict[str, Any] = load_from_files(self.args.get(\"config\", []))\n\n # Load environment variables\n env_data = enironment_vars_to_dict()\n config = deep_merge_dicts(env_data, config)\n\n # Normalize config\n if 'internals' not in config:\n config['internals'] = {}\n\n if 'pairlists' not in config:\n conf" }, { "id": 11854, "commit_id": "efff15494d3a955b2211dcd2abcd8659c0d006c0", "repo": "jina", "path": "jina/parsers/orchestrate/deployment.py", "file_name": "deployment.py", "fun_name": "mixin_base_deployment_parser", "commit_message": "refactor: rename switch feature (#4494)", "code": "def mixin_base_deployment_parser(parser):\n \n gp = add_arg_group(parser, title='Deployment')\n\n gp.add_argument(\n '--uses-before',\n type=str,\n help='The executor attached after the Pods described by 
--uses, typically before sending to all '\n 'shards, accepted type follows `--uses`',\n )\n gp.add_argument(\n '--uses-after',\n type=str,\n help='The executor attached after the Pods described by --uses, typically used for receiving from '\n 'all shards, accepted type follows `--uses`',\n )\n\n gp.add_argument(\n '--when',\n action=KVAppendAction,\n metavar='KEY: VALUE',\n nargs='*',\n help='The condition that the documents need to fulfill before reaching the Executor.'\n 'The condition can be defined in the form of a `DocArray query condition `',\n )\n\n gp.add_argument(\n '--external',\n action='store_true',\n default=False,\n help='The Deployment will be considered an external Deployment that has been started independently from the Flow.'\n 'This Deployment will not be context managed by the Flow.',\n )\n\n # hidden CLI used for internal only\n\n gp.add_argument(\n '--deployment-role',\n type=DeploymentRoleType.from_string,\n choices=list(DeploymentRoleType),\n help='The role of this deployment in the flow'\n if _SHOW_ALL_ARGS\n else argparse.SUPPRESS,\n )\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 357, "n_words": 143, "vocab_size": 94, "complexity": 2, "nloc": 37, "token_counts": 122, "n_ast_nodes": 214, "n_identifiers": 21, "random_cut": "def mixin_base_deployment_parser(parser):\n \n gp = add_arg_group(parser, title='Deployment')\n\n gp.add_argument(\n '--uses-before',\n type=str,\n help='The executor attached after the Pods described by --uses, typically before sending to all '\n 'shards, accepted type follows `--uses`',\n )\n gp.add_argument(\n '--uses-after',\n type=str,\n help='The executor attached after the Pods described by --uses, typically used for receiving from '\n 'all shards, accepted type follows `--uses`',\n )\n\n gp.add_argument(\n '--when',\n action=KVAppendAction,\n metavar='KEY: VALUE',\n nargs='*',\n help='The condition that the documents need to fulfill before reaching the Executor.'\n 'The condition can be defined in the form of a `DocArray query condition `',\n )\n\n gp.add_argument(\n '--external',\n action='store_true',\n default=False,\n help='The Deployment will be considered an external Deployment that has been started independently from the Flow.'\n 'This Deployment will not be context managed by the Flow.',\n )\n\n # hidden CLI used for internal only\n\n gp.add_argument(\n '--dep" }, { "id": 118132, "commit_id": "36521e75b6dc297d3b197d93c6fb7e5f4e23456e", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/rockset_handler/tests/test_rockset_handler.py", "file_name": "test_rockset_handler.py", "fun_name": "test_2_get_columns", "commit_message": "run tests and update rockset handler", "code": "def test_2_get_columns(self):\n columns = self.handler.get_columns('test')\n self.assertEqual(columns, [])\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 20, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 41, "n_identifiers": 6, "random_cut": "def test_2_get_columns(self):\n columns = self.handler.get_columns('tes" }, { "id": 275931, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/save_test.py", "file_name": "save_test.py", "fun_name": "test_save_format_defaults", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def 
test_save_format_defaults(self):\n path = os.path.join(self.get_temp_dir(), \"model_path\")\n save.save_model(self.model, path)\n self.assert_saved_model(path)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 29, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 37, "n_ast_nodes": 61, "n_identifiers": 10, "random_cut": "def test_save_format_defaults(self):\n path = os.path.join(self.get_temp_dir(), \"model_path\")\n " }, { "id": 126883, "commit_id": "c3a8ba0f8a8e315383b8c59602d1c698d32efeaf", "repo": "ray", "path": "dashboard/modules/job/tests/test_utils.py", "file_name": "test_utils.py", "fun_name": "test_wait_for_newline", "commit_message": "Add maximum number of characters in logs output for jobs status message (#27581)\n\nWe've seen the API server go down from trying to return 500mb of log output", "code": "def test_wait_for_newline(self, tmp):\n it = file_tail_iterator(tmp)\n assert next(it) is None\n\n f = open(tmp, \"w\")\n f.write(\"no_newline_yet\")\n assert next(it) is None\n f.write(\"\\n\")\n f.flush()\n assert next(it) == [\"no_newline_yet\\n\"]\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 80, "n_words": 25, "vocab_size": 18, "complexity": 1, "nloc": 9, "token_counts": 61, "n_ast_nodes": 108, "n_identifiers": 10, "random_cut": "def test_wait_for_newline(self, tmp):\n it = file_tail_iterator(tmp)\n " }, { "id": 81703, "commit_id": "8d71292d1af88d7a6418878720779b3084caae1d", "repo": "awx", "path": "awx/main/models/projects.py", "file_name": "projects.py", "fun_name": "save", "commit_message": "Integrity checking on project sync\n\nSigned-off-by: Rick Elrod ", "code": "def save(self, *args, **kwargs):\n added_update_fields = []\n if not self.job_tags:\n job_tags = ['update_{}'.format(self.scm_type), 'install_roles', 'install_collections']\n if self.project.signature_validation:\n job_tags.append('playbook_integrity')\n self.job_tags = ','.join(job_tags)\n added_update_fields.append('job_tags')\n if self.scm_delete_on_update and 'delete' not in self.job_tags and self.job_type == 'check':\n self.job_tags = ','.join([self.job_tags, 'delete'])\n added_update_fields.append('job_tags')\n elif (not self.scm_delete_on_update) and 'delete' in self.job_tags:\n job_tags = self.job_tags.split(',')\n job_tags.remove('delete')\n self.job_tags = ','.join(job_tags)\n added_update_fields.append('job_tags')\n if 'update_fields' in kwargs:\n kwargs['update_fields'].extend(added_update_fields)\n return super(ProjectUpdate, self).save(*args, **kwargs)\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 239, "n_words": 62, "vocab_size": 40, "complexity": 9, "nloc": 19, "token_counts": 182, "n_ast_nodes": 321, "n_identifiers": 19, "random_cut": "def save(self, *args, **kwargs):\n added_update_fields = []\n if not self.job_tags:\n job_tags = ['update_{}'.format(self.scm_type), 'install_roles', 'install_collections']\n if self.project.signature_validation:\n job_tags.append('playbook_integrity')\n self.job_tags = ','.join(job_tags)\n added_update_fields.append('job_tags')\n if self.scm_delete_on_update and 'delete' not in self.job_tags and self.job_type == 'check':\n self.job_tags = ','.join([self.job_tags, 'delete'])\n added_update_fields.append('job_tags')\n elif (not 
self.scm_delete_on_update) and 'delete' in self.job_tags:\n job_tags = self.job_tags.split(',')\n job_tags.remove('delete')\n self.job_tags = ','.join(job_tags)\n added_update_fields.append('job_tags')\n if 'update_fields' in kwargs:\n kwargs['update_fields'].extend(added_update_fields)\n return super(ProjectUpdate, self).save(*args, **kwargs)" }, { "id": 95499, "commit_id": "c5e83fa99fb12391a8648549b55d1f502991ce74", "repo": "sentry", "path": "tests/acceptance/test_organization_sentry_app_detailed_view.py", "file_name": "test_organization_sentry_app_detailed_view.py", "fun_name": "load_page", "commit_message": "test(acceptance): Use data-test-id or loading indicators (#31074)", "code": "def load_page(self, slug):\n url = f\"/settings/{self.organization.slug}/sentry-apps/{slug}/\"\n self.browser.get(url)\n self.browser.wait_until_not('[data-test-id=\"loading-indicator\"]')\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 28, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 27, "n_ast_nodes": 60, "n_identifiers": 8, "random_cut": "def load_page(self, slug):\n url = f\"/settings/{self.organization.slug}/sentry-apps/{slug}/\"\n self.browser.get(url)\n " }, { "id": 300982, "commit_id": "037f6947d88f0754b15d156180cdffb053a25b1a", "repo": "core", "path": "tests/components/recorder/test_util.py", "file_name": "test_util.py", "fun_name": "test_warn_unsupported_dialect", "commit_message": "Fail recorder setup with unsupported dialect or version (#70888)", "code": "def test_warn_unsupported_dialect(caplog, dialect, message):\n \n instance_mock = MagicMock()\n dbapi_connection = MagicMock()\n\n with pytest.raises(UnsupportedDialect):\n util.setup_connection_for_dialect(\n instance_mock, dialect, dbapi_connection, True\n )\n\n assert message in caplog.text\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 62, "n_words": 22, "vocab_size": 19, "complexity": 1, "nloc": 8, "token_counts": 46, "n_ast_nodes": 75, "n_identifiers": 13, "random_cut": "def test_warn_unsupported_dialect(caplog, dialect, message):\n \n instance_mock = MagicMock()\n dbapi_connection = MagicMock()\n\n with pytest.raises(UnsupportedDialect):\n util.setup_connection_for_dialect(\n instance_mock, dialect, dbapi_connection, True\n )\n\n assert message in caplog.text\n\n" }, { "id": 93022, "commit_id": "6e7478ebf8582f8aeff3cda2d29077aaddd5756c", "repo": "sentry", "path": "src/sentry/interfaces/http.py", "file_name": "http.py", "fun_name": "format_cookies", "commit_message": "ref: replace legacy compat.map with list comprehensions (ingest) (#36697)", "code": "def format_cookies(value):\n if not value:\n return ()\n\n if isinstance(value, str):\n value = parse_qsl(value, keep_blank_values=True)\n\n if isinstance(value, dict):\n value = value.items()\n\n return [(fix_broken_encoding(k.strip()), fix_broken_encoding(v)) for k, v in value]\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 60, "n_words": 28, "vocab_size": 22, "complexity": 5, "nloc": 8, "token_counts": 69, "n_ast_nodes": 108, "n_identifiers": 12, "random_cut": "def format_cookies(value):\n if not value:\n return ()\n\n if isinstance(value, s" }, { "id": 89772, "commit_id": "7109322ae20fb07140594ea4ea41b3e5ca06ab83", "repo": "sentry", "path": 
"tests/sentry/dynamic_sampling/test_logging.py", "file_name": "test_logging.py", "fun_name": "test_should_not_log_rules_if_unchanged_and_different_order", "commit_message": "feat(dynamic-sampling): Add logging to dynamic sampling rules [TET-612] (#42288)\n\nThis PR aims to add logging to dynamic sampling rules generation. We\r\nwill perform the logging on every call to `generate_rules` and log all\r\nthe rules generated by the aforementioned function. The logging will\r\nfollow a custom schema for readability and will be added to Google Cloud\r\nLogging with `name = dynamic_sampling.rules` and `event =\r\nrules_generator.generate_rules`.\r\n\r\n_This PR contains a first iteration towards the enhanced observability\r\neffort for dynamic sampling. We will use the learnings from this simple\r\naddition to improve further how we log what happens in the system._", "code": "def test_should_not_log_rules_if_unchanged_and_different_order():\n new_rules = [\n {\n \"sampleRate\": 0.1,\n \"condition\": {\"op\": \"and\", \"inner\": []},\n \"id\": 1000,\n \"type\": \"trace\",\n \"active\": True,\n },\n ]\n\n assert not should_log_rules_change(1, new_rules)\n\n\n@patch(\n \"sentry.dynamic_sampling.logging.active_rules\",\n new={\n 1: {\n get_rule_hash(\n {\n \"sampleRate\": 1,\n \"type\": \"trace\",\n \"condition\": {\n \"op\": \"or\",\n \"inner\": [\n {\n \"op\": \"glob\",\n \"name\": \"trace.environment\",\n \"value\": [\"*dev*\", \"*test*\"],\n \"options\": {\"ignoreCase\": True},\n }\n ],\n },\n \"active\": True,\n \"id\": 1001,\n },\n ): 1.0\n }\n },\n)", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@patch(\n \"sentry.dynamic_sampling.logging.active_rules\",\n new={\n 1: {\n get_rule_hash(\n {\n \"sampleRate\": 1,\n \"type\": \"trace\",\n \"condition\": {\n \"op\": \"or\",\n \"inner\": [\n {\n \"op\": \"glob\",\n \"name\": \"trace.environment\",\n \"value\": [\"*dev*\", \"*test*\"],\n \"options\": {\"ignoreCase\": True},\n }\n ],\n },\n \"active\": True,\n \"id\": 1001,\n },\n ): 1.0\n }\n },\n)", "n_ast_errors": 1, "ast_levels": 23, "n_whitespaces": 577, "n_words": 66, "vocab_size": 48, "complexity": 1, "nloc": 11, "token_counts": 50, "n_ast_nodes": 233, "n_identifiers": 6, "random_cut": "def test_should_not_log_rules_if_unchanged_and_different_order():\n new_rules = [\n {\n \"sampleRate\": 0.1,\n \"condition\": {\"op\": \"and\", \"inner\": []},\n \"id\": 1000,\n \"type\": \"trace\",\n \"active\": True,\n },\n ]\n\n assert not should_log_rules_change(1, new_rules)\n\n\n@patch(\n \"sentry.dynamic_sampling.logging.active_rules\",\n new={\n 1: {\n get_rule_hash(\n {\n \"sampleRate\": 1,\n \"type\": \"trace\",\n \"condition\": {\n \"op\": \"or\",\n \"inner\": [\n {\n \"op\": \"glob\",\n \"name\": \"trace.environment\",\n \"value\": [\"*dev*\", \"*test*\"],\n \"options\": {\"ignoreCase\": True},\n " }, { "id": 213802, "commit_id": "d743336b1f3654cd0315f380f43eed4116997c1d", "repo": "ivy", "path": "ivy_tests/test_core/test_container.py", "file_name": "test_container.py", "fun_name": "test_container_scalar_subtraction", "commit_message": "renamed dev_str arg to dev for all methods.", "code": "def test_container_scalar_subtraction(dev, call):\n container = Container({'a': ivy.array([1], dev=dev),\n 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})\n container -= 1\n assert np.allclose(ivy.to_numpy(container['a']), np.array([0]))\n assert np.allclose(ivy.to_numpy(container.a), np.array([0]))\n assert np.allclose(ivy.to_numpy(container['b']['c']), 
np.array([1]))\n assert np.allclose(ivy.to_numpy(container.b.c), np.array([1]))\n assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([2]))\n assert np.allclose(ivy.to_numpy(container.b.d), np.array([2]))\n\n", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 85, "n_words": 36, "vocab_size": 26, "complexity": 1, "nloc": 10, "token_counts": 216, "n_ast_nodes": 341, "n_identifiers": 14, "random_cut": "def test_container_scalar_subtraction(dev, call):\n container = Container({'a': ivy.array([1], dev=dev),\n 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})\n container -= 1\n assert np.allclose(ivy.to_numpy(container['a']), np.array([0]))\n assert np.allclose(ivy.to_numpy(container.a), np.array([0]))\n assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([1]))\n assert np.allclose(ivy.to_numpy(con" }, { "id": 68498, "commit_id": "548afba8bb244adab3db87eb3bd3a475db3877fe", "repo": "erpnext", "path": "erpnext/utilities/doctype/video/video.py", "file_name": "video.py", "fun_name": "batch_update_youtube_data", "commit_message": "fix(minor): update frappe.error_log to new API (#30864)\n\n* fix(minor): update frappe.error_log to new API\r\n\r\n* refactor: changes for updated log_error api\r\n\r\nCo-authored-by: Ankush Menat ", "code": "def batch_update_youtube_data():\n\tdef get_youtube_statistics(video_ids):\n\t\tapi_key = frappe.db.get_single_value(\"Video Settings\", \"api_key\")\n\t\tapi = Api(api_key=api_key)\n\t\ttry:\n\t\t\tvideo = api.get_video_by_id(video_id=video_ids)\n\t\t\tvideo_stats = video.items\n\t\t\treturn video_stats\n\t\texcept Exception:\n\t\t\tfrappe.log_error(\"Unable to update YouTube statistics\")\n\n\tdef prepare_and_set_data(video_list):\n\t\tvideo_ids = get_formatted_ids(video_list)\n\t\tstats = get_youtube_statistics(video_ids)\n\t\tset_youtube_data(stats)\n\n\tdef set_youtube_data(entries):\n\t\tfor entry in entries:\n\t\t\tvideo_stats = entry.to_dict().get(\"statistics\")\n\t\t\tvideo_id = entry.to_dict().get(\"id\")\n\t\t\tstats = {\n\t\t\t\t\"like_count\": video_stats.get(\"likeCount\"),\n\t\t\t\t\"view_count\": video_stats.get(\"viewCount\"),\n\t\t\t\t\"dislike_count\": video_stats.get(\"dislikeCount\"),\n\t\t\t\t\"comment_count\": video_stats.get(\"commentCount\"),\n\t\t\t\t\"video_id\": video_id,\n\t\t\t}\n\n\t\t\tfrappe.db.sql(\n\t\t\t\t,\n\t\t\t\tstats,\n\t\t\t)\n\n\tvideo_list = frappe.get_all(\"Video\", fields=[\"youtube_video_id\"])\n\tif len(video_list) > 50:\n\t\t# Update in batches of 50\n\t\tstart, end = 0, 50\n\t\twhile start < len(video_list):\n\t\t\tbatch = video_list[start:end]\n\t\t\tprepare_and_set_data(batch)\n\t\t\tstart += 50\n\t\t\tend += 50\n\telse:\n\t\tprepare_and_set_data(video_list)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 62, "n_words": 102, "vocab_size": 78, "complexity": 3, "nloc": 14, "token_counts": 71, "n_ast_nodes": 399, "n_identifiers": 32, "random_cut": "def batch_update_youtube_data():\n\tdef get_youtube_statistics(video_ids):\n\t\tapi_key = frappe.db.get_single_value(\"Video Settings\", \"api_key\")\n\t\tapi = Api(api_key=api_key)\n\t\ttry:\n\t\t\tvideo = api.get_video_by_id(video_id=video_ids)\n" }, { "id": 26157, "commit_id": "55a845c7b19316b93d129681fe0c29c7d067e796", "repo": "saleor", "path": "saleor/graphql/account/mutations/base.py", "file_name": "base.py", "fun_name": "perform_mutation", 
"commit_message": "Filter Customer/Order/Sale/Product/ProductVariant by datetime of last modification (#9137)\n\n* Add updated_at fields to product/variant/user/order/sale models/types/filters\r\n\r\n* Add updated_at column to update_fields\r\n\r\n* Update schema.graphql\r\n\r\n* Update changelog\r\n\r\n* Cleanup metadata save\r\n\r\n* Format code with black\r\n\r\n* Fix schema.graphql\r\n\r\n* Update DB migrations\r\n\r\n* Cleanup migrations\r\n\r\n* Update migration to be more efficient\r\n\r\n* Resolve migrations conflict\r\n\r\n* Update changelog", "code": "def perform_mutation(cls, _root, info, **data):\n try:\n user = models.User.objects.get(email=data[\"email\"])\n except ObjectDoesNotExist:\n raise ValidationError(\n {\n \"email\": ValidationError(\n \"User with this email doesn't exist\",\n code=AccountErrorCode.NOT_FOUND,\n )\n }\n )\n\n if not default_token_generator.check_token(user, data[\"token\"]):\n raise ValidationError(\n {\"token\": ValidationError(INVALID_TOKEN, code=AccountErrorCode.INVALID)}\n )\n\n user.is_active = True\n user.save(update_fields=[\"is_active\", \"updated_at\"])\n\n match_orders_with_new_user(user)\n assign_user_gift_cards(user)\n\n return ConfirmAccount(user=user)\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 284, "n_words": 45, "vocab_size": 39, "complexity": 3, "nloc": 21, "token_counts": 117, "n_ast_nodes": 193, "n_identifiers": 26, "random_cut": "def perform_mutation(cls, _root, info, **data):\n try:\n user = models.User.objects.get(email=data[\"email\"])\n except ObjectDoesNotExist:\n raise ValidationError(\n {\n" }, { "id": 168972, "commit_id": "58c124feb4dffe46a73a43fbe995421ea361dfee", "repo": "pandas", "path": "pandas/tests/indexes/test_indexing.py", "file_name": "test_indexing.py", "fun_name": "test_get_indexer_masked_duplicated_na", "commit_message": "REGR: get_loc for ExtensionEngine not returning bool indexer for na (#48411)", "code": "def test_get_indexer_masked_duplicated_na(self):\n # GH#48411\n idx = Index([1, 2, NA, NA], dtype=\"Int64\")\n result = idx.get_indexer_for(Index([1, NA], dtype=\"Int64\"))\n expected = np.array([0, 2, 3], dtype=result.dtype)\n tm.assert_numpy_array_equal(result, expected)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 58, "n_words": 24, "vocab_size": 20, "complexity": 1, "nloc": 5, "token_counts": 70, "n_ast_nodes": 106, "n_identifiers": 13, "random_cut": "def test_get_indexer_masked_duplicated_na(self):\n # GH#48411\n idx = Index([1, 2, NA, NA], dtype=\"Int64\")\n result = idx.get_indexer_for(Index([1, NA], dtype=\"Int64\"))\n expected = np.array([0, 2, 3], dty" }, { "id": 279878, "commit_id": "ead59b2c4c85284d8c2095e691800255068694ce", "repo": "keras", "path": "keras/engine/sequential.py", "file_name": "sequential.py", "fun_name": "from_config", "commit_message": "Keras Saving: Make sure the optimizer weights are also built and restored upon loading.\n\nAlso allow the weights used in the test to have proper gradients, and make the input shape key in config consistent across Sequential and other models.\n\nPiperOrigin-RevId: 475455814", "code": "def from_config(cls, config, custom_objects=None):\n if \"name\" in config:\n name = config[\"name\"]\n build_input_shape = config.get(\"build_input_shape\")\n layer_configs = config[\"layers\"]\n else:\n name = None\n build_input_shape = None\n layer_configs = 
config\n model = cls(name=name)\n for layer_config in layer_configs:\n layer = layer_module.deserialize(\n layer_config, custom_objects=custom_objects\n )\n model.add(layer)\n\n if getattr(saving_lib._SAVING_V3_ENABLED, \"value\", False):\n compile_config = config.get(\"compile_config\", None)\n if compile_config is not None:\n model._compile_from_config(\n compile_config, base_class=Sequential\n )\n\n if build_input_shape:\n model.build(build_input_shape)\n if model.optimizer is not None:\n model.optimizer.build(model.trainable_variables)\n\n else:\n if (\n not model.inputs\n and build_input_shape\n and isinstance(build_input_shape, (tuple, list))\n ):\n model.build(build_input_shape)\n\n return model\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 457, "n_words": 82, "vocab_size": 53, "complexity": 10, "nloc": 33, "token_counts": 174, "n_ast_nodes": 281, "n_identifiers": 28, "random_cut": "def from_config(cls, config, custom_objects=None):\n if \"name\" in config:\n name = config[\"name\"]\n build_input_shape = config.get(\"build_input_shape\")\n layer_configs = config[\"layers\"]\n else:\n name = None\n build_input_shape = None\n layer_configs = config\n model = cls(name=name)\n for layer_config in layer_configs:\n layer = layer_module.deserialize(\n layer_config, custom_objects=custom_objects\n )\n model.add(layer)\n\n if getattr(savin" }, { "id": 163699, "commit_id": "acd7218f67fbe31308db7482e11fb9c8f30b51a8", "repo": "pandas", "path": "pandas/tests/reductions/test_reductions.py", "file_name": "test_reductions.py", "fun_name": "test_minmax_period", "commit_message": "DEPR: Index.is_monotonic for Index.is_monotonic_increasing (#45422)", "code": "def test_minmax_period(self):\n\n # monotonic\n idx1 = PeriodIndex([NaT, \"2011-01-01\", \"2011-01-02\", \"2011-01-03\"], freq=\"D\")\n assert not idx1.is_monotonic_increasing\n assert idx1[1:].is_monotonic_increasing\n\n # non-monotonic\n idx2 = PeriodIndex(\n [\"2011-01-01\", NaT, \"2011-01-03\", \"2011-01-02\", NaT], freq=\"D\"\n )\n assert not idx2.is_monotonic_increasing\n\n for idx in [idx1, idx2]:\n assert idx.min() == Period(\"2011-01-01\", freq=\"D\")\n assert idx.max() == Period(\"2011-01-03\", freq=\"D\")\n assert idx1.argmin() == 1\n assert idx2.argmin() == 0\n assert idx1.argmax() == 3\n assert idx2.argmax() == 2\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 185, "n_words": 62, "vocab_size": 43, "complexity": 2, "nloc": 15, "token_counts": 132, "n_ast_nodes": 223, "n_identifiers": 14, "random_cut": "def test_minmax_period(self):\n\n # monotonic\n idx" }, { "id": 12967, "commit_id": "e794c06c919ab1d04fe99136851c897ce64b8e5c", "repo": "jina", "path": "jina/parsers/orchestrate/runtimes/remote.py", "file_name": "remote.py", "fun_name": "mixin_http_gateway_parser", "commit_message": "feat: expose grpc parameters and add production ready keepalive parameters (#5092)\n\n* fix: add production ready keepalive grpc parameters\r\n\r\n* feat: add test for long live flow\r\n\r\n* feat: add grpc server options to argument\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: update description\r\n\r\n* feat: add test for grpc server options\r\n\r\n* fix: move parsing from gateway to worker\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: merge default values and new ones for grpc option\r\n\r\n* refactor: cleaner dict 
to list unpacking\r\n\r\n* refactor: reduce the time in test to wait\r\n\r\n* docs: add example in docstring for grpc server options\r\n\r\n* refactor: move the get grpc option from jina helper to runtime helper\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: fix typo\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\nCo-authored-by: Jina Dev Bot ", "code": "def mixin_http_gateway_parser(parser=None):\n \n gp = add_arg_group(parser, title='HTTP Gateway')\n\n gp.add_argument(\n '--title',\n type=str,\n help='The title of this HTTP server. It will be used in automatics docs such as Swagger UI.',\n )\n\n gp.add_argument(\n '--description',\n type=str,\n help='The description of this HTTP server. It will be used in automatics docs such as Swagger UI.',\n )\n\n gp.add_argument(\n '--cors',\n action='store_true',\n default=False,\n help=,\n )\n\n gp.add_argument(\n '--no-debug-endpoints',\n action='store_true',\n default=False,\n help='If set, `/status` `/post` endpoints are removed from HTTP interface. ',\n )\n\n gp.add_argument(\n '--no-crud-endpoints',\n action='store_true',\n default=False,\n help=,\n )\n\n gp.add_argument(\n '--expose-endpoints',\n type=str,\n help=,\n )\n\n gp.add_argument(\n '--uvicorn-kwargs',\n action=KVAppendAction,\n metavar='KEY: VALUE',\n nargs='*',\n help=,\n )\n\n gp.add_argument(\n '--ssl-certfile',\n type=str,\n help=,\n dest='ssl_certfile',\n )\n\n gp.add_argument(\n '--ssl-keyfile',\n type=str,\n help=,\n dest='ssl_keyfile',\n )\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 400, "n_words": 102, "vocab_size": 56, "complexity": 1, "nloc": 80, "token_counts": 204, "n_ast_nodes": 307, "n_identifiers": 15, "random_cut": "def mixin_http_gateway_parser(parser=None):\n \n gp = add_arg_group(parser, title='HTTP Gateway')\n\n gp.add_argument(\n '--title',\n type=str,\n help='The title of this HTTP server. It will be used in automatics docs such as Swagger UI.',\n )\n\n gp.add_argument(\n '--description',\n type=str,\n help='The description of this HTTP server. It will be used in automatics docs such as Swagger UI.',\n )\n\n gp.add_argument(\n '--cors',\n action='store_true',\n default=False,\n help=,\n )\n\n gp.add_argument(\n '--no-debug-endpoints',\n action='store_true',\n default=False,\n help='If set, `/status` `/post` endpoints are removed from HTTP interface. 
',\n )\n\n gp.add_argument(\n '--no-crud-endpoints',\n action='store_true',\n default=False,\n help=,\n )\n\n gp.add_argument(\n '--expose-endpoints',\n type=str,\n help=,\n )\n\n gp.add_argument(\n '--uvicorn-kwargs',\n action=KVAppendAction,\n metavar='KEY: VALUE',\n nargs='*',\n help=,\n )\n\n gp.add_argument(\n '--ssl-certfile',\n type=str,\n help=,\n dest='ssl_certfile',\n " }, { "id": 169561, "commit_id": "55dc32437ea43a238975439ddb6c9dda81b33020", "repo": "pandas", "path": "pandas/tests/indexes/timedeltas/test_timedelta.py", "file_name": "test_timedelta.py", "fun_name": "test_freq_conversion", "commit_message": "API: .astype td64->td64 give requested dtype (#48963)\n\n* API: .astype td64->td64 give requested dtype\r\n\r\n* fix missing import\r\n\r\n* Update doc/source/whatsnew/v1.6.0.rst\r\n\r\nCo-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>\r\n\r\nCo-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>", "code": "def test_freq_conversion(self, index_or_series):\n\n # doc example\n\n scalar = Timedelta(days=31)\n td = index_or_series(\n [scalar, scalar, scalar + timedelta(minutes=5, seconds=3), NaT],\n dtype=\"m8[ns]\",\n )\n\n result = td / np.timedelta64(1, \"D\")\n expected = index_or_series(\n [31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan]\n )\n tm.assert_equal(result, expected)\n\n # We don't support \"D\" reso, so we use the pre-2.0 behavior\n # casting to float64\n result = td.astype(\"timedelta64[D]\")\n expected = index_or_series([31, 31, 31, np.nan])\n tm.assert_equal(result, expected)\n\n result = td / np.timedelta64(1, \"s\")\n expected = index_or_series(\n [31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3, np.nan]\n )\n tm.assert_equal(result, expected)\n\n exp_values = np.asarray(td).astype(\"m8[s]\")\n exp_tda = TimedeltaArray._simple_new(exp_values, dtype=exp_values.dtype)\n expected = index_or_series(exp_tda)\n assert expected.dtype == \"m8[s]\"\n result = td.astype(\"timedelta64[s]\")\n tm.assert_equal(result, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 326, "n_words": 121, "vocab_size": 69, "complexity": 1, "nloc": 25, "token_counts": 230, "n_ast_nodes": 355, "n_identifiers": 25, "random_cut": "def test_freq_conversion(self, index_or_series):\n\n # doc example\n\n scalar = Timedelta(days=31)\n td = index_or_series(\n [sca" }, { "id": 321981, "commit_id": "3d3bc82c85da3df001d44d36a4caf0291a7d877f", "repo": "PaddleNLP", "path": "paddlenlp/transformers/roformer/modeling.py", "file_name": "modeling.py", "fun_name": "positional_embedding", "commit_message": "use paddle.einsum instead of paddlenlp.ops.einsum (#1557)", "code": "def positional_embedding(self, inputs):\n seq_len = inputs.shape[1]\n pos_seq = paddle.arange(0, seq_len, dtype=dtype_float)\n indices = paddle.arange(0, self.head_dim, 2, dtype=dtype_float)\n indices = 1 / 10000**(indices / self.head_dim)\n sinusoid_inp = paddle.einsum(\"i,d->id\", pos_seq, indices)\n pos_emb = paddle.concat(\n [paddle.sin(sinusoid_inp), paddle.cos(sinusoid_inp)], axis=-1)\n pos_emb = paddle.reshape(pos_emb, (1, 1, seq_len, self.head_dim))\n pos_emb.stop_gradient = True\n return pos_emb\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 120, "n_words": 47, "vocab_size": 33, "complexity": 1, "nloc": 11, "token_counts": 126, "n_ast_nodes": 186, 
"n_identifiers": 21, "random_cut": "def positional_embedding(self, inputs):\n seq_len = inputs.shape[1]\n pos_seq = paddle.arange(0, seq_len, dtype=dtype_float)\n indices = paddle.arange(0, self.head_dim, 2, dtype=dtype_float)\n indices = 1 / 10000**(indices / self.head_dim)\n sinusoid_inp = paddle.einsum(\"i,d->id\", pos_seq" }, { "id": 254826, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/backend/test/case/node/lrn.py", "file_name": "lrn.py", "fun_name": "export_default", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def export_default() -> None:\n alpha = 0.0001\n beta = 0.75\n bias = 1.0\n nsize = 3\n node = onnx.helper.make_node(\n 'LRN',\n inputs=['x'],\n outputs=['y'],\n size=3\n )\n x = np.random.randn(5, 5, 5, 5).astype(np.float32)\n square_sum = np.zeros((5, 5, 5, 5)).astype(np.float32)\n for n, c, h, w in np.ndindex(x.shape):\n square_sum[n, c, h, w] = sum(x[n,\n max(0, c - int(math.floor((nsize - 1) / 2))):min(5, c + int(math.ceil((nsize - 1) / 2)) + 1),\n h,\n w] ** 2)\n y = x / ((bias + (alpha / nsize) * square_sum) ** beta)\n expect(node, inputs=[x], outputs=[y],\n name='test_lrn_default')\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 24, "n_whitespaces": 358, "n_words": 87, "vocab_size": 61, "complexity": 2, "nloc": 21, "token_counts": 225, "n_ast_nodes": 327, "n_identifiers": 36, "random_cut": "def export_default() -> None:\n alpha = 0.0001\n beta = 0.75\n bias = 1.0\n nsize = 3\n node = onnx.helper.make_node(\n 'LRN',\n inputs=['x'],\n outputs=['y'],\n size=3\n )\n x = np.random.randn(5, 5, 5, 5).astype(np.float32)\n square_sum = np.zeros((5, 5, 5, 5)).astype(np.float32)\n for n, c, h, w in np.ndindex(x.shape):\n square_s" }, { "id": 259874, "commit_id": "a47d569e670fd4102af37c3165c9b1ddf6fd3005", "repo": "scikit-learn", "path": "benchmarks/bench_tsne_mnist.py", "file_name": "bench_tsne_mnist.py", "fun_name": "load_data", "commit_message": "ENH improve ARFF parser using pandas (#21938)\n\nCo-authored-by: Thomas J. 
Fan \r\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Adrin Jalali ", "code": "def load_data(dtype=np.float32, order=\"C\", shuffle=True, seed=0):\n \n print(\"Loading dataset...\")\n data = fetch_openml(\"mnist_784\", as_frame=True, parser=\"pandas\")\n\n X = check_array(data[\"data\"], dtype=dtype, order=order)\n y = data[\"target\"]\n\n if shuffle:\n X, y = _shuffle(X, y, random_state=seed)\n\n # Normalize features\n X /= 255\n return X, y\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 71, "n_words": 37, "vocab_size": 30, "complexity": 2, "nloc": 9, "token_counts": 88, "n_ast_nodes": 143, "n_identifiers": 17, "random_cut": "def load_data(dtype=np.float32, order=\"C\", shuffle=True, seed=0):\n \n print(\"Loading dataset...\")\n data = fetch_openml(\"mnist_784\", as_fra" }, { "id": 160789, "commit_id": "4ed458f16d9dd64554ccf49e315c5b8fb577d4cd", "repo": "numpy", "path": "numpy/lib/tests/test_arraysetops.py", "file_name": "test_arraysetops.py", "fun_name": "test_in1d_errors", "commit_message": "MAINT: change kind names for in1d\n\n- Switch dictionary->table, mergesort->sort", "code": "def test_in1d_errors(self):\n \n\n # Error 1: `kind` is not one of 'sort' 'table' or None.\n ar1 = np.array([1, 2, 3, 4, 5])\n ar2 = np.array([2, 4, 6, 8, 10])\n assert_raises(ValueError, in1d, ar1, ar2, kind='quicksort')\n\n # Error 2: `kind=\"table\"` does not work for non-integral arrays.\n obj_ar1 = np.array([1, 'a', 3, 'b', 5], dtype=object)\n obj_ar2 = np.array([1, 'a', 3, 'b', 5], dtype=object)\n assert_raises(ValueError, in1d, obj_ar1, obj_ar2, kind='table')\n\n for dtype in [np.int32, np.int64]:\n ar1 = np.array([-1, 2, 3, 4, 5], dtype=dtype)\n # The range of this array will overflow:\n overflow_ar2 = np.array([-1, np.iinfo(dtype).max], dtype=dtype)\n\n # Error 3: `kind=\"table\"` will trigger a runtime error\n # if there is an integer overflow expected when computing the\n # range of ar2\n assert_raises(\n RuntimeError,\n in1d, ar1, overflow_ar2, kind='table'\n )\n\n # Non-error: `kind=None` will *not* trigger a runtime error\n # if there is an integer overflow, it will switch to\n # the `sort` algorithm.\n result = np.in1d(ar1, overflow_ar2, kind=None)\n assert_array_equal(result, [True] + [False] * 4)\n result = np.in1d(ar1, overflow_ar2, kind='sort')\n assert_array_equal(result, [True] + [False] * 4)\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 436, "n_words": 167, "vocab_size": 99, "complexity": 2, "nloc": 18, "token_counts": 244, "n_ast_nodes": 365, "n_identifiers": 22, "random_cut": "def test_in1d_errors(self):\n \n\n # Error 1: `kind` is not one of 'sort' 'table' or None.\n ar1 = np.array([1, 2, 3, 4, 5])\n ar2 = np.array([2, 4, 6, 8, 10])\n assert_raises(ValueError, in1d, ar1, ar2, kind='quicksort')\n\n # Error 2: `kind=\"table\"` does not work for non-integral arrays.\n obj_ar1 = np.array([1, 'a', 3, 'b', 5], dtype=object)\n obj_ar2 = np.array([1, 'a', 3, 'b', 5], dtype=object)\n assert_raises(ValueError, in1d, obj_ar1, obj_ar2, kind='table')\n\n for dtype in [np.int32, np.int64]:\n ar1 = np.array([-1, 2, 3" }, { "id": 101250, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "tools/alignments/jobs.py", "file_name": "jobs.py", "fun_name": "_annotate_extract_boxes", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for 
imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def _annotate_extract_boxes(cls, image, face, index):\n \n for area in (\"face\", \"head\"):\n face.load_aligned(image, centering=area, force=True)\n color = (0, 255, 0) if area == \"face\" else (0, 0, 255)\n top_left = face.aligned.original_roi[0]\n top_left = (top_left[0], top_left[1] - 10)\n cv2.putText(image, str(index), top_left, cv2.FONT_HERSHEY_DUPLEX, 1.0, color, 1)\n cv2.polylines(image, [face.aligned.original_roi], True, color, 1)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 127, "n_words": 47, "vocab_size": 40, "complexity": 3, "nloc": 8, "token_counts": 126, "n_ast_nodes": 177, "n_identifiers": 18, "random_cut": "def _annotate_extract_boxes(cls, image, face, index):\n \n for area in (\"face\", \"head\"):\n face.load_aligned(image, centering=area, force=True)\n color = (0, 255, 0) if area == \"face\" else (0, 0, 255)\n top" }, { "id": 336083, "commit_id": "d5acb4110a5536f5c0ace4a0c158f0e0c71c0a50", "repo": "diffusers", "path": "src/diffusers/models/unet_conditional.py", "file_name": "unet_conditional.py", "fun_name": "set_weights", "commit_message": "Finalize ldm (#96)\n\n* upload\r\n\r\n* make checkpoint work\r\n\r\n* finalize", "code": "def set_weights(self):\n self.is_overwritten = True\n if self.ldm:\n self.time_embedding.linear_1.weight.data = self.time_embed[0].weight.data\n self.time_embedding.linear_1.bias.data = self.time_embed[0].bias.data\n self.time_embedding.linear_2.weight.data = self.time_embed[2].weight.data\n self.time_embedding.linear_2.bias.data = self.time_embed[2].bias.data\n\n self.conv_in.weight.data = self.input_blocks[0][0].weight.data\n self.conv_in.bias.data = self.input_blocks[0][0].bias.data\n\n # ================ SET WEIGHTS OF ALL WEIGHTS ==================\n for i, input_layer in enumerate(self.input_blocks[1:]):\n block_id = i // (self.config.num_res_blocks + 1)\n layer_in_block_id = i % (self.config.num_res_blocks + 1)\n\n if layer_in_block_id == 2:\n self.downsample_blocks[block_id].downsamplers[0].conv.weight.data = input_layer[0].op.weight.data\n self.downsample_blocks[block_id].downsamplers[0].conv.bias.data = input_layer[0].op.bias.data\n elif len(input_layer) > 1:\n self.downsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0])\n self.downsample_blocks[block_id].attentions[layer_in_block_id].set_weight(input_layer[1])\n else:\n self.downsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0])\n\n self.mid.resnets[0].set_weight(self.middle_block[0])\n self.mid.resnets[1].set_weight(self.middle_block[2])\n self.mid.attentions[0].set_weight(self.middle_block[1])\n\n for i, input_layer in enumerate(self.output_blocks):\n block_id = i // (self.config.num_res_blocks + 1)\n layer_in_block_id = i % (self.config.num_res_blocks + 1)\n\n if len(input_layer) > 2:\n self.upsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0])\n self.upsample_blocks[block_id].attentions[layer_in_block_id].set_weight(input_layer[1])\n self.upsample_blocks[block_id].upsamplers[0].conv.weight.data = input_layer[2].conv.weight.data\n self.upsample_blocks[block_id].upsamplers[0].conv.bias.data = input_layer[2].conv.bias.data\n elif len(input_layer) > 1 and 
\"Upsample2D\" in input_layer[1].__class__.__name__:\n self.upsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0])\n self.upsample_blocks[block_id].upsamplers[0].conv.weight.data = input_layer[1].conv.weight.data\n self.upsample_blocks[block_id].upsamplers[0].conv.bias.data = input_layer[1].conv.bias.data\n elif len(input_layer) > 1:\n self.upsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0])\n self.upsample_blocks[block_id].attentions[layer_in_block_id].set_weight(input_layer[1])\n else:\n self.upsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0])\n\n self.conv_norm_out.weight.data = self.out[0].weight.data\n self.conv_norm_out.bias.data = self.out[0].bias.data\n self.conv_out.weight.data = self.out[2].weight.data\n self.conv_out.bias.data = self.out[2].bias.data\n\n self.remove_ldm()\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 790, "n_words": 140, "vocab_size": 77, "complexity": 10, "nloc": 45, "token_counts": 721, "n_ast_nodes": 1112, "n_identifiers": 39, "random_cut": "def set_weights(self):\n self.is_overwritten = True\n if self.ldm:\n self.time_embedding.linear_1.weight.data = self.time_embed[0].weight.data\n self.time_embedding.linear_1.bias.data = self.time_embed[0].bias.data\n self.time_embedding.linear_2.weight.data = self.time_embed[2].weight.data\n self.time_embedding.linear_2.bias.data = self.time_embed[2].bias.data\n\n self.conv_in.weight.data = self.input_blocks[0][0].weight.data\n self.conv_in.bias.data = self.input_blocks[0][0].bias.data\n\n # ================ SET WEIGHTS OF ALL WEIGHTS ==================\n for i, input_layer in enumerate(self.input_blocks[1:]):\n block_id = i // (self.config.num_res_blocks + 1)\n layer_in_block_id = i % (self.config.num_res_blocks + 1)\n\n if layer_in_block_id == 2:\n self.downsample_blocks[block_id].downsamplers[0].conv.weight.data = input_layer[0].op.weight.data\n self.downsample_blocks[block_id].downsamplers[0].conv.bias.data = input_layer[0].op.bias.data\n elif len(input_layer) > 1:\n self.downsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0])\n self.downsample_blocks[block_id].attentions[layer_in_block_id].set_weight(input_layer[1])\n else:\n self.downsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0])\n\n self.mid.resnets[0].set_weight(self.middle_block[0])\n self.mid.resnets[1].set_weight(self.middle_block[2])\n self.mid.attentions[0].set_weight(self.middle_block[1])\n\n for i, input_layer in enumerate(self.output_blocks):\n block_id = i // (self.config.num_res_blocks + 1)\n layer_in_block_id = i % (self.config.num_res_blocks + 1)\n\n if len(input_layer) > 2:\n self.upsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0])\n self.upsample_blocks[block_id].attentions[layer_in_block_id].set_weight(input_layer[1])\n self.upsample_blocks[block_id].upsamplers[0].conv.weight.data = input_layer[2].conv.weight.data\n self.upsample_blocks[block_id].upsamplers[0].conv.bias.data = input_layer[2].conv.bias.data\n elif len(input_layer) > 1 and \"Upsample2D\" in input_layer[1].__class__.__name__:\n self.upsample_blocks[block_id].resnets[layer_in_block_id].set_weight(input_layer[0])\n self.upsample_blocks[block_id].upsamplers[0].conv.weight.data = input_l" }, { "id": 261610, "commit_id": "aec37352812494307a7c94efb928a2a916052460", "repo": "scikit-learn", "path": 
"sklearn/linear_model/tests/test_common.py", "file_name": "test_common.py", "fun_name": "test_balance_property", "commit_message": "TST add balance property check for linear models (#22892)", "code": "def test_balance_property(model, with_sample_weight, global_random_seed):\n # Test that sum(y_predicted) == sum(y_observed) on the training set.\n # This must hold for all linear models with deviance of an exponential disperson\n # family as loss and the corresponding canonical link if fit_intercept=True.\n # Examples:\n # - squared error and identity link (most linear models)\n # - Poisson deviance with log link\n # - log loss with logit link\n # This is known as balance property or unconditional calibration/unbiasedness.\n # For reference, see Corollary 3.18, 3.20 and Chapter 5.1.5 of\n # M.V. Wuthrich and M. Merz, \"Statistical Foundations of Actuarial Learning and its\n # Applications\" (June 3, 2022). http://doi.org/10.2139/ssrn.3822407\n\n if (\n with_sample_weight\n and \"sample_weight\" not in inspect.signature(model.fit).parameters.keys()\n ):\n pytest.skip(\"Estimator does not support sample_weight.\")\n\n rel = 1e-4 # test precision\n if isinstance(model, SGDRegressor):\n rel = 1e-1\n elif hasattr(model, \"solver\") and model.solver == \"saga\":\n rel = 1e-2\n\n rng = np.random.RandomState(global_random_seed)\n n_train, n_features, n_targets = 100, 10, None\n if isinstance(\n model,\n (MultiTaskElasticNet, MultiTaskElasticNetCV, MultiTaskLasso, MultiTaskLassoCV),\n ):\n n_targets = 3\n X = make_low_rank_matrix(n_samples=n_train, n_features=n_features, random_state=rng)\n if n_targets:\n coef = (\n rng.uniform(low=-2, high=2, size=(n_features, n_targets))\n / np.max(X, axis=0)[:, None]\n )\n else:\n coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)\n\n expectation = np.exp(X @ coef + 0.5)\n y = rng.poisson(lam=expectation) + 1 # strict positive, i.e. y > 0\n if is_classifier(model):\n y = (y > expectation + 1).astype(np.float64)\n\n if with_sample_weight:\n sw = rng.uniform(low=1, high=10, size=y.shape[0])\n else:\n sw = None\n\n model.set_params(fit_intercept=True) # to be sure\n if with_sample_weight:\n model.fit(X, y, sample_weight=sw)\n else:\n model.fit(X, y)\n\n # Assert balance property.\n if is_classifier(model):\n assert np.average(model.predict_proba(X)[:, 1], weights=sw) == pytest.approx(\n np.average(y, weights=sw), rel=rel\n )\n else:\n assert np.average(model.predict(X), weights=sw, axis=0) == pytest.approx(\n np.average(y, weights=sw, axis=0), rel=rel\n )\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 570, "n_words": 270, "vocab_size": 175, "complexity": 12, "nloc": 47, "token_counts": 413, "n_ast_nodes": 631, "n_identifiers": 56, "random_cut": "def test_balance_property(model, with_sample_weight, global_random_seed):\n # Test that sum(y_predicted) == sum(y_observed) on the training set.\n # This must hold for all linear models with deviance of an exponential disperson\n # family as loss and the corresponding canonical link if fit_intercept=True.\n # Examples:\n # - squared error and identity link (most linear models)\n # - Poisson deviance with log link\n # - log loss with logit link\n # This is known as balance property or unconditional calibration/unbiasedness.\n # For reference, see Corollary 3.18, 3.20 and Chapter 5.1.5 of\n # M.V. Wuthrich and M. 
Merz, \"Statistical Foundations of Actuarial Learning and its\n # Applications\" (June 3, 2022). http://doi.org/10.2139/ssrn.3822407\n\n if (\n with_sample_weight\n and \"sample_weight\" not in inspect.signature(model.fit).parameters.keys()\n ):\n pytest.skip(\"Estimator does not support sample_weight.\")\n\n rel = 1e-4 # test precision\n if isinstance(model, SGDRegressor):\n rel = 1e-1\n elif hasattr(model, \"solver\") and model.solver == \"saga\":\n rel = 1e-2\n\n rng = np.random.RandomState(global_random_seed)\n n_train, n_features, n_targets = 100, 10, None\n if isinstance(\n model,\n (MultiTaskElasticNet, MultiTaskElasticNetCV, MultiTaskLasso, MultiTaskLassoCV),\n ):\n n_targets = 3\n X = make_low_rank_matrix(n_samples=n_train, n_features=n_features, random_state=rng)\n if n_targets:\n coef = (\n rng.uniform(low=-2, high=2, size=(n_features, n_targets))\n / np.max(X, axis=0)[:, None]\n )\n else:\n coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)\n\n expectation = np.exp(X @ coef + 0.5)\n y = rng.poisson(lam=expectation) + 1 # strict positive, i.e. y > 0\n if is_classifier(model):\n y = (y > expectation + 1).astype(np.float64" }, { "id": 48672, "commit_id": "c10f2266222c434485889b08cc1463acdb8fa169", "repo": "django-rest-framework", "path": "rest_framework/renderers.py", "file_name": "renderers.py", "fun_name": "get_rendered_html_form", "commit_message": "Refactor: Replace try/except with contextlib.suppress() (#8676)", "code": "def get_rendered_html_form(self, data, view, method, request):\n \n # See issue #2089 for refactoring this.\n serializer = getattr(data, 'serializer', None)\n if serializer and not getattr(serializer, 'many', False):\n instance = getattr(serializer, 'instance', None)\n if isinstance(instance, Page):\n instance = None\n else:\n instance = None\n\n # If this is valid serializer data, and the form is for the same\n # HTTP method as was used in the request then use the existing\n # serializer instance, rather than dynamically creating a new one.\n if request.method == method and serializer is not None:\n try:\n kwargs = {'data': request.data}\n except ParseError:\n kwargs = {}\n existing_serializer = serializer\n else:\n kwargs = {}\n existing_serializer = None\n\n with override_method(view, request, method) as request:\n if not self.show_form_for_method(view, method, request, instance):\n return\n\n if method in ('DELETE', 'OPTIONS'):\n return True # Don't actually need to return a form\n\n has_serializer = getattr(view, 'get_serializer', None)\n has_serializer_class = getattr(view, 'serializer_class', None)\n\n if (\n (not has_serializer and not has_serializer_class) or\n not any(is_form_media_type(parser.media_type) for parser in view.parser_classes)\n ):\n return\n\n if existing_serializer is not None:\n with contextlib.suppress(TypeError):\n return self.render_form_for_serializer(existing_serializer)\n if has_serializer:\n if method in ('PUT', 'PATCH'):\n serializer = view.get_serializer(instance=instance, **kwargs)\n else:\n serializer = view.get_serializer(**kwargs)\n else:\n # at this point we must have a serializer_class\n if method in ('PUT', 'PATCH'):\n serializer = self._get_serializer(view.serializer_class, view,\n request, instance=instance, **kwargs)\n else:\n serializer = self._get_serializer(view.serializer_class, view,\n request, **kwargs)\n\n return self.render_form_for_serializer(serializer)\n", "url": "https://github.com/encode/django-rest-framework.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, 
"ast_levels": 17, "n_whitespaces": 902, "n_words": 215, "vocab_size": 111, "complexity": 17, "nloc": 45, "token_counts": 308, "n_ast_nodes": 503, "n_identifiers": 30, "random_cut": "def get_rendered_html_form(self, data, view, method, request):\n \n # See issue #2089 for refactoring this.\n serializer = getattr(data, 'serializer', None)\n if serializer and not getattr(serializer, 'many', False):\n instance = getattr(serializer, 'instance', None)\n if isinstance(instance, Page):\n instance = None\n else:\n instance = None\n\n # If this is valid serializer data, and the form is for the same\n # HTTP method as was used in the request then use the existing\n # serializer instance, rather than dynamically creating a new one.\n if request.method == method and serializer is not None:\n try:\n kwargs = {'data': request.data}\n except ParseError:\n kwargs = {}\n existing_serializer = serializer\n else:\n kwargs = {}\n existing_serializer = None\n\n with override_method(view, request, metho" }, { "id": 66598, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v12_0/add_document_type_field_for_italy_einvoicing.py", "file_name": "add_document_type_field_for_italy_einvoicing.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tcompany = frappe.get_all(\"Company\", filters={\"country\": \"Italy\"})\n\tif not company:\n\t\treturn\n\n\tcustom_fields = {\n\t\t\"Sales Invoice\": [\n\t\t\tdict(\n\t\t\t\tfieldname=\"type_of_document\",\n\t\t\t\tlabel=\"Type of Document\",\n\t\t\t\tfieldtype=\"Select\",\n\t\t\t\tinsert_after=\"customer_fiscal_code\",\n\t\t\t\toptions=\"\\nTD01\\nTD02\\nTD03\\nTD04\\nTD05\\nTD06\\nTD16\\nTD17\\nTD18\\nTD19\\nTD20\\nTD21\\nTD22\\nTD23\\nTD24\\nTD25\\nTD26\\nTD27\",\n\t\t\t),\n\t\t]\n\t}\n\n\tcreate_custom_fields(custom_fields, update=True)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 14, "n_words": 30, "vocab_size": 29, "complexity": 2, "nloc": 16, "token_counts": 65, "n_ast_nodes": 133, "n_identifiers": 14, "random_cut": "def execute():\n\tcompany = frappe.get_all(\"Company\", filters={\"country\": \"Italy\"})\n\tif not company:\n\t\treturn\n\n\tcustom_fields = {\n\t\t\"Sales Invoice\": [\n\t\t\tdict(\n\t\t\t\tfieldname=\"type_of_document\",\n\t\t\t\tlabel=\"Type of Document\",\n\t\t\t\tfieldtype=\"Select\",\n\t\t\t\tinsert_after=\"customer_fiscal_code\",\n\t\t\t\toptions=\"\\nTD01\\nTD02\\nTD03\\nTD04\\nTD05\\nTD06\\nTD16\\nTD17\\nTD18\\nTD19\\nTD20\\nTD21\\nTD22\\nTD23\\nTD24\\nTD25\\nTD26\\nTD27\",\n\t\t\t),\n\t\t]" }, { "id": 115765, "commit_id": "68846c2339b46b41e5cc7c24023e079da09716f2", "repo": "mindsdb", "path": "mindsdb/api/mongo/responders/insert.py", "file_name": "insert.py", "fun_name": "_result", "commit_message": "create predictor via handler in sql api", "code": "def _result(self, query, request_env, mindsdb_env):\n table = query['insert']\n\n if table == 'databases':\n for doc in query['documents']:\n if '_id' in doc:\n del doc['_id']\n for field in ('name', 'engine', 'connection_args'):\n if field not in doc:\n raise Exception(f\"'{field}' must be specified\")\n\n status = HandlerStatusResponse(success=False)\n try:\n handler = mindsdb_env['integration_controller'].create_handler(\n handler_type=doc['engine'],\n connection_data=doc['connection_args']\n )\n status = handler.check_connection()\n except Exception as e:\n status.error_message = str(e)\n\n if 
status.success is False:\n raise Exception(f\"Can't connect to db: {status.error_message}\")\n\n integration = mindsdb_env['integration_controller'].get(doc['name'])\n if integration is not None:\n raise Exception(f\"Database '{doc['name']}' already exists.\")\n\n for doc in query['documents']:\n mindsdb_env['integration_controller'].add(doc['name'], doc['engine'], doc['connection_args'])\n\n result = {\n \"n\": len(query['documents']),\n \"ok\": 1\n }\n elif table == 'predictors':\n predictors_columns = [\n 'name',\n 'status',\n 'accuracy',\n 'predict',\n 'select_data_query',\n 'training_options',\n 'connection'\n ]\n\n models = mindsdb_env['model_interface'].get_models()\n\n if len(query['documents']) != 1:\n raise Exception(\"Must be inserted just one predictor at time\")\n\n for doc in query['documents']:\n if '_id' in doc:\n del doc['_id']\n\n bad_columns = [x for x in doc if x not in predictors_columns]\n if len(bad_columns) > 0:\n raise Exception(f\"Is no possible insert this columns to 'predictors' collection: {', '.join(bad_columns)}\")\n\n if 'name' not in doc:\n raise Exception(\"Please, specify 'name' field\")\n\n if 'predict' not in doc:\n raise Exception(\"Please, specify 'predict' field\")\n\n if doc['name'] in [x['name'] for x in models]:\n raise Exception(f\"Predictor with name '{doc['name']}' already exists\")\n\n select_data_query = doc.get('select_data_query')\n if select_data_query is None:\n raise Exception(\"'select_data_query' must be in query\")\n\n kwargs = doc.get('training_options', {})\n if 'timeseries_settings' in kwargs:\n # mongo shell client sends int as float. need to convert it to int\n for key in ('window', 'horizon'):\n val = kwargs['timeseries_settings'].get(key)\n if val is not None:\n kwargs['timeseries_settings'][key] = int(val)\n\n integrations = mindsdb_env['integration_controller'].get_all().keys()\n connection = doc.get('connection')\n if connection is None:\n if 'default_mongodb' in integrations:\n connection = 'default_mongodb'\n else:\n for integration in integrations:\n if integration.startswith('mongodb_'):\n connection = integration\n break\n\n if connection is None:\n raise Exception(\"Can't find connection for data source\")\n\n predict = doc['predict']\n if not isinstance(predict, list):\n predict = [x.strip() for x in predict.split(',')]\n\n create_predictor_ast = CreatePredictor(\n name=doc['name'],\n integration_name=connection,\n query_str=select_data_query,\n targets=predict,\n # TODO add TS settings!!!\n )\n\n lw_handler = mindsdb_env['integration_controller'].get_handler('lightwood')\n result = lw_handler.query(create_predictor_ast)\n if result.type == RESPONSE_TYPE.ERROR:\n raise Exception(result.error_message)\n\n result = {\n \"n\": len(query['documents']),\n \"ok\": 1\n }\n else:\n raise Exception(\"Only insert to 'predictors' or 'databases' allowed\")\n\n return result\n\n\nresponder = Responce()\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 1917, "n_words": 351, "vocab_size": 190, "complexity": 33, "nloc": 95, "token_counts": 595, "n_ast_nodes": 1085, "n_identifiers": 59, "random_cut": "def _result(self, query, request_env, mindsdb_env):\n table = query['insert']\n\n if table == 'databases':\n for doc in query['documents']:\n if '_id' in doc:\n del doc['_id']\n for field in ('name', 'engine', 'connection_args'):\n if field not in doc:\n raise Exception(f\"'{field}' must be specified\")\n\n status = 
HandlerStatusResponse(success=False)\n try:\n handler = mindsdb_env['integration_controller'].create_handler(\n handler_type=doc['engine'],\n connection_data=doc['connection_args']\n )\n status = handler.check_connection()\n except Exception as e:\n status.error_message = str(e)\n\n if status.success is False:\n raise Exception(f\"Can't connect to db: {status.error_message}\")\n\n integration = mindsdb_env['integratio" }, { "id": 215811, "commit_id": "a35b29b2651bf33c5d5b45e64bc7765ffde4aff4", "repo": "salt", "path": "tests/pytests/functional/modules/file/test_symlink.py", "file_name": "test_symlink.py", "fun_name": "test_symlink_target_relative_path", "commit_message": "Add some funtional tests\n\nAdd functional tests for the following:\n- file.readlink\n- file.replace\n- file.symlink\n\nRemove unit tests for file.replace as they are duplicated in the added\nfunctional test", "code": "def test_symlink_target_relative_path(file, source):\n \n target = \"..{}symlink.lnk\".format(os.path.sep)\n with pytest.raises(SaltInvocationError) as exc:\n file.symlink(source, target)\n assert \"Link path must be absolute\" in exc.value.message\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 39, "n_words": 20, "vocab_size": 20, "complexity": 1, "nloc": 5, "token_counts": 46, "n_ast_nodes": 81, "n_identifiers": 15, "random_cut": "def test_symlink_target_relative_path(file, source):\n \n target = \"..{}symlink.lnk\".format(os.path.sep)\n with pytest.raises(SaltInvocationError) as exc:\n " }, { "id": 251737, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/addons/test_serverplayback.py", "file_name": "test_serverplayback.py", "fun_name": "test_server_playback_kill", "commit_message": "make it black!", "code": "async def test_server_playback_kill():\n s = serverplayback.ServerPlayback()\n with taddons.context(s) as tctx:\n tctx.configure(s, server_replay_refresh=True, server_replay_kill_extra=True)\n\n f = tflow.tflow()\n f.response = mitmproxy.test.tutils.tresp(content=f.request.content)\n s.load_flows([f])\n\n f = tflow.tflow()\n f.request.host = \"nonexistent\"\n await tctx.cycle(s, f)\n assert f.error\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 92, "n_words": 31, "vocab_size": 25, "complexity": 1, "nloc": 11, "token_counts": 97, "n_ast_nodes": 161, "n_identifiers": 23, "random_cut": "async def test_server_playback_kill():\n s = serverplayback.ServerPlayback()\n with taddons.context(s) as tctx:\n tctx.configure(s, server_replay_refresh=True, server_replay_kill_extra=True)\n\n f = tflow.tflow()\n f.response = mitmproxy.test.tutils.tresp(content=f.request.content)\n s.load_flows([f])\n\n " }, { "id": 130394, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/autoscaler/_private/aws/config.py", "file_name": "config.py", "fun_name": "bootstrap_aws", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def bootstrap_aws(config):\n # create a copy of the input config to modify\n config = copy.deepcopy(config)\n\n # Log warnings if user included deprecated `head_node` or `worker_nodes`\n # fields. 
Raise error if no `available_node_types`\n check_legacy_fields(config)\n # Used internally to store head IAM role.\n config[\"head_node\"] = {}\n\n # If a LaunchTemplate is provided, extract the necessary fields for the\n # config stages below.\n config = _configure_from_launch_template(config)\n\n # If NetworkInterfaces are provided, extract the necessary fields for the\n # config stages below.\n config = _configure_from_network_interfaces(config)\n\n # The head node needs to have an IAM role that allows it to create further\n # EC2 instances.\n config = _configure_iam_role(config)\n\n # Configure SSH access, using an existing key pair if possible.\n config = _configure_key_pair(config)\n global_event_system.execute_callback(\n CreateClusterEvent.ssh_keypair_downloaded,\n {\"ssh_key_path\": config[\"auth\"][\"ssh_private_key\"]},\n )\n\n # Pick a reasonable subnet if not specified by the user.\n config = _configure_subnet(config)\n\n # Cluster workers should be in a security group that permits traffic within\n # the group, and also SSH access from outside.\n config = _configure_security_group(config)\n\n # Provide a helpful message for missing AMI.\n _check_ami(config)\n\n return config\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 269, "n_words": 172, "vocab_size": 110, "complexity": 1, "nloc": 16, "token_counts": 87, "n_ast_nodes": 163, "n_identifiers": 16, "random_cut": "def bootstrap_aws(config):\n # create a copy of the input config to modify\n config = copy.deepcopy(config)\n\n # Log warnings if user included deprecated `head_node` or `worker_nodes`\n # fields. Raise error if no `available_node_types`\n check_legacy_fields(config)\n # Used internally to store head IAM role.\n config[\"head_node\"] = {}\n\n # If a LaunchTemplate is provided, extract the necessary fields for the\n # config stages below.\n config = _configure_from_launch_template(config)\n\n # If NetworkInterfaces are provided, extract the necessary fields for the\n # config stages below.\n config = _configure_from_network_interfaces(config)\n\n # The head node needs to have an IAM role that allows it to create further\n # EC2 instances.\n config = _configure_iam_role(config)\n\n # Configure SSH access, using an existing key pair if possible.\n config = _configure_key_pair(config)\n global_event_system.execute_callback(\n CreateClusterEvent.ssh_keypair_downloaded,\n {\"ssh_key_path\": config[\"auth\"][\"ssh_private_key\"]},\n )\n\n # Pick a reasonable subnet if not specified by the user.\n config = _configure_subnet(config)\n\n # Cluster workers should be in a security group that permits traffic within\n # the group, and also SSH access from outside.\n c" }, { "id": 261583, "commit_id": "d8fa96c29828e3ca79ddd5d7466521ac4d95213c", "repo": "scikit-learn", "path": "sklearn/impute/tests/test_common.py", "file_name": "test_common.py", "fun_name": "sparse_imputers", "commit_message": "ENH keep features with all missing values during imputation (#24770)\n\nCo-authored-by: Chiara Marmo \r\nCo-authored-by: Julien Jerphanion \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>\r\nCo-authored-by: Vitor SRG \r\nFixes https://github.com/scikit-learn/scikit-learn/pull/16695\r\nFixes https://github.com/scikit-learn/scikit-learn/issues/16426\r\nFixes https://github.com/scikit-learn/scikit-learn/issues/16977", "code": "def sparse_imputers():\n return [SimpleImputer()]\n\n\n# ConvergenceWarning 
will be raised by the IterativeImputer\n@pytest.mark.filterwarnings(\"ignore::sklearn.exceptions.ConvergenceWarning\")\n@pytest.mark.parametrize(\"imputer\", imputers(), ids=lambda x: x.__class__.__name__)", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.filterwarnings(\"ignore::sklearn.exceptions.ConvergenceWarning\")\n@pytest.mark.parametrize(\"imputer\", imputers(), ids=lambda x: x.__class__.__name__)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 17, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 74, "n_identifiers": 11, "random_cut": "def sparse_imputers():\n return [SimpleImputer()]\n\n\n# ConvergenceWarning will be raised by the IterativeImputer\n@pytest.mark.filterwarnings(\"ignore::sklearn.exceptions.ConvergenceWarning\")\n" }, { "id": 154434, "commit_id": "808b2dd3487bba0dfb7d90fe9b6cea4c5b1b1d52", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/omnisci_on_native/interchange/dataframe_protocol/column.py", "file_name": "column.py", "fun_name": "get_buffers", "commit_message": "FIX-#3983: FIX-#4107: Materialize 'rowid' columns when selecting rows by position (#4834)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Andrey Pavlenko ", "code": "def get_buffers(self) -> Dict[str, Any]:\n self._materialize_actual_buffers()\n at = self._pyarrow_table\n # Get the last column since the first one could be the index\n pyarrow_array = at.column(-1).chunks[0]\n\n result = dict()\n result[\"data\"] = self._get_data_buffer(pyarrow_array)\n result[\"validity\"] = self._get_validity_buffer(pyarrow_array)\n result[\"offsets\"] = self._get_offsets_buffer(pyarrow_array)\n\n return result\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 101, "n_words": 39, "vocab_size": 31, "complexity": 1, "nloc": 9, "token_counts": 76, "n_ast_nodes": 127, "n_identifiers": 16, "random_cut": "def get_buffers(self) -> Dict[str, Any]:\n self._materialize_actual_buffers()\n at = self._pyarrow_table\n # Get the last column since the first one could be the index\n pyarrow_array = at.column(-1).chunks[0]\n\n result = dict()\n " }, { "id": 202527, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/custom_pk/tests.py", "file_name": "tests.py", "fun_name": "test_zero_non_autoincrement_pk", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_zero_non_autoincrement_pk(self):\n Employee.objects.create(employee_code=0, first_name=\"Frank\", last_name=\"Jones\")\n employee = Employee.objects.get(pk=0)\n self.assertEqual(employee.employee_code, 0)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 45, "n_ast_nodes": 73, "n_identifiers": 12, "random_cut": "def test_zero_non_autoincrement_pk(self):\n Employee.objects.cre" }, { "id": 149645, "commit_id": "810e190e164be61a530e4d550c246dddbcdb4f1c", "repo": "freqtrade", "path": "tests/strategy/test_default_strategy.py", "file_name": "test_default_strategy.py", "fun_name": "test_strategy_test_v3", "commit_message": "added tests for bot_start", "code": "def test_strategy_test_v3(result, fee, is_short, side):\n strategy = StrategyTestV3({})\n\n metadata = {'pair': 'ETH/BTC'}\n assert 
type(strategy.minimal_roi) is dict\n assert type(strategy.stoploss) is float\n assert type(strategy.timeframe) is str\n indicators = strategy.populate_indicators(result, metadata)\n assert type(indicators) is DataFrame\n assert type(strategy.populate_buy_trend(indicators, metadata)) is DataFrame\n assert type(strategy.populate_sell_trend(indicators, metadata)) is DataFrame\n assert strategy.bot_started is True\n\n trade = Trade(\n open_rate=19_000,\n amount=0.1,\n pair='ETH/BTC',\n fee_open=fee.return_value,\n is_short=is_short\n )\n\n assert strategy.confirm_trade_entry(pair='ETH/BTC', order_type='limit', amount=0.1,\n rate=20000, time_in_force='gtc',\n current_time=datetime.utcnow(),\n side=side, entry_tag=None) is True\n assert strategy.confirm_trade_exit(pair='ETH/BTC', trade=trade, order_type='limit', amount=0.1,\n rate=20000, time_in_force='gtc', exit_reason='roi',\n sell_reason='roi',\n current_time=datetime.utcnow(),\n side=side) is True\n\n assert strategy.custom_stoploss(pair='ETH/BTC', trade=trade, current_time=datetime.now(),\n current_rate=20_000, current_profit=0.05) == strategy.stoploss\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 470, "n_words": 87, "vocab_size": 55, "complexity": 1, "nloc": 29, "token_counts": 264, "n_ast_nodes": 392, "n_identifiers": 43, "random_cut": "def test_strategy_test_v3(result, fee, is_short, side):\n strategy = StrategyTestV3({})\n\n metadata = {'pair': 'ETH/BTC'}\n assert type(strategy.minimal_roi) is dict\n assert type(strategy.stoploss) is float\n assert type(strategy.timeframe) is str\n indicators = strategy.populate_indicators(result, metadata)\n assert type(indicators) is DataFrame\n assert type(strategy.populate_buy_trend(indicators, metadata)) is DataFrame\n assert type(strategy.populate_sell_trend(indicators, metadata)) is DataFrame\n assert strategy.bot_started is True\n\n trade = Trade(\n open_rate=19_000,\n amount=0.1,\n pair='ETH/BTC',\n fee_open=fee.return_value,\n is_short=is_short\n )\n\n assert strategy.confirm_trade_entry(pair='ETH/BTC', order_type='limit', amount=0.1,\n rate=20000, time_in_force='gtc',\n current_time=datetime.utcnow(),\n side=side, entry_tag=None) is True\n assert strategy.confirm_trade_exit(pair='ETH/BTC', trade=trade, order_type='limit', amount=0.1,\n rate=20000, time_in_force='gtc', exit_reason='roi',\n sell_reason='roi',\n current_time=datetime.utcnow(),\n side=side) is True\n\n assert strategy.custom_stoploss(pair='ETH/BTC', trade=trade, current_time=datetime.now(),\n current_rate=20_000, current_profi" }, { "id": 6940, "commit_id": "1b746109a24dd8d7dfa076c874821d8b9d3e440d", "repo": "ludwig", "path": "ludwig/utils/data_utils.py", "file_name": "data_utils.py", "fun_name": "read_stata", "commit_message": "Use the Backend to check for dask dataframes, instead of a hard check. 
(#2113)", "code": "def read_stata(data_fp, df_lib):\n # https://github.com/dask/dask/issues/9055\n if is_dask_lib(df_lib):\n logger.warning(\"Falling back to pd.read_stata() since dask backend does not support it\")\n return dd.from_pandas(pd.read_stata(data_fp), npartitions=1)\n return df_lib.read_stata(data_fp)\n\n\n@spread", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "@spread", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 45, "n_words": 24, "vocab_size": 23, "complexity": 2, "nloc": 5, "token_counts": 42, "n_ast_nodes": 73, "n_identifiers": 11, "random_cut": "def read_stata(data_fp, df_lib):\n # https://github.com/dask/dask/issues/9055\n if is_dask_lib(df_lib):\n logger.warning(\"Falling back to pd.read_stata() since " }, { "id": 8015, "commit_id": "e7c99518c8b19303db8409dad51ffa24c8075308", "repo": "ludwig", "path": "tests/ludwig/decoders/test_sequence_decoder.py", "file_name": "test_sequence_decoder.py", "fun_name": "test_sequence_generator_decoder", "commit_message": "Test for parameter updates in Ludwig Decoders - Part 4 (#2354)\n\n* feat: reintroduce functions to test parameter updates\r\n\r\n* feat: reintroduce functions to test parameter updates\r\n\r\n* WIP: working example with fully connected test\r\n\r\n* WIP: more fleshed out example of fully connected test\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* refactor: adjust parameter settings to pass parameter update test\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* refactor: looping construct in parameter update test\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* test: add parameter update test to tabnet module.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* test: incorporate reviewer comment\r\n\r\nparameterize threshold for parameter updates depending\r\non configuration of ludwig component\r\n\r\n* test: rename from LudwigModel to LudwigModule\r\n\r\nincorporate reviewer comment\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* doc: update comments\r\n\r\n* feat: function to report back updated parameters\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* refactor: parameter update checker function\r\n\r\nincorporate review comments and add support for\r\nfrozen parameters\r\n\r\n* refactor: test for parameter updates to use parameter checker function\r\n\r\nincorporate reviewer comments, remove from simple components\r\nfocus test on more complex components.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* code: clean up unused variables\r\n\r\n* doc: clean up comments\r\n\r\n* doc: code comment\r\n\r\n* test: add parameter update test\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* test: add parameter update test to resnet\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* refactor: handle frozen parameters consistently\r\n\r\n* test: add parameter update test\r\n\r\n* [pre-commit.ci] 
auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* test: add parameter update test to set encoder\r\n\r\n* test: add parameter update test to sequence encoder\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* test: add parameter update test to categorical encoder\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* code: fix strings\r\n\r\n* code: remove debugging code\r\n\r\n* test: add parameter update test to image encoders\r\n\r\n* test: incorporate reviewr commnent\r\n\r\nseparate out parameter update checking function into\r\nseperate module.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* test: incorporate reviewer comment\r\n\r\ncode cleanup\r\n\r\n* code: remove unneeded import\r\n\r\n* refactor: update for module refactoring\r\n\r\n* test: add parameter update check\r\n\r\n* test: add parameter update check\r\n\r\n* test: add parameter update check\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* refactor: add support for combiner output testing\r\n\r\n* WIP: add parameter update test\r\n\r\nfix incorrect comparision in tabtransformer test\r\n\r\n* fix: error in retrieving input feature type\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* WIP: fine tuning parameter update test\r\n\r\n* WIP: disale parameter update check in tabtransformer combiner\r\n\r\n* test: add parameter update test to attention reducer\r\n\r\n* test: add parameter update test to multi-head self attention\r\n\r\n* test: add parameter update test for TransformerBlock and TransformerStack\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* test: working parameter update test for TabTransformerCombiner\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix: error in retrieving tensor to compute loss\r\n\r\n* doc: update comments\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* doc: document magic numbers in TabTransformer parameter update test\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* doc: clarified comments\r\n\r\n* doc: document reason for bypassing parameter update testing\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* doc: explain rationale for adjusting trainable parameter count in assertion tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* test: add parameter update checking to unit tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* test: add parameter update checking to unit tests\r\n\r\n* refactor: simplify code\r\n\r\n* refactor: split test_tabtransformer_combiner by input feature types\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* test: add test case for 
single token sequence for self attention\r\n\r\n* refactor: add adjustment for single categorical input feature and document rationale\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* test: remove single category input feature test case\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def test_sequence_generator_decoder(cell_type, num_layers, batch_size):\n hidden_size = 256\n vocab_size = 50\n max_sequence_length = 10\n\n # make repeatable\n set_random_seed(RANDOM_SEED)\n\n combiner_outputs = {HIDDEN: torch.rand([batch_size, hidden_size])}\n sequence_rnn_decoder = SequenceGeneratorDecoder(\n input_size=hidden_size,\n vocab_size=vocab_size,\n max_sequence_length=max_sequence_length,\n cell_type=cell_type,\n num_layers=num_layers,\n )\n\n output = sequence_rnn_decoder(combiner_outputs, target=None)\n\n assert list(output[LOGITS].size()) == [batch_size, max_sequence_length, vocab_size]\n\n # check for parameter updating\n target = torch.randn(output[LOGITS].shape)\n fpc, tpc, upc, not_updated = check_module_parameters_updated(sequence_rnn_decoder, (combiner_outputs, None), target)\n assert upc == tpc, f\"Failed to update parameters. Parameters not update: {not_updated}\"\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 147, "n_words": 70, "vocab_size": 59, "complexity": 1, "nloc": 18, "token_counts": 133, "n_ast_nodes": 199, "n_identifiers": 28, "random_cut": "def test_sequence_generator_decoder(cell_type, num_layers, batch_size):\n hidden_size = 256\n vocab_size = 50\n max_sequence_length = 10\n\n # make repeatable\n set_random_seed(RANDOM_SEED)\n\n combiner_outputs = {HIDDEN: torch.rand([batch_size, hidden_size])}\n sequence_rnn_decoder = SequenceGeneratorDecoder(\n input_size=hidden_size,\n vocab_size=vocab_size,\n max_sequence_length=max_sequence_length,\n cell_type=cell_type,\n num_layers=num_layers,\n )\n\n output = sequence_rnn_decoder(combiner_outputs, target=None)\n\n assert list(output[LOGITS].size()) ==" }, { "id": 241810, "commit_id": "a1546047bc146bf3189fa905c3415475b0e47931", "repo": "scipy", "path": "scipy/stats/_stats_py.py", "file_name": "_stats_py.py", "fun_name": "hmean", "commit_message": "ENH: stats: add weights in harmonic mean (#15347)\n\nCo-authored-by: Pamphile Roy ", "code": "def hmean(a, axis=0, dtype=None, *, weights=None):\n \n if not isinstance(a, np.ndarray):\n a = np.array(a, dtype=dtype)\n elif dtype:\n # Must change the default dtype allowing array type\n if isinstance(a, np.ma.MaskedArray):\n a = np.ma.asarray(a, dtype=dtype)\n else:\n a = np.asarray(a, dtype=dtype)\n\n if np.all(a >= 0):\n # Harmonic mean only defined if greater than or equal to zero.\n if weights is not None:\n weights = np.asanyarray(weights, dtype=dtype)\n\n with np.errstate(divide='ignore'):\n return 1.0 / np.average(1.0 / a, axis=axis, weights=weights)\n else:\n raise ValueError(\"Harmonic mean only defined if all elements greater \"\n \"than or equal to zero\")\n\n\nModeResult = namedtuple('ModeResult', ('mode', 'count'))\n\n", "url": "https://github.com/scipy/scipy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 231, "n_words": 93, "vocab_size": 66, "complexity": 6, "nloc": 16, "token_counts": 155, "n_ast_nodes": 265, "n_identifiers": 20, "random_cut": "def hmean(a, axis=0, dtype=None, *, 
weights=None):\n \n if not isinstance(a, np.ndarray):\n a = np.array(a, dtype=dtype)\n elif d" }, { "id": 33594, "commit_id": "c126a239bcea9c68453cf86045a5177afbe2be6c", "repo": "transformers", "path": "tests/models/longformer/test_modeling_tf_longformer.py", "file_name": "test_modeling_tf_longformer.py", "fun_name": "test_chunk", "commit_message": "Fix tflongformer int dtype (#18907)\n\n* Use int64 throughout TFLongFormer\r\n\r\n* make style\r\n\r\n* Do some more fixed casting in TFLongFormer\r\n\r\n* Fix some wonky \"is None\" conditionals\r\n\r\n* Cast all the dtypes, salt the earth\r\n\r\n* Fix copies to TFLED as well and do some casting there\r\n\r\n* dtype fix in TFLongformer test\r\n\r\n* Make fixup\r\n\r\n* Expand tolerances on the LED tests too (I think this is a TF32 thing)\r\n\r\n* Expand test tolerances for LED a tiny bit (probably a Tensorfloat thing again)", "code": "def test_chunk(self):\n hidden_states = self._get_hidden_states()\n batch_size = 1\n seq_length = 8\n hidden_size = 4\n hidden_states = tf.reshape(hidden_states, (batch_size, seq_length, hidden_size))\n\n chunked_hidden_states = TFLongformerSelfAttention._chunk(hidden_states, window_overlap=2)\n\n # expected slices across chunk and seq length dim\n expected_slice_along_seq_length = tf.convert_to_tensor([0.4983, -0.7584, -1.6944], dtype=tf.float32)\n expected_slice_along_chunk = tf.convert_to_tensor([0.4983, -1.8348, -0.7584, 2.0514], dtype=tf.float32)\n\n self.assertTrue(shape_list(chunked_hidden_states) == [1, 3, 4, 4])\n tf.debugging.assert_near(\n chunked_hidden_states[0, :, 0, 0], expected_slice_along_seq_length, rtol=1e-3, atol=1e-4\n )\n tf.debugging.assert_near(chunked_hidden_states[0, 0, :, 0], expected_slice_along_chunk, rtol=1e-3, atol=1e-4)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 169, "n_words": 68, "vocab_size": 53, "complexity": 1, "nloc": 14, "token_counts": 190, "n_ast_nodes": 242, "n_identifiers": 24, "random_cut": "def test_chunk(self):\n hidden_states = self._get_hidden_states()\n batch_size = 1\n seq_length = 8\n hidden_size = 4\n hidden_states = tf.reshape(hidden_states, (batch_size, seq_length, hidden_size))\n\n chunked_hidden_states = TFLongformerSelfA" }, { "id": 319200, "commit_id": "7106c68032077f9b98a1295505139d1bd13614b7", "repo": "paperless-ngx", "path": "src/documents/matching.py", "file_name": "matching.py", "fun_name": "matches", "commit_message": "FIX BUG: case-sensitive matching was not possible", "code": "def matches(matching_model, document):\n search_kwargs = {}\n\n # Check that match is not empty\n if matching_model.match.strip() == \"\":\n return False\n\n if matching_model.is_insensitive:\n search_kwargs = {\"flags\": re.IGNORECASE}\n document_content = document.content.lower()\n else:\n document_content = document.content\n\n if matching_model.matching_algorithm == MatchingModel.MATCH_ALL:\n for word in _split_match(matching_model):\n search_result = re.search(rf\"\\b{word}\\b\", document_content, **search_kwargs)\n if not search_result:\n return False\n log_reason(\n matching_model,\n document,\n f\"it contains all of these words: {matching_model.match}\",\n )\n return True\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_ANY:\n for word in _split_match(matching_model):\n if re.search(rf\"\\b{word}\\b\", document_content, **search_kwargs):\n log_reason(matching_model, document, f\"it contains this word: {word}\")\n return True\n return 
False\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_LITERAL:\n result = bool(\n re.search(\n rf\"\\b{re.escape(matching_model.match)}\\b\",\n document_content,\n **search_kwargs,\n ),\n )\n if result:\n log_reason(\n matching_model,\n document,\n f'it contains this string: \"{matching_model.match}\"',\n )\n return result\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_REGEX:\n try:\n match = re.search(\n re.compile(matching_model.match, **search_kwargs),\n document_content,\n )\n except re.error:\n logger.error(\n f\"Error while processing regular expression \" f\"{matching_model.match}\",\n )\n return False\n if match:\n log_reason(\n matching_model,\n document,\n f\"the string {match.group()} matches the regular expression \"\n f\"{matching_model.match}\",\n )\n return bool(match)\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_FUZZY:\n from fuzzywuzzy import fuzz\n\n match = re.sub(r\"[^\\w\\s]\", \"\", matching_model.match)\n text = re.sub(r\"[^\\w\\s]\", \"\", document_content)\n if matching_model.is_insensitive:\n match = match.lower()\n text = text.lower()\n if fuzz.partial_ratio(match, text) >= 90:\n # TODO: make this better\n log_reason(\n matching_model,\n document,\n f\"parts of the document content somehow match the string \"\n f\"{matching_model.match}\",\n )\n return True\n else:\n return False\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_AUTO:\n # this is done elsewhere.\n return False\n\n else:\n raise NotImplementedError(\"Unsupported matching algorithm\")\n\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1014, "n_words": 226, "vocab_size": 118, "complexity": 18, "nloc": 81, "token_counts": 367, "n_ast_nodes": 655, "n_identifiers": 38, "random_cut": "def matches(matching_model, document):\n search_kwargs = {}\n\n # Check that match is not empty\n if matching_model.match.strip() == \"\":\n return False\n\n if matching_model.is_insensitive:\n search_kwargs = {\"flags\": re.IGNORECASE}\n document_content = document.content.lower()\n else:\n document_content = document.content\n\n if matching_model.matching_algorithm == MatchingModel.MATCH_ALL:\n for word in _split_match(matching_model):\n search_result = re.search(rf\"\\b{word}\\b\", document_content, **search_kwargs)\n if not search_result:\n return False\n log_reason(\n matching_model,\n document,\n f\"it contains all of these words: {matching_model.match}\",\n )\n return True\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_ANY:\n for word in _split_match(matching_model):\n if re.search(rf\"\\b{word}\\b\", document_content, **search_kwargs):\n log_reason(matching_model, document, f\"it contains this word: {word}\")\n return True\n return False\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_LITERAL:\n result = bool(\n re.search(\n rf\"\\b{re.escape(matching_model.match)}\\b\",\n document_content,\n **search_kwargs,\n ),\n )\n if result:\n log_reason(\n matching_model,\n document,\n f'it contains this string: \"{matching_model.match}\"',\n )\n return result\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_REGEX:\n try:\n match = re.search(\n re.compile(matching_model.match, **search_kwargs),\n document_content,\n )\n except re.error:\n logger.error(\n f\"Error while processing regular expression \" f\"{matching_model.match}\",\n )\n return False\n if match:\n log_reason(\n 
matching_model,\n document,\n f\"the string {match.group()} matches the regular expression \"\n f\"{matching_model.match}\",\n )\n return bool(match)\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_FUZZY:\n from fuzzywuzzy import fuzz\n\n match = re.sub(r\"" }, { "id": 8115, "commit_id": "e4939d23146d1d144f993cea8c59f5f694edcd80", "repo": "ludwig", "path": "ludwig/schema/trainer.py", "file_name": "trainer.py", "fun_name": "get_model_type_jsonschema", "commit_message": "fixes (#2516)", "code": "def get_model_type_jsonschema():\n return {\n \"type\": \"string\",\n \"enum\": [MODEL_ECD, MODEL_GBM],\n \"default\": MODEL_ECD,\n \"title\": \"type\",\n \"description\": \"Select the model type.\",\n }\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 59, "n_words": 19, "vocab_size": 19, "complexity": 1, "nloc": 8, "token_counts": 31, "n_ast_nodes": 59, "n_identifiers": 3, "random_cut": "def get_model_type_jsonschema():\n return {\n \"type\": \"string\",\n \"enum\": [MODEL_ECD, MODEL_GBM],\n \"default\": MODEL_ECD,\n \"title\": \"type\",\n \"descripti" }, { "id": 283878, "commit_id": "826cd8a723d8e2b810c51bf8266c09e8e55059c4", "repo": "OpenBBTerminal", "path": "tests/bots/stocks/options/test_vol.py", "file_name": "test_vol.py", "fun_name": "test_vol_command_invalid", "commit_message": "Add tests for bots/stocks (#1616)\n\n* Added test_quote\r\n\r\n* Added dps tests\r\n\r\n* Added more tests\r\n\r\n* Added government tests\r\n\r\n* Added insider tests\r\n\r\n* Added options tests\r\n\r\n* Added sia tests\r\n\r\n* Added ta tests\r\n\r\n* Readd coverage rc\r\n\r\n* Added test", "code": "def test_vol_command_invalid(ticker):\n with pytest.raises(Exception):\n vol_command(ticker, \"2022-04-08\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 15, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 35, "n_identifiers": 6, "random_cut": "def test_vol_command_invalid(ticker):\n with pytest.raises(Exception" }, { "id": 165304, "commit_id": "6caefb19f4d7c05451fafca182c6eb39fe9901ed", "repo": "pandas", "path": "pandas/tests/window/test_base_indexer.py", "file_name": "test_base_indexer.py", "fun_name": "test_rolling_forward_skewness", "commit_message": "ENH: Rolling window with step size (GH-15354) (#45765)", "code": "def test_rolling_forward_skewness(constructor, step):\n values = np.arange(10.0)\n values[5] = 100.0\n\n indexer = FixedForwardWindowIndexer(window_size=5)\n rolling = constructor(values).rolling(window=indexer, min_periods=3, step=step)\n result = rolling.skew()\n\n expected = constructor(\n [\n 0.0,\n 2.232396,\n 2.229508,\n 2.228340,\n 2.229091,\n 2.231989,\n 0.0,\n 0.0,\n np.nan,\n np.nan,\n ]\n )[::step]\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n \"func,expected\",\n [\n (\"cov\", [2.0, 2.0, 2.0, 97.0, 2.0, -93.0, 2.0, 2.0, np.nan, np.nan]),\n (\n \"corr\",\n [\n 1.0,\n 1.0,\n 1.0,\n 0.8704775290207161,\n 0.018229084250926637,\n -0.861357304646493,\n 1.0,\n 1.0,\n np.nan,\n np.nan,\n ],\n ),\n ],\n)", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"func,expected\",\n [\n (\"cov\", [2.0, 2.0, 2.0, 97.0, 2.0, -93.0, 2.0, 2.0, np.nan, np.nan]),\n (\n \"corr\",\n [\n 1.0,\n 1.0,\n 1.0,\n 0.8704775290207161,\n 0.018229084250926637,\n 
-0.861357304646493,\n 1.0,\n 1.0,\n np.nan,\n np.nan,\n ],\n ),\n ],\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 427, "n_words": 69, "vocab_size": 47, "complexity": 1, "nloc": 21, "token_counts": 120, "n_ast_nodes": 246, "n_identifiers": 21, "random_cut": "def test_rolling_forward_skewness(constructor, step):\n values = np.arange(10.0)\n values[5] = 100.0\n\n indexer = FixedForwardWindowIndexer(window_size=5)\n rolling = constructor(values).rolling(window=indexer, min_periods=3, step=step)\n result = rolling.skew()\n\n expected = constructor(\n [\n 0.0,\n 2.232396,\n 2.229508,\n 2.228340,\n 2.229091,\n 2.231989,\n 0.0,\n 0.0,\n np.nan,\n np.nan,\n ]\n )[::step]\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n \"func,expected\",\n [\n (\"cov\", [2.0, 2.0, 2.0, 97.0, 2.0, -93.0, 2.0, 2.0, np.nan, np.nan]),\n (\n \"corr\",\n [\n 1.0,\n 1.0,\n 1.0,\n 0.8704775290207161,\n 0.018229084250926637,\n -0.861357304646493,\n 1.0,\n " }, { "id": 113791, "commit_id": "2e3a777062fa2f1c224c9e954374e699e6d66838", "repo": "nni", "path": "test/ut/sdk/mutable/test_mutable.py", "file_name": "test_mutable.py", "fun_name": "test_dedup", "commit_message": "Mutable V3 (Stage 7) - Test and Docs (#5200)", "code": "def test_dedup():\n a = Categorical([1, 2, 3], label='a')\n b = Categorical([1, 2, 3], label='a')\n assert a.equals(b)\n\n assert len(_dedup_labeled_mutables([a, b])) == 1\n\n b = Categorical([1, 2, 3, 4], label='a')\n with pytest.raises(ValueError, match='are different'):\n _dedup_labeled_mutables([a, b])\n\n b = MyCategorical([1, 2, 3], label='a')\n with pytest.raises(ValueError, match='are different'):\n _dedup_labeled_mutables([a, b])\n\n a = Numerical(0, 1, log_distributed=True, label='a')\n b = Numerical(0, 1, log_distributed=True, label='a')\n\n assert len(_dedup_labeled_mutables([a, b])) == 1\n assert not a.equals(Numerical(0, 1, log_distributed=False, label='a'))\n assert not a.equals(Numerical(mu=0, sigma=1, label='a'))\n\n a = Numerical(0, 1, label='a', default=0.5)\n b = Numerical(0, 1, label='a', default=0.3)\n assert not a.equals(b)\n\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 150, "n_words": 89, "vocab_size": 36, "complexity": 1, "nloc": 19, "token_counts": 263, "n_ast_nodes": 401, "n_identifiers": 18, "random_cut": "def test_dedup():\n a = Categorical([1, 2, 3], label='a')\n b = Categorical([1, 2, 3], label='a')\n assert a.equals(b)\n\n assert len(_dedup_labeled_mutables([a, b])) == 1\n\n b = Categorical([1, 2, 3, 4], label='a')\n with pytest.raises(ValueError, match='are different'):\n _dedup_labeled_mutables([a, b])\n\n b = MyCategorical([1, 2, 3], label='a')\n with pytest.raises(ValueError, match='are different'):\n _dedup_labeled_mutables([a, b])\n\n a = Numerical(0, 1, log_distributed=True, label='a')\n b = Numerical(0, 1, log_distributed=True, label='a')\n\n assert len(_dedup_labeled_mutables([a, b])) == 1\n assert not a.equals(Numerical(0" }, { "id": 70985, "commit_id": "de3fcba9e95818e9634ab7de6bfcb1f4221f2775", "repo": "wagtail", "path": "wagtail/contrib/frontend_cache/tests.py", "file_name": "tests.py", "fun_name": "test_azure_front_door_get_client", "commit_message": "Fix warnings from flake8-comprehensions.", "code": "def test_azure_front_door_get_client(self):\n mock_credentials = mock.MagicMock()\n backends = get_backends(backend_settings={\n 'azure_front_door': {\n 'BACKEND': 'wagtail.contrib.frontend_cache.backends.AzureFrontDoorBackend',\n 
'RESOURCE_GROUP_NAME': 'test-resource-group',\n 'FRONT_DOOR_NAME': 'wagtail-io-fake-front-door-name',\n 'SUBSCRIPTION_ID': 'fake-subscription-id',\n 'CREDENTIALS': mock_credentials,\n },\n })\n client = backends['azure_front_door']._get_client()\n self.assertEqual(set(backends.keys()), {'azure_front_door'})\n self.assertIsInstance(client, FrontDoorManagementClient)\n self.assertEqual(client.config.subscription_id, 'fake-subscription-id')\n self.assertIs(client.config.credentials, mock_credentials)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 185, "n_words": 33, "vocab_size": 31, "complexity": 1, "nloc": 16, "token_counts": 105, "n_ast_nodes": 185, "n_identifiers": 19, "random_cut": "def test_azure_front_door_get_client(self):\n mock_credentials = mock.MagicMock()\n backends = get_backends(backend_settings={\n 'azure_front_door': {\n 'BACKEND': 'wagtail.contrib.frontend_cache.backends.AzureFrontDoorBackend',\n 'RESOURCE_GROUP_NAME': 'test-resource-group',\n 'FRONT_DOOR_NAME': 'wagtail-io-fake-front-door-name',\n 'SUBSCRIPTION_ID': 'fake-subscription-id',\n 'CREDENTIALS': mock_credentials,\n },\n })\n client = backends['azure_front_door']._get_client()\n self.assertEqual(set(backends.keys()), {'azure_front_door'})\n self.assertIsInstance(client, FrontDoorManagementClient)\n self.assertEqual(client.config.subscription_id, 'fake-subscription-id')\n self.assertIs(client.config.credentials, mock_credentials)\n" }, { "id": 334237, "commit_id": "e779b250e1c253e7db7f379744a76d1f66fe63c8", "repo": "diffusers", "path": "src/diffusers/models/unet/modeling_unet.py", "file_name": "modeling_unet.py", "fun_name": "forward", "commit_message": "add first template for DDPM forward", "code": "def forward(self, x, time_emb=None):\n\n scale_shift = None\n if exists(self.mlp) and exists(time_emb):\n time_emb = self.mlp(time_emb)\n time_emb = rearrange(time_emb, \"b c -> b c 1 1\")\n scale_shift = time_emb.chunk(2, dim=1)\n\n h = self.block1(x, scale_shift=scale_shift)\n\n h = self.block2(h)\n return h + self.res_conv(x)\n\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 106, "n_words": 39, "vocab_size": 29, "complexity": 3, "nloc": 9, "token_counts": 84, "n_ast_nodes": 132, "n_identifiers": 14, "random_cut": "def forward(self, x, time_emb=None):\n\n scale_shift = None\n " }, { "id": 157196, "commit_id": "f309f9ff4e525c8ec632f42e81b1f8abe566bc1f", "repo": "dask", "path": "dask/dataframe/groupby.py", "file_name": "groupby.py", "fun_name": "_mul_cols", "commit_message": "Fix flaky `test_dataframe_aggregations_multilevel` (#9701)", "code": "def _mul_cols(df, cols):\n \n _df = df.__class__()\n for i, j in it.combinations_with_replacement(cols, 2):\n col = f\"{i}{j}\"\n _df[col] = df[i] * df[j]\n\n # Fix index in a groupby().apply() context\n # https://github.com/dask/dask/issues/8137\n # https://github.com/pandas-dev/pandas/issues/43568\n # Make sure index dtype is int (even if _df is empty)\n # https://github.com/dask/dask/pull/9701\n _df.index = np.zeros(len(_df), dtype=int)\n return _df\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 95, "n_words": 51, "vocab_size": 39, "complexity": 2, "nloc": 7, "token_counts": 66, "n_ast_nodes": 116, "n_identifiers": 16, "random_cut": "def _mul_cols(df, cols):\n \n _df = 
df.__class__()\n for i, j in it.combinations_with_replacement(cols, 2):\n col = f\"{i}{j}\"\n _df[col] = df[i] * df[j]\n\n # Fix index in a groupby().apply() context\n # https://github.com/dask/dask/issues/8137\n # https://github.com/pandas-dev/pandas/is" }, { "id": 160684, "commit_id": "84fd4a5a0e064d1f6be7bc0c96663690faa0353b", "repo": "numpy", "path": "numpy/ma/tests/test_core.py", "file_name": "test_core.py", "fun_name": "test_where", "commit_message": "TST: Fixup tests that cause FPEs during casts", "code": "def test_where(self):\n # Test the where function\n x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])\n y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])\n m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\n m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]\n xm = masked_array(x, mask=m1)\n ym = masked_array(y, mask=m2)\n xm.set_fill_value(1e+20)\n\n d = where(xm > 2, xm, -9)\n assert_equal(d, [-9., -9., -9., -9., -9., 4.,\n -9., -9., 10., -9., -9., 3.])\n assert_equal(d._mask, xm._mask)\n d = where(xm > 2, -9, ym)\n assert_equal(d, [5., 0., 3., 2., -1., -9.,\n -9., -10., -9., 1., 0., -9.])\n assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0])\n d = where(xm > 2, xm, masked)\n assert_equal(d, [-9., -9., -9., -9., -9., 4.,\n -9., -9., 10., -9., -9., 3.])\n tmp = xm._mask.copy()\n tmp[(xm <= 2).filled(True)] = True\n assert_equal(d._mask, tmp)\n\n with np.errstate(invalid=\"warn\"):\n # The fill value is 1e20, it cannot be converted to `int`:\n with pytest.warns(RuntimeWarning, match=\"invalid value\"):\n ixm = xm.astype(int)\n d = where(ixm > 2, ixm, masked)\n assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3])\n assert_equal(d.dtype, ixm.dtype)\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 469, "n_words": 200, "vocab_size": 90, "complexity": 1, "nloc": 28, "token_counts": 536, "n_ast_nodes": 646, "n_identifiers": 32, "random_cut": "def test_where(self):\n # Test the where function\n x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])\n y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])\n m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\n m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]\n xm = masked_array(x, mask=m1)\n ym = masked_array(y, mask" }, { "id": 80110, "commit_id": "ec6229c23600ebae8ec0d5db6846b095a9468151", "repo": "wagtail", "path": "wagtail/blocks/migrations/utils.py", "file_name": "utils.py", "fun_name": "map_stream_block_value", "commit_message": "Add StreamField migration helpers from https://github.com/sandilsranasinghe/wagtail-streamfield-migration-toolkit/", "code": "def map_stream_block_value(stream_block_value, block_def, block_path, **kwargs):\n \n\n mapped_value = []\n for child_block in stream_block_value:\n\n if not should_alter_block(child_block[\"type\"], block_path):\n mapped_value.append(child_block)\n\n else:\n try:\n child_block_def = block_def.child_blocks[child_block[\"type\"]]\n except KeyError:\n raise InvalidBlockDefError(\n \"No current block def named {}\".format(child_block[\"type\"])\n )\n mapped_child_value = map_block_value(\n child_block[\"value\"],\n block_def=child_block_def,\n block_path=block_path[1:],\n **kwargs,\n )\n mapped_value.append({**child_block, \"value\": mapped_child_value})\n\n return mapped_value\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 262, "n_words": 
46, "vocab_size": 41, "complexity": 4, "nloc": 20, "token_counts": 111, "n_ast_nodes": 182, "n_identifiers": 16, "random_cut": "def map_stream_block_value(stream_block_value, block_def, block_path, **kwargs):\n \n\n mapped_value = []\n for child_block in stream_block_value:\n\n if not should_alter_block(child_block[\"type\"], block_path):\n mapped_value.append(child_block)\n\n else:\n try:\n child_block_def = block_def.child_blocks[child_block[\"type\"]]\n except KeyError:\n raise InvalidBlockDefError(\n " }, { "id": 276937, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/kernelized_utils_test.py", "file_name": "kernelized_utils_test.py", "fun_name": "test_almost_identical_vectors", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_almost_identical_vectors(self, exact_kernel_fn, expected_values):\n \n x = tf.constant([1.0, 0.4, -2.1, -1.1])\n y = tf.constant([1.01, 0.39, -2.099, -1.101])\n exact_kernel = exact_kernel_fn(x, y)\n shape = exact_kernel.shape.as_list()\n self.assertLen(shape, 2)\n # x and y are almost identical and therefore K(x, y) will be almost equal to\n # the identity value of the kernel.\n self.assertAllClose(expected_values, exact_kernel, atol=1e-3)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 114, "n_words": 51, "vocab_size": 41, "complexity": 1, "nloc": 7, "token_counts": 101, "n_ast_nodes": 127, "n_identifiers": 14, "random_cut": "def test_almost_identical_vectors(self, exact_kernel_fn, expected_values):\n \n x = tf.constant([1.0, 0.4, -2.1, -1.1])\n y = tf.constant([1.01, 0.39, -2.099, -1.101])\n exact_kernel = exact_kernel_fn(x, y)\n shape = exact_kernel.shape.as_list()\n self.assertLen(shape, 2)\n # x and y are almost identical and therefore K(x, y) will be almost equal to\n # the identity value of the kernel.\n self.assertAllClose(expected_values, exact_kernel, atol=1e-3)\n" }, { "id": 181576, "commit_id": "fcbcdcbb7b5f797c840f7f3955865441cace513c", "repo": "tpot", "path": "tpot/base.py", "file_name": "base.py", "fun_name": "_check_proba", "commit_message": "made predict_proba consistent with sklearn", "code": "def _check_proba(self):\n if not hasattr(self, 'fitted_pipeline_'):\n raise AttributeError(\n \"A pipeline has not yet been optimized. 
Please call fit() first.\"\n )\n \n else:\n if not (hasattr(self.fitted_pipeline_, \"predict_proba\")):\n raise AttributeError(\n \"The fitted pipeline does not have the predict_proba() function.\"\n )\n \n return True\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 180, "n_words": 39, "vocab_size": 31, "complexity": 3, "nloc": 11, "token_counts": 41, "n_ast_nodes": 74, "n_identifiers": 5, "random_cut": "def _check_proba(self):\n" }, { "id": 267358, "commit_id": "f7d7604454c171bd3200f303a0e5c46a8354a474", "repo": "ansible", "path": "lib/ansible/plugins/test/uri.py", "file_name": "uri.py", "fun_name": "is_uri", "commit_message": "Add uri, urn and url test plugins (#77423)\n\nand docs!\r\n\r\nCo-authored-by: Tabah Baridule \r\nCo-authored-by: Felix Fontein ", "code": "def is_uri(value, schemes=None):\n \n try:\n x = urlparse(value)\n isit = all([x.scheme is not None, x.path is not None, not schemes or x.scheme in schemes])\n except Exception as e:\n isit = False\n return isit\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 65, "n_words": 32, "vocab_size": 24, "complexity": 3, "nloc": 7, "token_counts": 57, "n_ast_nodes": 91, "n_identifiers": 11, "random_cut": "def is_uri(value, schemes=None):\n \n try:\n x = urlparse(value)\n isit = all([x.scheme is not None, x.path is not None, not schemes or x." }, { "id": 202443, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/custom_columns/tests.py", "file_name": "tests.py", "fun_name": "setUpTestData", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def setUpTestData(cls):\n cls.a1 = Author.objects.create(first_name=\"John\", last_name=\"Smith\")\n cls.a2 = Author.objects.create(first_name=\"Peter\", last_name=\"Jones\")\n cls.authors = [cls.a1, cls.a2]\n\n cls.article = Article.objects.create(\n headline=\"Django lets you build web apps easily\", primary_author=cls.a1\n )\n cls.article.authors.set(cls.authors)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 79, "n_words": 27, "vocab_size": 24, "complexity": 1, "nloc": 8, "token_counts": 86, "n_ast_nodes": 141, "n_identifiers": 15, "random_cut": "def setUpTestData(cls):\n cls.a1 = Author.objects.create(first_name=\"John\", last_name=\"Smith\")\n cls.a2 = Author.objects.create(fir" }, { "id": 242117, "commit_id": "f8e4e9c2dd94c6f4789639dd891b8a6d5fb16e14", "repo": "Pillow", "path": "Tests/test_file_apng.py", "file_name": "test_file_apng.py", "fun_name": "test_apng_save_disposal_previous", "commit_message": "Added enums", "code": "def test_apng_save_disposal_previous(tmp_path):\n test_file = str(tmp_path / \"temp.png\")\n size = (128, 64)\n transparent = Image.new(\"RGBA\", size, (0, 0, 0, 0))\n red = Image.new(\"RGBA\", size, (255, 0, 0, 255))\n green = Image.new(\"RGBA\", size, (0, 255, 0, 255))\n\n # test OP_NONE\n transparent.save(\n test_file,\n save_all=True,\n append_images=[red, green],\n disposal=PngImagePlugin.Disposal.OP_PREVIOUS,\n )\n with Image.open(test_file) as im:\n im.seek(2)\n assert im.getpixel((0, 0)) == (0, 255, 0, 255)\n assert im.getpixel((64, 32)) == (0, 255, 0, 255)\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 
141, "n_words": 66, "vocab_size": 42, "complexity": 1, "nloc": 16, "token_counts": 165, "n_ast_nodes": 241, "n_identifiers": 21, "random_cut": "def test_apng_save_disposal_previous(tmp_path):\n test_file = str(tmp_path / \"temp.png\")\n size = (128, 64)\n transparent = Image.new(\"RGBA\", size, (0, 0, 0, 0))\n red = Image.new(\"RGBA\", size, (255, 0, 0, 255))\n green = Image.new(\"RGBA\", size, (0, 255, 0, 255))\n\n # test OP_NONE\n transparent.save(\n test_file,\n save_all=True,\n append_images=[red, green],\n " }, { "id": 177192, "commit_id": "bffcd74649fb95a57fb834846eb3c7d9693c55b8", "repo": "networkx", "path": "networkx/algorithms/isomorphism/tests/vf2pp/test_feasibility.py", "file_name": "test_feasibility.py", "fun_name": "test_feasibility_different_labels", "commit_message": "Preliminary VF2++ Implementation (#5788)\n\n* Preliminary implementation of the candidate node pair ordering of VF2++\r\n\r\n* Removed unused lines of code\r\n\r\n* Added todos\r\n\r\n* Added demo and pseudocode for VF2++\r\n\r\n* Pointed out a problem with the pseudocode\r\n\r\n* Initialisation of the VF2++ basis structure\r\n\r\n* Initialise the GraphMatcher\r\n\r\n* Remove useless changes\r\n\r\n* Check labels for the node ordering + demo\r\n\r\n* Code to verify the ordering\r\n\r\n* Implement the ISO feasibility check\r\n\r\n* Implement the IND feasibility\r\n\r\n* Create State class\r\n\r\n* Fixed Dan's code for the ordering\r\n\r\n* Preliminary form of the node ordering\r\n\r\n* Add visualisation\r\n\r\n* Use list comprehension for the Ti computation\r\n\r\n* Remove function\r\n\r\n* Create Unit Tests\r\n\r\n* Add labels check + update unit tests\r\n\r\n* Add pre-computation of G-labels\r\n\r\n* Remove todo\r\n\r\n* First implementation of the candidate selection\r\n\r\n* Initial version of candidate selection\r\n\r\n* Remove unnecessary files\r\n\r\n* Merge candidate selection cases into one\r\n\r\n* Create a function to incrementally update Ti and Ti_out\r\n\r\n* Unit Test for the Ti updating\r\n\r\n* Implement the Ti/Ti_out restoring\r\n\r\n* Finish the restoring of Ti and create unit test\r\n\r\n* Update test file names\r\n\r\n* Uncommented test section\r\n\r\n* Replace redundant loop with for-any\r\n\r\n* Create unit test for candidate selection using the same label for all nodes\r\n\r\n* Create unit test for candidate selection using different labels for the nodes\r\n\r\n* Update feasibility tests without the use of the state class\r\n\r\n* Create more unit tests for the feasibility checking\r\n\r\n* Provide explanation for the unit tests\r\n\r\n* First successful test of the complete ISO VF2++ algorithm (except from the buggy ordering)\r\n\r\n* Fix bug: when popping a node to climb up the DFS tree we need the previous node ordering (containing the node that we just popped)\r\n\r\n* Create a separate file for the VF2++ ISO algorithm\r\n\r\n* Delete file\r\n\r\n* Remove redundant iteration and memory use\r\n\r\n* Demo for different labels\r\n\r\n* Add benchmark for the incremental Ti updating\r\n\r\n* Remove unnecessary class\r\n\r\n* Fix bug with the ordering WOOOHOOOOO\r\n\r\n* Unit tests for the node ordering\r\n\r\n* Add unit tests for the VF2++ ISO\r\n\r\n* Fix ordering\r\n\r\n* Probablly fix logic error in ordering\r\n\r\n* Reformatted with black\r\n\r\n* Test precommit\r\n\r\n* Test precommit\r\n\r\n* Test pre commit\r\n\r\n* Testing pre commit\r\n\r\n* Update networkx/algorithms/isomorphism/tests/VF2++/test_vf2pp.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Add unit tests for 
vf2++\r\n\r\n* Added vf2++ unit test\r\n\r\n* Added precheck for VF2++\r\n\r\n* Add unit tests for the precheck\r\n\r\n* Updated the benchmarking\r\n\r\n* Updated the benchmark\r\n\r\n* Apply hooks\r\n\r\n* Add documentation for the ordering\r\n\r\n* Add documentation for the candidate selection\r\n\r\n* Added documentation for the feasibility\r\n\r\n* Added documentation for vf2++\r\n\r\n* Separate functions for ISO feasibility\r\n\r\n* Refine unit tests\r\n\r\n* Apply hooks\r\n\r\n* Force reformat all files\r\n\r\n* Remove redundant return statements from VF2__\r\n\r\n* Apply hooks\r\n\r\n* Apply hooks\r\n\r\n* Format\r\n\r\n* Minor changes\r\n\r\n* Add unit tests\r\n\r\n* Adjusted benchmark\r\n\r\n* Fix benchmark\r\n\r\n* Isort\r\n\r\n* Isort benchmark\r\n\r\n* Apply optimization in the candidate selection\r\n\r\n* Track matched node with pointer\r\n\r\n* Adjust benchmark\r\n\r\n* Restructure in VF2 function\r\n\r\n* Make VF2++ EXTREMELY PRETTY\r\n\r\n* Removed sorting in feasibility rules\r\n\r\n* Get rid of visited set, check mapping instead\r\n\r\n* Update networkx/algorithms/isomorphism/tests/VF2++/test_vf2pp.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Made color assignement deterministic in VF2++ unit tests\r\n\r\n* Add keyword argument in unit tests\r\n\r\n* Hoepfully fix pipeline errors\r\n\r\n* Add vf2++ unit tests for multigraphs\r\n\r\n* Add Unit tests for Feasibility\r\n\r\n* Add unit tests for feasibility on multi graphs\r\n\r\n* Finalize feasibility tests for multigraph settings\r\n\r\n* Update documentation\r\n\r\n* Remove list comprehension and boost performance\r\n\r\n* Add unit tests for both graphs and multi graphs, using same labels\r\n\r\n* Isort\r\n\r\n* Optimized precheck\r\n\r\n* Replace loop with any\r\n\r\n* Optimize multigraph chceck\r\n\r\n* Transfer except statement\r\n\r\n* Check order consistency\r\n\r\n* Cache degrees and labels from the beginning\r\n\r\n* Delete benchmark to create new\r\n\r\n* Fix precheck bug\r\n\r\n* Adjust unit tests\r\n\r\n* Add benchmark for perofmance comparison between VF2 and VF2++\r\n\r\n* Fix Ti computing tests\r\n\r\n* Hopefully fix isort\r\n\r\n* Add benchmark for the candidate selection methods\r\n\r\n* Rename modules: lower case, remove +\r\n\r\n* Refactor VF2++ arguments\r\n\r\n* Adjust VF2++ to work with multiple node labels\r\n\r\n* Add unit tests for multiple labels\r\n\r\n* Adjust for different number of labels per node\r\n\r\n* Finish arguments of VF2++\r\n\r\n* Add user functions\r\n\r\n* Exported the two vf2++ functions\r\n\r\n* Added underscore prefix to private functions and fixed tests\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Update networkx/algorithms/isomorphism/demo.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Apply suggested changes\r\n\r\n* Refactor rst files\r\n\r\n* Rm unnecessary toctree from isomorphism page.\r\n\r\n* Autodoc vf2pp module + public functions.\r\n\r\n* Rm dedicated vf2pp reference article.\r\n\r\n* Rm extra vf2pp listing from autosummaries.\r\n\r\n* Add summary of three functions to module docstring.\r\n\r\n* Make sure docstrings match their functions.\r\n\r\n* Refactor everything\r\n\r\n* Format code\r\n\r\n* Add unit test\r\n\r\n* Inline process level function in node ordering\r\n\r\n* Perform intersection first rather than last\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp_helpers/candidates.py\r\n\r\nCo-authored-by: Dan 
Schult \r\n\r\n* Replace return statement with multiple operations and make it more readable\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp_helpers/feasibility.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Fix multigraph bug in update_Tinout\r\n\r\n* Abstract the argmax function\r\n\r\n* Add unit test for first case of candidate selection\r\n\r\n* Create unit test for all candidate selection cases\r\n\r\n* Remove re-definition of namedtuple parameters\r\n\r\n* Update doc/reference/algorithms/isomorphism.rst\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/__init__.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Delete benchmark file\r\n\r\n* Add demo file\r\n\r\n* Create util file containing the helper functions, common across all unit tests\r\n\r\n* Fix CI/CD\r\n\r\n* Make unit tests for Ti updating specific\r\n\r\n* Remove util functions from vf2pp tests\r\n\r\n* Remove utils functions from multivf2pp tests\r\n\r\n* Remove utils functions from candidate tests\r\n\r\n* Remove utils functions from ordering checks\r\n\r\n* Remove utils functions from Ti tests\r\n\r\n* Add example in docstring\r\n\r\n* Remove unused utils functions\r\n\r\n* Separate initialization of vf2pp\r\n\r\n* Inline functions and add new abstract function for pushing to stack\r\n\r\n* Inline push to stack\r\n\r\n* Add commentsa\r\n\r\n* Separate precheck functions\r\n\r\n* Replace method with existing networkx function\r\n\r\n* Include label initialization inside parameter initializer function\r\n\r\n* Rename Tiout to Titilde\r\n\r\n* Update networkx/algorithms/isomorphism/tests/vf2pp/test_Ti_computing.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Use canonical setitem for dictionary insertions\r\n\r\n* Update networkx/algorithms/isomorphism/tests/vf2pp/test_precheck.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Remove variable assignement\r\n\r\n* Merge unit tests of vf2pp for graphs and multigraphs into the same file\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Change variable name\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Re-write ordering unit tests\r\n\r\n* Rename vf2pp solver\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp_helpers/feasibility.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Replace abstractified argmax function with two loops for readability\r\n\r\n* Apply final changes\r\n\r\n* Fix mistake\r\n\r\n* Update ref guide to reflect new fn names.\r\n\r\n* Update docstrings\r\n * Fix line length in module docstring\r\n * Copy updated parameter section to all 3 public fns.\r\n * Add Yields section to all_isomorphisms fn.\r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Dan Schult ", "code": "def test_feasibility_different_labels(self):\n G1 = nx.Graph(\n [\n (0, 1),\n (1, 2),\n (1, 14),\n (0, 4),\n (1, 5),\n (2, 6),\n (3, 7),\n (3, 6),\n (4, 10),\n (4, 9),\n (6, 10),\n (20, 9),\n (20, 15),\n (20, 12),\n (20, 11),\n (12, 13),\n (11, 13),\n (20, 8),\n (20, 2),\n (20, 5),\n (20, 0),\n ]\n )\n mapped = {\n 0: \"a\",\n 1: \"b\",\n 2: \"c\",\n 3: \"d\",\n 4: \"e\",\n 5: \"f\",\n 6: \"g\",\n 7: \"h\",\n 8: \"i\",\n 9: \"j\",\n 10: \"k\",\n 11: \"l\",\n 12: \"m\",\n 13: \"n\",\n 14: \"o\",\n 15: \"p\",\n 20: \"x\",\n }\n G2 = 
nx.relabel_nodes(G1, mapped)\n\n l1 = {n: \"none\" for n in G1.nodes()}\n l2 = dict()\n\n l1.update(\n {\n 9: \"blue\",\n 15: \"blue\",\n 12: \"blue\",\n 11: \"green\",\n 2: \"green\",\n 8: \"red\",\n 0: \"red\",\n 5: \"yellow\",\n }\n )\n l2.update({mapped[n]: l for n, l in l1.items()})\n\n gparams = _GraphParameters(\n G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None\n )\n sparams = _StateParameters(\n {0: \"a\", 1: \"b\", 2: \"c\", 3: \"d\"},\n {\"a\": 0, \"b\": 1, \"c\": 2, \"d\": 3},\n {4, 5, 6, 7, 14},\n {9, 10, 15, 12, 11, 13, 8},\n {\"e\", \"f\", \"g\", \"h\", \"o\"},\n {\"j\", \"k\", \"l\", \"m\", \"n\", \"i\", \"p\"},\n )\n\n u, v = 20, \"x\"\n assert not _cut_PT(u, v, gparams, sparams)\n\n # Change structure in G2 such that, ONLY consistency is harmed\n G2.remove_edge(mapped[20], mapped[2])\n G2.add_edge(mapped[20], mapped[3])\n l2.update({mapped[3]: \"green\"})\n\n # Consistency check fails, while the cutting rules are satisfied!\n assert not _cut_PT(u, v, gparams, sparams)\n assert not _consistent_PT(u, v, gparams, sparams)\n\n # Compensate in G1 and make it consistent\n G1.remove_edge(20, 2)\n G1.add_edge(20, 3)\n l1.update({3: \"green\"})\n assert not _cut_PT(u, v, gparams, sparams)\n assert _consistent_PT(u, v, gparams, sparams)\n\n # ONLY fail the cutting check\n l1.update({5: \"red\"})\n assert _cut_PT(u, v, gparams, sparams)\n assert _consistent_PT(u, v, gparams, sparams)\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 1251, "n_words": 278, "vocab_size": 175, "complexity": 3, "nloc": 87, "token_counts": 598, "n_ast_nodes": 893, "n_identifiers": 28, "random_cut": "def test_feasibility_different_labels(self):\n G1 = nx.Graph(\n [\n (0, 1),\n (1, 2),\n (1, 14),\n (0, 4),\n (1, 5),\n (2, 6),\n (3, 7),\n (3, 6),\n (4, 10),\n (4, 9),\n (6, 10),\n (20, 9),\n (20, 15),\n (20, 12),\n (20, 11),\n (12, 13),\n (11, 13),\n (20, 8),\n (20, 2),\n (20, 5),\n (20, 0),\n ]\n )\n mapped = {\n 0: \"a\",\n 1: \"b\",\n 2: \"c\",\n 3: \"d\",\n 4: \"e\",\n 5: \"f\",\n 6: \"g\",\n 7: \"h\",\n 8: \"i\",\n 9: \"j\",\n 10: \"k\",\n 11: \"l\",\n 12: \"m\",\n 13: \"n\",\n 14: \"o\",\n 15: \"p\",\n 20: \"x\",\n }\n G2 = nx.relabel_nodes(G1, mapped)\n\n l1 = {n: \"none\" for n in G1.nodes()}\n l2 = dict()\n\n l1.update(\n {\n 9: \"blue\",\n 15: \"blue\",\n 12: \"blue\",\n 11: \"green\",\n 2: \"green\",\n 8: \"red\",\n 0: \"red\",\n 5: \"yellow\",\n }\n )\n l2.update({mapped[n]: l for n, l in l1.items()})\n\n gparams = _GraphParameters(\n G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None\n )\n sparams = _StateParameters(\n {0: \"a\", 1: \"b\", 2: \"c\", 3: \"d\"},\n {\"a\": 0, \"b\": 1, \"c\": 2, \"d\": 3},\n {4, 5, 6, 7, 14},\n {9, 10, 15, 12, 11, 13, 8},\n {\"e\", \"f\", \"g\", \"h\", \"o\"},\n {\"j\", \"k\", \"l\", \"m\", \"n\", \"i\", \"p\"},\n " }, { "id": 204557, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/mail/backends/console.py", "file_name": "console.py", "fun_name": "write_message", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def write_message(self, message):\n msg = message.message()\n msg_data = msg.as_bytes()\n charset = (\n msg.get_charset().get_output_charset() if msg.get_charset() else \"utf-8\"\n )\n msg_data = msg_data.decode(charset)\n self.stream.write(\"%s\\n\" % msg_data)\n self.stream.write(\"-\" * 79)\n self.stream.write(\"\\n\")\n", "url": 
"https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 94, "n_words": 28, "vocab_size": 24, "complexity": 2, "nloc": 10, "token_counts": 78, "n_ast_nodes": 137, "n_identifiers": 12, "random_cut": "def write_message(self, message):\n msg = message.message()\n msg_data = msg.as_bytes()\n charset = (\n msg.get_charset().get_output_charset() if msg" }, { "id": 219609, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_py_abc.py", "file_name": "_py_abc.py", "fun_name": "_dump_registry", "commit_message": "add python 3.10.4 for windows", "code": "def _dump_registry(cls, file=None):\n \n print(f\"Class: {cls.__module__}.{cls.__qualname__}\", file=file)\n print(f\"Inv. counter: {get_cache_token()}\", file=file)\n for name in cls.__dict__:\n if name.startswith(\"_abc_\"):\n value = getattr(cls, name)\n if isinstance(value, WeakSet):\n value = set(value)\n print(f\"{name}: {value!r}\", file=file)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 132, "n_words": 29, "vocab_size": 24, "complexity": 4, "nloc": 9, "token_counts": 74, "n_ast_nodes": 150, "n_identifiers": 15, "random_cut": "def _dump_registry(cls, file=None):\n \n print(f\"Class: {cls.__module__}.{cls.__qualname__}\", file=file)\n print(f\"Inv. counter: {get_cache_token()}\", file=file)\n for name in cls.__dict__:\n if name.startswith(\"_abc_\"):\n value = getattr(cls, name)\n if isinstance(value, WeakSet):\n value = set(value)\n print(f\"{name" }, { "id": 45506, "commit_id": "69f6f9e01b6df76c3c8fa266d460324163957887", "repo": "airflow", "path": "scripts/ci/pre_commit/pre_commit_migration_reference.py", "file_name": "pre_commit_migration_reference.py", "fun_name": "insert_version", "commit_message": "Autogenerate migration reference doc (#21601)\n\n* document airflow version in each alembic migration module and use this to autogen the doc\r\n* update each migration module to have the same description used in migration ref (so it can be used in autogen)", "code": "def insert_version(old_content, file):\n new_content = re.sub(\n r'(^depends_on.*)',\n lambda x: f\"{x.group(1)}\\nairflow_version = '{airflow_version}'\",\n old_content,\n flags=re.MULTILINE,\n )\n file.write_text(new_content)\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 52, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 8, "token_counts": 37, "n_ast_nodes": 73, "n_identifiers": 12, "random_cut": "def insert_version(old_content, file):\n new_content = re.sub(\n " }, { "id": 196913, "commit_id": "91599434d1aeefcef688e31de1e574e1a25af5b8", "repo": "sympy", "path": "sympy/matrices/tests/test_matrices.py", "file_name": "test_matrices.py", "fun_name": "test_deprecated_classof_a2idx", "commit_message": "Update the deprecation for sympy.matrices.matrices.classof and a2idx", "code": "def test_deprecated_classof_a2idx():\n with warns_deprecated_sympy():\n from sympy.matrices.matrices import classof\n M = Matrix([[1, 2], [3, 4]])\n IM = ImmutableMatrix([[1, 2], [3, 4]])\n assert classof(M, IM) == ImmutableDenseMatrix\n\n with warns_deprecated_sympy():\n from sympy.matrices.matrices import a2idx\n assert a2idx(-1, 3) == 2\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, 
"n_whitespaces": 83, "n_words": 36, "vocab_size": 25, "complexity": 1, "nloc": 9, "token_counts": 85, "n_ast_nodes": 130, "n_identifiers": 11, "random_cut": "def test_deprecated_classof_a2idx():\n with warns_deprecated_sympy():\n from sym" }, { "id": 57974, "commit_id": "fab393e6c8f9e2e922b03555d7bfffad76df1918", "repo": "prefect", "path": "tests/orion/api/test_deployments.py", "file_name": "test_deployments.py", "fun_name": "test_read_deployment_by_name", "commit_message": "Deployment API tests passing", "code": "async def test_read_deployment_by_name(self, client, flow, deployment):\n response = await client.get(f\"/deployments/name/{flow.name}/{deployment.name}\")\n assert response.status_code == status.HTTP_200_OK\n assert response.json()[\"id\"] == deployment.id\n assert response.json()[\"name\"] == deployment.name\n assert response.json()[\"flow_id\"] == str(deployment.flow_id)\n assert response.json()[\"infrastructure_document_id\"] == str(\n deployment.infrastructure_document_id\n )\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 91, "n_words": 32, "vocab_size": 24, "complexity": 1, "nloc": 9, "token_counts": 87, "n_ast_nodes": 157, "n_identifiers": 16, "random_cut": "async def test_read_deployment_by_name(self, client, flow, deployment):\n response = await client.get(f\"/deployments/name/{flow.name}/{deployment.name}\")\n assert response.status_code == status.HTTP_200_OK\n assert response.json()[\"id\"] == deployment.id\n assert response.jso" }, { "id": 8668, "commit_id": "cd6551d85e2850062fc66a283c5a3c758b772901", "repo": "ludwig", "path": "tests/integration_tests/synthetic_test_data.py", "file_name": "synthetic_test_data.py", "fun_name": "get_generated_data_for_optimizer", "commit_message": "Add synthetic test data integration test utils, and use them for loss value decrease tests. 
(#2789)\n\n* Add synthetic test data integration test utils, and use them for loss value decrease tests.\r\n\r\n* Address PR feedback\r\n\r\n* Fix tests.", "code": "def get_generated_data_for_optimizer():\n # function generates simple training data that guarantee convergence\n # within 30 epochs for suitable config\n\n # generate data\n np.random.seed(RANDOM_SEED)\n x = np.array(range(NUMBER_OBSERVATIONS)).reshape(-1, 1)\n y = 2 * x + 1 + np.random.normal(size=x.shape[0]).reshape(-1, 1)\n raw_df = pd.DataFrame(np.concatenate((x, y), axis=1), columns=[\"x\", \"y\"])\n raw_df[\"x\"] = (raw_df[\"x\"] - raw_df[\"x\"].min()) / (raw_df[\"x\"].max() - raw_df[\"x\"].min())\n raw_df[\"y\"] = (raw_df[\"y\"] - raw_df[\"y\"].min()) / (raw_df[\"y\"].max() - raw_df[\"y\"].min())\n\n # create training data\n train, valid_test = train_test_split(raw_df, train_size=0.7)\n\n # create validation and test data\n validation, test = train_test_split(valid_test, train_size=0.5)\n\n return GeneratedData(train, validation, test)\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 126, "n_words": 85, "vocab_size": 59, "complexity": 1, "nloc": 10, "token_counts": 208, "n_ast_nodes": 338, "n_identifiers": 29, "random_cut": "def get_generated_data_for_optimizer():\n # function generates simple training data that guarantee convergence\n # within 30 epochs for suitable config\n\n # generate data\n np.random.seed(RANDOM_SEED)\n x = np.array(range(NUMBER_OBSERVATIONS)).reshape(-1, 1)\n y = 2 * x + 1 + np.random.normal(size=x.shape[0]).reshape(-1, 1)\n raw_df = pd.DataFrame(np.concatenate((x, y), axis=1), columns=[\"x\", \"y\"])\n raw_df[\"x\"] = (raw_df[\"x\"] - raw_df[\"x\"].min()) / (raw_df[\"x\"].max() - raw_df[\"x\"].min())\n raw_df[\"y\"] = (raw_df[\"y\"] - raw_df[\"y\"].min()) / (raw_df[\"y\"].max() - raw_df[\"y\"].min())\n\n" }, { "id": 31220, "commit_id": "49becbaa5549b477b0d96c55f207614773c0ab42", "repo": "transformers", "path": "src/transformers/image_utils.py", "file_name": "image_utils.py", "fun_name": "resize", "commit_message": "Enable crop_center method to handle (W, H, C) images (#17626)\n\n* enable crop_center method to handle (W, H, C) images\r\n\r\n* minor style and comment edits", "code": "def resize(self, image, size, resample=PIL.Image.BILINEAR, default_to_square=True, max_size=None):\n \n self._ensure_format_supported(image)\n\n if not isinstance(image, PIL.Image.Image):\n image = self.to_pil_image(image)\n\n if isinstance(size, list):\n size = tuple(size)\n\n if isinstance(size, int) or len(size) == 1:\n if default_to_square:\n size = (size, size) if isinstance(size, int) else (size[0], size[0])\n else:\n width, height = image.size\n # specified size only for the smallest edge\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size if isinstance(size, int) else size[0]\n\n if short == requested_new_short:\n return image\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n )\n if new_long > max_size:\n new_short, new_long = int(max_size * new_short / new_long), max_size\n\n size = (new_short, new_long) if width <= height else (new_long, new_short)\n\n return image.resize(size, resample=resample)\n", "url": 
"https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 547, "n_words": 146, "vocab_size": 90, "complexity": 14, "nloc": 26, "token_counts": 241, "n_ast_nodes": 370, "n_identifiers": 25, "random_cut": "def resize(self, image, size, resample=PIL.Image.BILINEAR, default_to_square=True, max_size=None):\n \n self._ensure_format_supported(image)\n\n if not isinstance(image, PIL.Image.Image):\n image = self.to_pil_image(image)\n\n if isinstance(size, list):\n size = tuple(size)\n\n if isinstance(size, int) or len(size) == 1:\n if default_to_square:\n size = (size, size) if isinstance(size, int) else (size[0], size[0])\n else:\n width, height = image.size\n # specified size only for the smallest edge\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size if isinstance(size, int) else size[0]\n\n if short == requested_new_short:\n return image\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n " }, { "id": 27276, "commit_id": "60b1c18f600a7667bf55dc6f3df035235d771b86", "repo": "saleor", "path": "saleor/graphql/order/mutations/order_discount_update.py", "file_name": "order_discount_update.py", "fun_name": "perform_mutation", "commit_message": "Split order mutations files into file per mutation (#9747)", "code": "def perform_mutation(cls, _root, info, **data):\n order_discount = cls.get_node_or_error(\n info, data.get(\"discount_id\"), only_type=\"OrderDiscount\"\n )\n order = order_discount.order\n input = data.get(\"input\")\n cls.validate(info, order, order_discount, input)\n\n reason = input.get(\"reason\", order_discount.reason)\n value_type = input.get(\"value_type\", order_discount.value_type)\n value = input.get(\"value\", order_discount.value)\n\n order_discount_before_update = copy.deepcopy(order_discount)\n\n order_discount.reason = reason\n order_discount.value = value\n order_discount.value_type = value_type\n order_discount.save()\n\n cls.recalculate_order(order)\n\n if (\n order_discount_before_update.value_type != value_type\n or order_discount_before_update.value != value\n ):\n # call update event only when we changed the type or value of the discount\n order_discount.refresh_from_db()\n events.order_discount_updated_event(\n order=order,\n user=info.context.user,\n app=info.context.app,\n order_discount=order_discount,\n old_order_discount=order_discount_before_update,\n )\n return OrderDiscountUpdate(order=order)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 353, "n_words": 83, "vocab_size": 63, "complexity": 3, "nloc": 29, "token_counts": 186, "n_ast_nodes": 294, "n_identifiers": 28, "random_cut": "def perform_mutation(cls, _root, info, **data):\n order_discount = cls.get_node_or_error(\n info, data.get(\"discount_id\"), only_type=\"OrderDiscount\"\n )\n order = order_discount.order\n input = data.get(\"input\")\n cls.validate(info, order, order_discount, input)\n\n reason = input.get(\"reason\", order_discount.reason)\n value_type = input.get(\"value_type\", order_discount.value_type)\n value = input.get(\"value\", order_discount.value)\n\n order_discount_before_update = copy.deepcopy(order_discount)\n\n 
order_discount.reason = reason\n order_discount.value = value\n order_discount.value_type = value_type\n order_discou" }, { "id": 146234, "commit_id": "1100c982223757f697a410a0d0c3d8bf3ff9c805", "repo": "ray", "path": "python/ray/serve/tests/test_cli.py", "file_name": "test_cli.py", "fun_name": "test_mixed_kwargs", "commit_message": "[serve] Implement Serve Application object (#22917)\n\nThe concept of a Serve Application, a data structure containing all information needed to deploy Serve on a Ray cluster, has surfaced during recent design discussions. This change introduces a formal Application data structure and refactors existing code to use it.", "code": "def test_mixed_kwargs(self):\n args_and_kwargs = (\n \"argval1\",\n \"argval2\",\n \"--kwarg1==kw==val1\",\n \"--kwarg2\",\n \"kwval2\",\n \"--kwarg3\",\n \"=kwval=3\",\n \"--kwarg4=\",\n \"--kwarg5\",\n \"kwval5\",\n )\n args, kwargs = _process_args_and_kwargs(args_and_kwargs)\n assert args == [\"argval1\", \"argval2\"]\n assert kwargs == {\n \"kwarg1\": \"=kw==val1\",\n \"kwarg2\": \"kwval2\",\n \"kwarg3\": \"=kwval=3\",\n \"kwarg4\": \"\",\n \"kwarg5\": \"kwval5\",\n }\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 246, "n_words": 40, "vocab_size": 33, "complexity": 1, "nloc": 22, "token_counts": 70, "n_ast_nodes": 137, "n_identifiers": 6, "random_cut": "def test_mixed_kwargs(self):\n args_and_kwargs = (\n \"argval1\",\n \"argval2\",\n \"--kwarg1==kw==val1\",\n \"--kwarg2\",\n \"kwval2\",\n \"--kwarg3\",\n \"=kwval=3\",\n \"--kwarg4=\",\n \"--kwarg5\",\n \"kwval5\",\n )\n args, kwargs = _process_args_and_kwargs(args_and_kwargs)\n assert args == [\"argval1\", \"argval2\"]\n " }, { "id": 153592, "commit_id": "605efa618e7994681f57b11d04d417f353ef8d50", "repo": "modin", "path": "modin/pandas/base.py", "file_name": "base.py", "fun_name": "reorder_levels", "commit_message": "DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333)\n\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Alexander Myskov ", "code": "def reorder_levels(self, order, axis=0): # noqa: PR01, RT01, D200\n \n axis = self._get_axis_number(axis)\n new_labels = self.axes[axis].reorder_levels(order)\n return self.set_axis(new_labels, axis=axis, inplace=False)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 48, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 4, "token_counts": 48, "n_ast_nodes": 75, "n_identifiers": 9, "random_cut": "def reorder_levels(self, order, axis=0): # noqa: PR01, " }, { "id": 47017, "commit_id": "6933022e94acf139b2dea9a589bb8b25c62a5d20", "repo": "airflow", "path": "tests/providers/google/cloud/hooks/test_dataproc.py", "file_name": "test_dataproc.py", "fun_name": "test_delete_batch", "commit_message": "Fix new MyPy errors in main (#22884)\n\nThose MyPe errors are side effect of some new dependencies.", "code": "def test_delete_batch(self, mock_client):\n self.hook.delete_batch(\n batch_id=BATCH_ID,\n region=GCP_LOCATION,\n project_id=GCP_PROJECT,\n )\n mock_client.assert_called_once_with(GCP_LOCATION)\n mock_client.return_value.delete_batch.assert_called_once_with(\n request=dict(\n name=BATCH_NAME.format(GCP_PROJECT, GCP_LOCATION, BATCH_ID),\n ),\n metadata=(),\n retry=DEFAULT,\n timeout=None,\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, 
"n_whitespaces": 156, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 15, "token_counts": 73, "n_ast_nodes": 106, "n_identifiers": 22, "random_cut": "def test_delete_batch(self, mock_client):\n self.hook.delete_batch(\n batch_id=BATCH_ID,\n region=GCP_LOCATION,\n project_id=GCP_PROJECT,\n )\n mock_client.assert_called_once_with(GCP_LOCATION)\n m" }, { "id": 155315, "commit_id": "a77a6464c83ad9e3d91fdf9650aa5e43764f6aa9", "repo": "modin", "path": "asv_bench/benchmarks/benchmarks.py", "file_name": "benchmarks.py", "fun_name": "time_drop_dups_string", "commit_message": "TEST-#5350: port DropDuplicates and LevelAlign benchmarks from pandas github (#5351)\n\nSigned-off-by: arunjose696 \r\nCo-authored-by: Anatoly Myachev ", "code": "def time_drop_dups_string(self, shape):\n self.series.drop_duplicates(inplace=True)\n execute(self.series)\n\n\nfrom .utils import setup # noqa: E402, F401\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 26, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 23, "n_ast_nodes": 48, "n_identifiers": 9, "random_cut": "def time_drop_dups_string(self, shape):\n self.series.drop_duplicates(inplace=True)\n execute(self.series)\n\n\nfrom .utils import setup # noqa: E402, F401\n" }, { "id": 247996, "commit_id": "961ee75a9b0b25731eea0031b4ba99a79c050844", "repo": "synapse", "path": "synapse/_scripts/synapse_port_db.py", "file_name": "synapse_port_db.py", "fun_name": "_setup_auth_chain_sequence", "commit_message": "Disallow untyped defs in synapse._scripts (#12422)\n\nOf note: \r\n\r\n* No untyped defs in `register_new_matrix_user`\r\n\r\nThis one might be contraversial. `request_registration` has three\r\ndependency-injection arguments used for testing. 
I'm removing the\r\ninjection of the `requests` module and using `unitest.mock.patch` in the\r\ntest cases instead.\r\n\r\nDoing `reveal_type(requests)` and `reveal_type(requests.get)` before the\r\nchange:\r\n\r\n```\r\nsynapse/_scripts/register_new_matrix_user.py:45: note: Revealed type is \"Any\"\r\nsynapse/_scripts/register_new_matrix_user.py:46: note: Revealed type is \"Any\"\r\n```\r\n\r\nAnd after:\r\n\r\n```\r\nsynapse/_scripts/register_new_matrix_user.py:44: note: Revealed type is \"types.ModuleType\"\r\nsynapse/_scripts/register_new_matrix_user.py:45: note: Revealed type is \"def (url: Union[builtins.str, builtins.bytes], params: Union[Union[_typeshed.SupportsItems[Union[builtins.str, builtins.bytes, builtins.int, builtins.float], Union[builtins.str, builtins.bytes, builtins.int, builtins.float, typing.Iterable[Union[builtins.str, builtins.bytes, builtins.int, builtins.float]], None]], Tuple[Union[builtins.str, builtins.bytes, builtins.int, builtins.float], Union[builtins.str, builtins.bytes, builtins.int, builtins.float, typing.Iterable[Union[builtins.str, builtins.bytes, builtins.int, builtins.float]], None]], typing.Iterable[Tuple[Union[builtins.str, builtins.bytes, builtins.int, builtins.float], Union[builtins.str, builtins.bytes, builtins.int, builtins.float, typing.Iterable[Union[builtins.str, builtins.bytes, builtins.int, builtins.float]], None]]], builtins.str, builtins.bytes], None] =, data: Union[Any, None] =, headers: Union[Any, None] =, cookies: Union[Any, None] =, files: Union[Any, None] =, auth: Union[Any, None] =, timeout: Union[Any, None] =, allow_redirects: builtins.bool =, proxies: Union[Any, None] =, hooks: Union[Any, None] =, stream: Union[Any, None] =, verify: Union[Any, None] =, cert: Union[Any, None] =, json: Union[Any, None] =) -> requests.models.Response\"\r\n```\r\n\r\n* Drive-by comment in `synapse.storage.types`\r\n\r\n* No untyped defs in `synapse_port_db`\r\n\r\nThis was by far the most painful. 
I'm happy to break this up into\r\nsmaller pieces for review if it's not managable as-is.", "code": "async def _setup_auth_chain_sequence(self) -> None:\n curr_chain_id: Optional[\n int\n ] = await self.sqlite_store.db_pool.simple_select_one_onecol(\n table=\"event_auth_chains\",\n keyvalues={},\n retcol=\"MAX(chain_id)\",\n allow_none=True,\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 92, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 15, "token_counts": 63, "n_ast_nodes": 66, "n_identifiers": 12, "random_cut": "async def _setup_auth_chain_sequence(self) -> None:\n curr_chain_id: Optional[\n int\n ] = await self.sqlite_store.db_pool.simple_select_one_onecol(\n " }, { "id": 17506, "commit_id": "51c09ed5fc947d65cc3360b7b59d629f71b5f34f", "repo": "ccxt", "path": "python/ccxt/async_support/okx.py", "file_name": "okx.py", "fun_name": "modify_margin_helper", "commit_message": "1.72.17\n\n[ci skip]", "code": "async def modify_margin_helper(self, symbol, amount, type, params={}):\n await self.load_markets()\n market = self.market(symbol)\n posSide = self.safe_string(params, 'posSide', 'net')\n params = self.omit(params, ['posSide'])\n request = {\n 'instId': market['id'],\n 'amt': amount,\n 'type': type,\n 'posSide': posSide,\n }\n response = await self.privatePostAccountPositionMarginBalance(self.extend(request, params))\n #\n # {\n # \"code\": \"0\",\n # \"data\": [\n # {\n # \"amt\": \"0.01\",\n # \"instId\": \"ETH-USD-SWAP\",\n # \"posSide\": \"net\",\n # \"type\": \"reduce\"\n # }\n # ],\n # \"msg\": \"\"\n # }\n #\n data = self.safe_value(response, 'data', [])\n entry = self.safe_value(data, 0, {})\n errorCode = self.safe_string(response, 'code')\n status = 'ok' if (errorCode == '0') else 'failed'\n responseAmount = self.safe_number(entry, 'amt')\n responseType = self.safe_string(entry, 'type')\n marketId = self.safe_string(entry, 'instId')\n responseMarket = self.safe_market(marketId, market)\n code = responseMarket['base'] if responseMarket['inverse'] else responseMarket['quote']\n symbol = responseMarket['symbol']\n return {\n 'info': response,\n 'type': responseType,\n 'amount': responseAmount,\n 'code': code,\n 'symbol': symbol,\n 'status': status,\n }\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 563, "n_words": 135, "vocab_size": 94, "complexity": 3, "nloc": 30, "token_counts": 229, "n_ast_nodes": 399, "n_identifiers": 27, "random_cut": "async def modify_margin_helper(self, symbol, amount, type, params={}):\n await self.load_markets()\n market = self.market(symbol)\n posSide = self.safe_string(params, 'posSide', 'net')\n params = self.omit(params, ['posSide'])\n request = {\n 'instId': market['id'],\n 'amt': amount,\n 'type': type,\n 'posSide': posSide,\n }\n response = await self.privatePostAccountPositionMarginBalance(self.extend(request, params))\n #\n # {\n # \"code\": \"0\",\n # \"data\": [\n # {\n # \"amt\": \"0.01\",\n # \"instId\": \"ETH-USD-SWAP\",\n # \"posSide\": \"net\",\n # \"type\": \"reduce\"\n # }\n # ],\n # \"msg\": \"\"\n # }\n #\n data = self.safe_value(response, 'data', [])\n entry = self.safe_value(data, 0, {})\n errorCode = self.safe_string(response, 'code')\n status = 'ok' if (errorCode == '0') else 'failed'\n responseAmount = self.safe_number(entry, 'amt')\n responseType = self.safe_string(entry, 'type')\n marketId = self.safe_string(entry, 
'instId')\n responseMarket = self.safe_market(marketId, market)\n code = responseMarket['base'] if responseMarket['inverse'] else responseMarket['quote']\n symbol = responseMarket['symbol']\n return {\n 'info': response,\n 'type': responseType,\n 'amount': response" }, { "id": 165030, "commit_id": "cf515a5a8892d0fdc17b6cdc8ee0c1fe9b679dee", "repo": "pandas", "path": "pandas/tests/io/parser/common/test_file_buffer_url.py", "file_name": "test_file_buffer_url.py", "fun_name": "test_internal_eof_byte_to_file", "commit_message": "TST: Use uuid instead of random chars for temp files (#45996)", "code": "def test_internal_eof_byte_to_file(all_parsers):\n # see gh-16559\n parser = all_parsers\n data = b'c1,c2\\r\\n\"test \\x1a test\", test\\r\\n'\n expected = DataFrame([[\"test \\x1a test\", \" test\"]], columns=[\"c1\", \"c2\"])\n path = f\"__{uuid.uuid4()}__.csv\"\n\n with tm.ensure_clean(path) as path:\n with open(path, \"wb\") as f:\n f.write(data)\n\n result = parser.read_csv(path)\n tm.assert_frame_equal(result, expected)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 96, "n_words": 41, "vocab_size": 33, "complexity": 1, "nloc": 10, "token_counts": 78, "n_ast_nodes": 156, "n_identifiers": 18, "random_cut": "def test_internal_eof_byte_to_file(all_parsers):\n # see gh-16559\n parser = all_parsers\n data = b'c1,c2\\r\\n\"test \\x1a test\", test\\r\\n'\n expected = DataFrame([[\"test \\x1a test\", \" test\"]], columns=[\"c1\", \"c2\"])\n path = f\"__{uuid.uuid4()}__.csv\"\n\n with tm.ensure_clean(path) as path:\n with open(path, \"wb\") as f:\n f.write(data)\n\n result = parser.read_csv(path)\n tm.a" }, { "id": 211122, "commit_id": "06c8cf7e5a75be43c51323a6c21e21af291e5728", "repo": "PaddleDetection", "path": "ppdet/metrics/metrics.py", "file_name": "metrics.py", "fun_name": "accumulate", "commit_message": "fix voc save_result in infer (#6547)", "code": "def accumulate(self):\n output = \"bbox.json\"\n if self.output_eval:\n output = os.path.join(self.output_eval, output)\n with open(output, 'w') as f:\n json.dump(self.results, f)\n logger.info('The bbox result is saved to bbox.json.')\n if self.save_prediction_only:\n return\n\n logger.info(\"Accumulating evaluatation results...\")\n self.detection_map.accumulate()\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 129, "n_words": 32, "vocab_size": 29, "complexity": 3, "nloc": 11, "token_counts": 72, "n_ast_nodes": 126, "n_identifiers": 16, "random_cut": "def accumulate(self):\n output = \"bbox.json\"\n if se" }, { "id": 137609, "commit_id": "c03c4c67182d7f79d36465f2ffbef88b2b958afb", "repo": "ray", "path": "python/ray/tune/tests/test_searchers.py", "file_name": "test_searchers.py", "fun_name": "assertCorrectExperimentOutput", "commit_message": "[Tune] Fix `AxSearch` search space conversion for fixed list hyperparameters (#31088)\n\nHandles lists of hyperparameters correctly so that the search space is passed into Ax and back to Tune in the correct format (lists stay as lists rather than getting converted to a dict).\r\n\r\nSigned-off-by: Justin Yu ", "code": "def assertCorrectExperimentOutput(self, analysis):\n best_trial = analysis.best_trial\n self.assertLessEqual(best_trial.config[\"report\"], 2.0)\n # Make sure that constant parameters aren't lost\n # Hyperopt converts lists to tuples, so check for either\n 
self.assertIn(best_trial.config[\"list\"], ([1, 2, 3], (1, 2, 3)))\n self.assertEqual(best_trial.config[\"num\"], 4)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 76, "n_words": 35, "vocab_size": 33, "complexity": 1, "nloc": 5, "token_counts": 69, "n_ast_nodes": 103, "n_identifiers": 8, "random_cut": "def assertCorrectExperimentOutput(self, analysis):\n best_trial = analysis.best_trial\n self.assertLessEqual(best_trial.config[\"report\"], 2.0)\n # Make sure that constant parameters aren't lo" }, { "id": 61014, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/req/req_install.py", "file_name": "req_install.py", "fun_name": "is_wheel", "commit_message": "upd; format", "code": "def is_wheel(self):\n # type: () -> bool\n if not self.link:\n return False\n return self.link.is_wheel\n\n # Things valid for sdists", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 53, "n_words": 19, "vocab_size": 17, "complexity": 2, "nloc": 4, "token_counts": 19, "n_ast_nodes": 33, "n_identifiers": 3, "random_cut": "def is_wheel(self):\n # type: () -> bool\n if not self.link:\n return False\n " }, { "id": 311256, "commit_id": "70321ed795086f385dd521f04156a26ff0948ffd", "repo": "core", "path": "tests/components/tradfri/test_sensor.py", "file_name": "test_sensor.py", "fun_name": "test_battery_sensor", "commit_message": "Add battery sensor for Tradfri blinds (#65067)", "code": "async def test_battery_sensor(hass, mock_gateway, mock_api_factory):\n \n mock_gateway.mock_devices.append(\n mock_sensor(test_state=[{\"attribute\": \"battery_level\", \"value\": 60}])\n )\n await setup_integration(hass)\n\n sensor_1 = hass.states.get(\"sensor.tradfri_sensor_0\")\n assert sensor_1 is not None\n assert sensor_1.state == \"60\"\n assert sensor_1.attributes[\"unit_of_measurement\"] == \"%\"\n assert sensor_1.attributes[\"device_class\"] == \"battery\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 67, "n_words": 33, "vocab_size": 27, "complexity": 1, "nloc": 10, "token_counts": 77, "n_ast_nodes": 138, "n_identifiers": 14, "random_cut": "async def test_battery_sensor(hass, mock_gateway, mock_api_factory):\n \n mock_gateway.mock_devices.append(\n mock_sensor(test_state=[{\"attribute\": \"battery_level\", \"value\": 60}])\n )\n await setup_integration(hass)\n\n sensor_1 = hass.states.get(\"sensor.tradfri_sensor_0\")\n assert sensor_1 is not None\n assert sensor_1.state == \"60\"\n assert sensor_1.attributes[\"unit_of_measurement\"] == \"%\"\n assert sensor_1.attribute" }, { "id": 77147, "commit_id": "03fc621129e6256a42c0c0d118b5312792825a44", "repo": "wagtail", "path": "wagtail/snippets/views/snippets.py", "file_name": "snippets.py", "fun_name": "_run_before_hooks", "commit_message": "Refactor Snippets edit view to extend from generic EditView (#8344)", "code": "def _run_before_hooks(self):\n for fn in hooks.get_hooks(\"before_edit_snippet\"):\n result = fn(self.request, self.object)\n if hasattr(result, \"status_code\"):\n return result\n return None\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 67, "n_words": 17, "vocab_size": 15, 
"complexity": 3, "nloc": 6, "token_counts": 39, "n_ast_nodes": 64, "n_identifiers": 9, "random_cut": "def _run_before_hooks(self):\n for fn " }, { "id": 222840, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/cygwinccompiler.py", "file_name": "cygwinccompiler.py", "fun_name": "object_filenames", "commit_message": "add python 3.10.4 for windows", "code": "def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):\n \n if output_dir is None:\n output_dir = ''\n obj_names = []\n for src_name in source_filenames:\n # use normcase to make sure '.rc' is really '.rc' and not '.RC'\n base, ext = os.path.splitext(os.path.normcase(src_name))\n if ext not in (self.src_extensions + ['.rc','.res']):\n raise UnknownFileError(\"unknown file type '%s' (from '%s')\" % \\\n (ext, src_name))\n if strip_dir:\n base = os.path.basename (base)\n if ext in ('.res', '.rc'):\n # these need to be compiled to object files\n obj_names.append (os.path.join(output_dir,\n base + ext + self.obj_extension))\n else:\n obj_names.append (os.path.join(output_dir,\n base + self.obj_extension))\n return obj_names\n\n# the same as cygwin plus some additional parameters", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 396, "n_words": 98, "vocab_size": 70, "complexity": 6, "nloc": 18, "token_counts": 146, "n_ast_nodes": 240, "n_identifiers": 19, "random_cut": "def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):\n \n if output_dir is None:\n output_dir = ''\n obj_names = []\n for src_name in source_filenames:\n # use normcase t" }, { "id": 272077, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/feature_column/dense_features_v2.py", "file_name": "dense_features_v2.py", "fun_name": "build", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def build(self, _):\n for column in self._feature_columns:\n with tf.name_scope(column.name):\n column.create_state(self._state_manager)\n # We would like to call Layer.build and not _DenseFeaturesHelper.build.\n # pylint: disable=protected-access\n super(kfc._BaseFeaturesLayer, self).build(\n None\n ) # pylint: disable=bad-super-call\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 102, "n_words": 30, "vocab_size": 27, "complexity": 2, "nloc": 7, "token_counts": 45, "n_ast_nodes": 75, "n_identifiers": 13, "random_cut": "def build(self, _):\n " }, { "id": 14350, "commit_id": "594effa279668bd955e98f1cd5c036b37d3bbd40", "repo": "pydantic", "path": "pydantic/fields.py", "file_name": "fields.py", "fun_name": "get_default", "commit_message": "Switching to `pydantic_core` (#4516)\n\n* working on core schema generation\r\n\r\n* adapting main.py\r\n\r\n* getting tests to run\r\n\r\n* fix tests\r\n\r\n* disable pyright, fix mypy\r\n\r\n* moving to class-based model generation\r\n\r\n* working on validators\r\n\r\n* change how models are created\r\n\r\n* start fixing test_main.py\r\n\r\n* fixing mypy\r\n\r\n* SelfType\r\n\r\n* recursive models working, more tests fixed\r\n\r\n* fix tests on <3.10\r\n\r\n* get docs build to pass\r\n\r\n* starting to cleanup types.py\r\n\r\n* starting works on custom types\r\n\r\n* working on using annotated-types\r\n\r\n* using annoated types for constraints\r\n\r\n* lots of cleanup, fixing network 
tests\r\n\r\n* network tests passing :tada:\r\n\r\n* working on types\r\n\r\n* working on types and cleanup\r\n\r\n* fixing UUID type, restructing again\r\n\r\n* more types and newer pydantic-core\r\n\r\n* working on Iterable\r\n\r\n* more test_types tests\r\n\r\n* support newer pydantic-core, fixing more test_types.py\r\n\r\n* working through more test_types.py\r\n\r\n* test_types.py at last passing locally :tada:\r\n\r\n* fixing more tests in test_types.py\r\n\r\n* fix datetime_parse tests and linting\r\n\r\n* get tests running again, rename to test_datetime.py\r\n\r\n* renaming internal modules\r\n\r\n* working through mypy errors\r\n\r\n* fixing mypy\r\n\r\n* refactoring _generate_schema.py\r\n\r\n* test_main.py passing\r\n\r\n* uprev deps\r\n\r\n* fix conftest and linting?\r\n\r\n* importing Annotated\r\n\r\n* ltining\r\n\r\n* import Annotated from typing_extensions\r\n\r\n* fixing 3.7 compatibility\r\n\r\n* fixing tests on 3.9\r\n\r\n* fix linting\r\n\r\n* fixing SecretField and 3.9 tests\r\n\r\n* customising get_type_hints\r\n\r\n* ignore warnings on 3.11\r\n\r\n* spliting repr out of utils\r\n\r\n* removing unused bits of _repr, fix tests for 3.7\r\n\r\n* more cleanup, removing many type aliases\r\n\r\n* clean up repr\r\n\r\n* support namedtuples and typeddicts\r\n\r\n* test is_union\r\n\r\n* removing errors, uprev pydantic-core\r\n\r\n* fix tests on 3.8\r\n\r\n* fixing private attributes and model_post_init\r\n\r\n* renaming and cleanup\r\n\r\n* remove unnecessary PydanticMetadata inheritance\r\n\r\n* fixing forward refs and mypy tests\r\n\r\n* fix signatures, change how xfail works\r\n\r\n* revert mypy tests to 3.7 syntax\r\n\r\n* correct model title\r\n\r\n* try to fix tests\r\n\r\n* fixing ClassVar forward refs\r\n\r\n* uprev pydantic-core, new error format\r\n\r\n* add \"force\" argument to model_rebuild\r\n\r\n* Apply suggestions from code review\r\n\r\nSuggestions from @tiangolo and @hramezani :pray:\r\n\r\nCo-authored-by: Hasan Ramezani \r\nCo-authored-by: Sebastián Ramírez \r\n\r\n* more suggestions from @tiangolo\r\n\r\n* extra -> json_schema_extra on Field\r\n\r\nCo-authored-by: Hasan Ramezani \r\nCo-authored-by: Sebastián Ramírez ", "code": "def get_default(self) -> Any:\n # we don't want to call default_factory as it may have side effects, so we default to None as the\n # least-worse alternative\n return _utils.smart_deepcopy(self.default) if self.default_factory is None else None\n", "url": "https://github.com/pydantic/pydantic.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 55, "n_words": 35, "vocab_size": 29, "complexity": 2, "nloc": 2, "token_counts": 24, "n_ast_nodes": 39, "n_identifiers": 7, "random_cut": "def get_default(self) -> Any:\n # we don't want to call default_factory as it may have side effects, so we default to None as the\n # least-worse alternative\n return _utils.smart_deepco" }, { "id": 277791, "commit_id": "f3cafc77c269f7ecbf80bb4cf4b54e28c153f4e6", "repo": "keras", "path": "keras/callbacks.py", "file_name": "callbacks.py", "fun_name": "_get_most_recently_modified_file_matching_pattern", "commit_message": "resolve line-too-long in root directory", "code": "def _get_most_recently_modified_file_matching_pattern(self, pattern):\n \n dir_name = os.path.dirname(pattern)\n base_name = os.path.basename(pattern)\n base_name_regex = \"^\" + re.sub(r\"{.*}\", r\".*\", base_name) + \"$\"\n\n # If tf.train.latest_checkpoint tells us there exists a latest\n # checkpoint, use that as it is more robust than 
`os.path.getmtime()`.\n latest_tf_checkpoint = tf.train.latest_checkpoint(dir_name)\n if latest_tf_checkpoint is not None and re.match(\n base_name_regex, os.path.basename(latest_tf_checkpoint)\n ):\n return latest_tf_checkpoint\n\n latest_mod_time = 0\n file_path_with_latest_mod_time = None\n n_file_with_latest_mod_time = 0\n file_path_with_largest_file_name = None\n\n if tf.io.gfile.exists(dir_name):\n for file_name in os.listdir(dir_name):\n # Only consider if `file_name` matches the pattern.\n if re.match(base_name_regex, file_name):\n file_path = os.path.join(dir_name, file_name)\n mod_time = os.path.getmtime(file_path)\n if (\n file_path_with_largest_file_name is None\n or file_path > file_path_with_largest_file_name\n ):\n file_path_with_largest_file_name = file_path\n if mod_time > latest_mod_time:\n latest_mod_time = mod_time\n file_path_with_latest_mod_time = file_path\n # In the case a file with later modified time is found,\n # reset the counter for the number of files with latest\n # modified time.\n n_file_with_latest_mod_time = 1\n elif mod_time == latest_mod_time:\n # In the case a file has modified time tied with the\n # most recent, increment the counter for the number of\n # files with latest modified time by 1.\n n_file_with_latest_mod_time += 1\n\n if n_file_with_latest_mod_time == 1:\n # Return the sole file that has most recent modified time.\n return file_path_with_latest_mod_time\n else:\n # If there are more than one file having latest modified time,\n # return the file path with the largest file name.\n return file_path_with_largest_file_name\n\n\n@keras_export(\"keras.callbacks.BackupAndRestore\", v1=[])", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.callbacks.BackupAndRestore\", v1=[])", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 864, "n_words": 222, "vocab_size": 111, "complexity": 11, "nloc": 33, "token_counts": 193, "n_ast_nodes": 339, "n_identifiers": 32, "random_cut": "def _get_most_recently_modified_file_matching_pattern(self, pattern):\n \n dir_name = os.path.dirname(pattern)\n base_name = os.path.basename(pattern)\n base_name_regex = \"^\" + re.sub(r\"{.*}\", r\".*\", base_name) + \"$\"\n\n # If tf.train.latest_checkpoint tells us there exists a latest\n # checkpoint, use that as it is more robust than `os.path.getmtime()`.\n latest_tf_checkpoint = tf.train.latest_checkpoint(dir_name)\n if latest_tf_checkpoint is not None and re.match(\n base_name_regex, os.path.basename(latest_tf_checkpoint)\n ):\n return latest_tf_checkpoint\n\n latest_mod_time = 0\n file_path_with_latest_mod_time = None\n n_file_with_latest_mod_time = 0\n file_path_with_largest_file_name = None\n\n if tf.io.gfile.exists(dir_name):\n for file_name in os.listdir(dir_name):\n # Only consider if `file_name` matches the pattern.\n if re.match(base_name_regex, file_name):\n file_path = os.path.join(dir_name, file_name)\n mod_time = os.path.getmtime(file_path)\n if (\n file_path_with_largest_file_name is None\n or file_path > file_path_with_largest_file_name\n ):\n file_path_with_largest_file_name = file_path\n if mod_time > latest_mod_time:\n latest_mod_time = mod_time\n file_path_with_latest_mod_time = file_path\n # In the case a file with later modified time is found,\n # reset the counter for the number of files with latest\n # modified time.\n n_file_with_latest_mod_time = 1\n elif mod_time == latest_mod_time:\n # In the case a file has modified time tied with the\n # most recent, increment the counter for the 
number of\n # files with latest modified time by 1.\n n_file_with_latest_mod_time += 1\n\n if n_file_with_latest_mod_time == 1:\n # Return the sole file that has most recent modified time.\n return file_path_with_latest_mod_time\n else:\n # If there are more than one file having latest modified time,\n # return the file path with the largest fil" }, { "id": 41402, "commit_id": "a07ef69882ed76e09a0ed43d6f3ea33780c1b2be", "repo": "seaborn", "path": "seaborn/tests/_core/test_properties.py", "file_name": "test_properties.py", "fun_name": "test_rcparam_default", "commit_message": "Transition mappings->properties, leaving a few loose ends", "code": "def test_rcparam_default(self):\n\n with mpl.rc_context({\"lines.linewidth\": 2}):\n assert self.prop().default_range == (1, 4)\n\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 27, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 31, "n_ast_nodes": 53, "n_identifiers": 6, "random_cut": "def test_rcparam_default(self):\n\n with mpl.rc_context({\"lines.linewidth\": 2}):\n assert self" }, { "id": 281126, "commit_id": "ea964109d654394cc0a5237e6ec5510ba6404097", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/cryptocurrency/due_diligence/dd_controller.py", "file_name": "dd_controller.py", "fun_name": "call_basic", "commit_message": "Crypto menu refactor (#1119)\n\n* enabled some crypto commands in dd to be called independent of source loaded\r\n\r\n* support for coin_map_df in all dd functions + load ta and plot chart refactor\r\n\r\n* updated tests and removed coingecko scrapping where possible\r\n\r\n* removed ref of command from hugo\r\n\r\n* updated pycoingecko version\r\n\r\n* refactoring load\r\n\r\n* refactored load to fetch prices; pred can run independent of source now\r\n\r\n* load by default usd on cp/cg and usdt on cb/bin\r\n\r\n* updated to rich for formatting and updated dependencies\r\n\r\n* fixed changes requested\r\n\r\n* update docs\r\n\r\n* revert discord requirements\r\n\r\n* removed absolute from calculate change for price\r\n\r\n* fixing pr issues\r\n\r\n* fix loading issue when similar coins exist, move coins to home, fill n/a\r\n\r\n* update docs for coins\r\n\r\n* adds load to ta and pred menu", "code": "def call_basic(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"basic\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n ns_parser = parse_known_args_and_warn(\n parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED\n )\n if ns_parser:\n coinpaprika_view.display_basic(\n self.coin_map_df[\"CoinPaprika\"],\n ns_parser.export,\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 173, "n_words": 24, "vocab_size": 21, "complexity": 2, "nloc": 17, "token_counts": 62, "n_ast_nodes": 98, "n_identifiers": 18, "random_cut": "def call_basic(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"basic\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n ns_parser = parse_known_args_and_warn(\n parser, other_args, EX" }, { "id": 94847, "commit_id": "03cbfbf6c5e459657d688055cf9fce074efe039a", "repo": "sentry", "path": "src/sentry/api/serializers/models/auditlogentry.py", "file_name": "auditlogentry.py", "fun_name": "override_actor_id", "commit_message": "fix(auditlog): shorten scim 
audit log name (#37763)", "code": "def override_actor_id(user):\n # overrides the usage of actor_id only to make SCIM token\n # name more readable (for now)\n scim_prefix = \"scim-internal-integration-\"\n scim_regex = re.compile(\n scim_prefix\n + r\"[0-9a-fA-F]{6}\\-[0-9a-fA-F]{8}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{7}\"\n )\n scim_match = re.match(scim_regex, user.get_display_name())\n return scim_match\n\n\n@register(AuditLogEntry)", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@register(AuditLogEntry)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 69, "n_words": 36, "vocab_size": 31, "complexity": 1, "nloc": 8, "token_counts": 35, "n_ast_nodes": 70, "n_identifiers": 11, "random_cut": "def override_actor_id(user):\n # overrides the usage of actor_id only to make SCIM token\n # name more readable (for now)\n scim_prefix = \"scim-internal-integration-\"\n scim_regex = re.compile(\n scim_prefix\n + r\"[0-9a-fA-F]{6}\\-[0" }, { "id": 192011, "commit_id": "3e4d062cbc2d5f0891452f57bb9adeb8a64f1cce", "repo": "vision", "path": "test/builtin_dataset_mocks.py", "file_name": "builtin_dataset_mocks.py", "fun_name": "_make_detection_anns_folder", "commit_message": "Expand tests for prototype datasets (#5187)\n\n* refactor prototype datasets tests\r\n\r\n* skip tests with insufficient third party dependencies\r\n\r\n* cleanup\r\n\r\n* add tests for SBD prototype dataset\r\n\r\n* add tests for SEMEION prototype dataset\r\n\r\n* add tests for VOC prototype dataset\r\n\r\n* add tests for CelebA prototype dataset\r\n\r\n* add tests for DTD prototype dataset\r\n\r\n* add tests for FER2013 prototype dataset\r\n\r\n* add tests for CLEVR prototype dataset\r\n\r\n* add tests for oxford-iiit-pet prototype dataset\r\n\r\n* enforce tests for new datasets\r\n\r\n* add missing archive generation for oxford-iiit-pet tests\r\n\r\n* add tests for CUB200 prototype datasets\r\n\r\n* fix split generation\r\n\r\n* add capability to mark parametrization and xfail cub200 traverse tests", "code": "def _make_detection_anns_folder(cls, root, name, *, file_name_fn, num_examples):\n folder = root / name\n folder.mkdir(parents=True, exist_ok=True)\n\n for idx in range(num_examples):\n cls._make_detection_ann_file(folder, file_name_fn(idx))\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 51, "n_words": 20, "vocab_size": 20, "complexity": 2, "nloc": 5, "token_counts": 51, "n_ast_nodes": 75, "n_identifiers": 13, "random_cut": "def _make_detection_anns_folder(cls, root, name, *, file_name_fn, num_examples):\n folder = root / name\n folder.mkdir(parents=True, exist_ok=True)\n\n for idx in range(num_examples):\n cls._make_detection_ann_file(folder, file_name_fn(idx))\n" }, { "id": 148006, "commit_id": "51a4a1a80267e231f45905453cf2029c22d370a9", "repo": "ray", "path": "dashboard/modules/actor/actor_head.py", "file_name": "actor_head.py", "fun_name": "get_actors", "commit_message": "[State Observability] Basic functionality for centralized data (#23744)\n\nSupport listing actor/pg/job/node/workers\r\n\r\nDesign doc: https://docs.google.com/document/d/1IeEsJOiurg-zctOcBjY-tQVbsCmURFSnUCTkx_4a7Cw/edit#heading=h.9ub9e6yvu9p2\r\n\r\nNote that this PR doesn't contain any output except ids. 
I will update them in the follow-up PRs.", "code": "async def get_actors(self, req) -> aiohttp.web.Response:\n data = await self._dashboard_head.gcs_state_aggregator.get_actors()\n return rest_response(\n success=True, message=\"\", result=data, convert_google_style=False\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 48, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 5, "token_counts": 44, "n_ast_nodes": 69, "n_identifiers": 14, "random_cut": "async def get_actors(self, req) -> aiohttp.web.Response:\n data = await self._dashboard_head.gcs_state_aggregator.get" }, { "id": 292555, "commit_id": "a60c37cdb8cc9d0b9bad1dedb92b6068cd9d1244", "repo": "core", "path": "homeassistant/components/samsungtv/config_flow.py", "file_name": "config_flow.py", "fun_name": "_try_connect", "commit_message": "Expose Samsung wrapper as async (#67042)\n\nCo-authored-by: epenet ", "code": "async def _try_connect(self) -> None:\n \n for method in SUPPORTED_METHODS:\n self._bridge = SamsungTVBridge.get_bridge(self.hass, method, self._host)\n result = await self._bridge.async_try_connect()\n if result == RESULT_SUCCESS:\n return\n if result != RESULT_CANNOT_CONNECT:\n raise data_entry_flow.AbortFlow(result)\n LOGGER.debug(\"No working config found\")\n raise data_entry_flow.AbortFlow(RESULT_CANNOT_CONNECT)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 137, "n_words": 35, "vocab_size": 30, "complexity": 4, "nloc": 11, "token_counts": 72, "n_ast_nodes": 119, "n_identifiers": 17, "random_cut": "async def _try_connect(self) -> None:\n \n " }, { "id": 75058, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/image_operations.py", "file_name": "image_operations.py", "fun_name": "get_rect", "commit_message": "Reformat with black", "code": "def get_rect(self):\n \n return Rect(\n -self.offset[0],\n -self.offset[1],\n -self.offset[0] + self.size[0] / self.scale[0],\n -self.offset[1] + self.size[1] / self.scale[1],\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 82, "n_words": 17, "vocab_size": 15, "complexity": 1, "nloc": 7, "token_counts": 70, "n_ast_nodes": 104, "n_identifiers": 6, "random_cut": "def get_rect(self):\n \n return Rect(\n -self.offset[0],\n -self.offset[1],\n -self.offset[0] + self.size[0] / self.scale[0],\n -self.offset[1] + self.size[1] / self.scale[1],\n )\n" }, { "id": 34496, "commit_id": "81156d20cd76c1a43ed44fdbc785e237d60b6896", "repo": "transformers", "path": "tests/test_add_new_model_like.py", "file_name": "test_add_new_model_like.py", "fun_name": "test_get_model_files", "commit_message": "Add model like (#14992)\n\n* Add new model like command\r\n\r\n* Bad doc-styler\r\n\r\n* black and doc-styler, stop fighting!\r\n\r\n* black and doc-styler, stop fighting!\r\n\r\n* At last\r\n\r\n* Clean up\r\n\r\n* Typo\r\n\r\n* Bad doc-styler\r\n\r\n* Bad doc-styler\r\n\r\n* All good maybe?\r\n\r\n* Use constants\r\n\r\n* Add doc and type hints\r\n\r\n* More cleaning\r\n\r\n* Add doc\r\n\r\n* Fix Copied from\r\n\r\n* Doc template\r\n\r\n* Use typing.Pattern instead\r\n\r\n* Framework-specific files\r\n\r\n* Fixes\r\n\r\n* Select frameworks clean model init\r\n\r\n* Deal with frameworks in main init\r\n\r\n* fixes\r\n\r\n* Last fix\r\n\r\n* Prompt 
user for info\r\n\r\n* Delete exemple config\r\n\r\n* Last fixes\r\n\r\n* Add test config\r\n\r\n* Fix bug with model_type included in each other\r\n\r\n* Fixes\r\n\r\n* More fixes\r\n\r\n* More fixes\r\n\r\n* Adapt config\r\n\r\n* Remove print statements\r\n\r\n* Will fix tokenization later, leave it broken for now\r\n\r\n* Add test\r\n\r\n* Quality\r\n\r\n* Try this way\r\n\r\n* Debug\r\n\r\n* Maybe by setting the path?\r\n\r\n* Let's try another way\r\n\r\n* It should go better when actually passing the arg...\r\n\r\n* Remove debug statements and style\r\n\r\n* Fix config\r\n\r\n* Add tests\r\n\r\n* Test require the three backends\r\n\r\n* intermediate commit\r\n\r\n* Revamp pattern replacements and start work on feature extractors\r\n\r\n* Adapt model info\r\n\r\n* Finalize code for processors\r\n\r\n* Fix in main init additions\r\n\r\n* Finish questionnaire for processing classes\r\n\r\n* Fix file name\r\n\r\n* Fix for real\r\n\r\n* Fix patterns\r\n\r\n* Style\r\n\r\n* Remove needless warnings\r\n\r\n* Copied from should work now.\r\n\r\n* Include Copied form in blocks\r\n\r\n* Add test\r\n\r\n* More fixes and tests\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Lysandre Debut \r\n\r\n* Address review comment\r\n\r\nCo-authored-by: Lysandre Debut ", "code": "def test_get_model_files(self):\n # BERT\n bert_files = get_model_files(\"bert\")\n\n doc_file = str(Path(bert_files[\"doc_file\"]).relative_to(REPO_PATH))\n self.assertEqual(doc_file, \"docs/source/model_doc/bert.mdx\")\n\n model_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files[\"model_files\"]}\n self.assertEqual(model_files, BERT_MODEL_FILES)\n\n self.assertEqual(bert_files[\"module_name\"], \"bert\")\n\n test_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files[\"test_files\"]}\n bert_test_files = {\n \"tests/test_tokenization_bert.py\",\n \"tests/test_modeling_bert.py\",\n \"tests/test_modeling_tf_bert.py\",\n \"tests/test_modeling_flax_bert.py\",\n }\n self.assertEqual(test_files, bert_test_files)\n\n # VIT\n vit_files = get_model_files(\"vit\")\n doc_file = str(Path(vit_files[\"doc_file\"]).relative_to(REPO_PATH))\n self.assertEqual(doc_file, \"docs/source/model_doc/vit.mdx\")\n\n model_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files[\"model_files\"]}\n self.assertEqual(model_files, VIT_MODEL_FILES)\n\n self.assertEqual(vit_files[\"module_name\"], \"vit\")\n\n test_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files[\"test_files\"]}\n vit_test_files = {\n \"tests/test_feature_extraction_vit.py\",\n \"tests/test_modeling_vit.py\",\n \"tests/test_modeling_tf_vit.py\",\n \"tests/test_modeling_flax_vit.py\",\n }\n self.assertEqual(test_files, vit_test_files)\n\n # Wav2Vec2\n wav2vec2_files = get_model_files(\"wav2vec2\")\n doc_file = str(Path(wav2vec2_files[\"doc_file\"]).relative_to(REPO_PATH))\n self.assertEqual(doc_file, \"docs/source/model_doc/wav2vec2.mdx\")\n\n model_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files[\"model_files\"]}\n self.assertEqual(model_files, WAV2VEC2_MODEL_FILES)\n\n self.assertEqual(wav2vec2_files[\"module_name\"], \"wav2vec2\")\n\n test_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files[\"test_files\"]}\n wav2vec2_test_files = {\n \"tests/test_feature_extraction_wav2vec2.py\",\n \"tests/test_modeling_wav2vec2.py\",\n \"tests/test_modeling_tf_wav2vec2.py\",\n \"tests/test_modeling_flax_wav2vec2.py\",\n \"tests/test_processor_wav2vec2.py\",\n \"tests/test_tokenization_wav2vec2.py\",\n 
}\n self.assertEqual(test_files, wav2vec2_test_files)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 502, "n_words": 118, "vocab_size": 66, "complexity": 7, "nloc": 45, "token_counts": 357, "n_ast_nodes": 608, "n_identifiers": 21, "random_cut": "def test_get_model_files(self):\n # BERT\n bert_files = get_model_files(\"bert\")\n\n doc_file = str(Path(bert_files[\"doc_file\"]).relative_to(REPO_PATH))\n self.assertEqual(doc_file, \"docs/source/model_doc/bert.mdx\")\n\n model_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files[\"model_files\"]}\n self.assertEqual(model_files, BERT_MODEL_FILES)\n\n self.assertEqual(bert_files[\"module_name\"], \"bert\")\n\n test_files = {str(Path(f).relative_to(REPO_PATH)) for f in bert_files[\"test_files\"]}\n bert_test_files = {\n \"tests/test_tokenization_bert.py\",\n \"tests/test_modeling_bert.py\",\n \"tests/test_modeling_tf_bert.py\",\n \"tests/test_modeling_flax_bert.py\",\n }\n self.assertEqual(test_files, bert_test_files)\n\n # VIT\n vit_files = get_model_files(\"vit\")\n doc_file = str(Path(vit_files[\"doc_file\"]).relative_to(REPO_PATH))\n self.assertEqual(doc_file, \"docs/source/model_doc/vit.mdx\")\n\n model_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files[\"model_files\"]}\n self.assertEqual(model_files, VIT_MODEL_FILES)\n\n self.assertEqual(vit_files[\"module_name\"], \"vit\")\n\n test_files = {str(Path(f).relative_to(REPO_PATH)) for f in vit_files[\"test_files\"]}\n vit_test_files = {\n \"tests/test_feature_extraction_vit.py\",\n \"tests/test_modeling_vit.py\",\n \"tests/test_modeling_tf_vit.py\",\n \"tests/test_modeling_flax_vit.py\",\n }\n self.assertEqual(test_files, vit_test_files)\n\n # Wav2Vec2\n wav2vec2_files = get_model_files(\"wav2vec2\")\n doc_file = str(Path(wav2vec2_files[\"doc_file\"]).relative_to(REPO_PATH))\n self.assertEqual(doc_file, \"docs/source/model_doc/wav2vec2.mdx\")\n\n model_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files[\"model_files\"]}\n self.assertEqual(model_files, WAV2VEC2_MODEL_FILES)\n\n self.assertEqual(wav2vec2_files[\"module_name\"], \"wav2vec2\")\n\n test_files = {str(Path(f).relative_to(REPO_PATH)) for f in wav2vec2_files[\"test_files\"]}\n wav2vec2_test_files = {\n \"tests/test_feature_extraction_wav2vec2.py\",\n \"tests/test_modeling_wav2vec2.py\",\n \"tests/test_modeling_tf_wav2vec2.py\",\n \"tests/test_modeling_flax_wav2vec2.py\",\n " }, { "id": 10453, "commit_id": "def0a12f6a312b1133d10750226f485b42e319f8", "repo": "jina", "path": "tests/integration/v2_api/test_func_routing.py", "file_name": "test_func_routing.py", "fun_name": "test_target_peapod_with_two_pathways_one_skip", "commit_message": "feat: flow post return DocumentArray (#4137)", "code": "def test_target_peapod_with_two_pathways_one_skip():\n f = (\n Flow(port_expose=1234)\n .add()\n .add(needs=['gateway', 'executor0'])\n .add(name='my_target')\n )\n with f:\n results = Client(port=1234).post(\n on='/search',\n inputs=Document(),\n return_results=True,\n target_peapod='my_target',\n )\n assert len(results[0].data.docs) == 1\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 126, "n_words": 25, "vocab_size": 23, "complexity": 1, "nloc": 15, "token_counts": 83, "n_ast_nodes": 140, "n_identifiers": 19, "random_cut": "def test_target_peapod_with_two_pathways_one_skip():\n f = (\n 
Flow(port_expose=1234)\n .add()\n .add(needs=['gateway', 'executor0'])\n .add(name='my_tar" }, { "id": 129532, "commit_id": "2010f131756770cbea68d993c13a8bf88c52e381", "repo": "ray", "path": "dashboard/tests/test_dashboard.py", "file_name": "test_dashboard.py", "fun_name": "test_get_cluster_status", "commit_message": "Fix dashboard test bug (#21742)\n\nCurrently `wait_until_succeeded_without_exception` is used in the dashboard, and it returns True/False. Unfortunately, there are lots of code that doesn't assert on this method (which means things are not actually tested).", "code": "def test_get_cluster_status(ray_start_with_dashboard):\n assert (wait_until_server_available(ray_start_with_dashboard[\"webui_url\"])\n is True)\n address_info = ray_start_with_dashboard\n webui_url = address_info[\"webui_url\"]\n webui_url = format_web_url(webui_url)\n\n # Check that the cluster_status endpoint works without the underlying data\n # from the GCS, but returns nothing.", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 61, "n_words": 33, "vocab_size": 27, "complexity": 1, "nloc": 24, "token_counts": 184, "n_ast_nodes": 57, "n_identifiers": 6, "random_cut": "def test_get_cluster_status(ray_start_with_dashboard):\n assert (wait_until_server_available(ray_start_with_dashboard[\"webui_url\"])\n is True)\n address_info = ray_start_with_dashboard\n we" }, { "id": 7526, "commit_id": "363b080127063c4de191407610c139d84a85f80b", "repo": "ludwig", "path": "tests/ludwig/utils/test_dataframe_utils.py", "file_name": "test_dataframe_utils.py", "fun_name": "test_to_numpy_dataset_empty", "commit_message": "Fix: Random dataset splitting with 0.0 probability for optional validation or test sets. 
(#2382)\n\n* Fix: Random splitting with 0 probability\n\n* Add warning about the potential cost of reading remote parquet files.\n\n* Merge conditions for checking whether to create validation/test datasets.\n\n* Use len() dataset to determine if the dataset is empty.\n\n* Remove obsolete comment.\n\n* Check len(df.index) != 0 directly instead of try/except.\n\n* Try using old pinned version of ray.\n\n* Add warning for when validation/test datasets are empty.\n\n* Revert pytest.yml changes.", "code": "def test_to_numpy_dataset_empty():\n pd_df = pd.DataFrame()\n\n np_df = to_numpy_dataset(pd_df, backend=LOCAL_BACKEND)\n\n assert np_df == {}\n\n\n@pytest.mark.distributed", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "@pytest.mark.distributed", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 21, "n_words": 14, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 26, "n_ast_nodes": 53, "n_identifiers": 11, "random_cut": "def test_to_numpy_dataset_empty():\n " }, { "id": 65018, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/purchase_invoice/purchase_invoice.py", "file_name": "purchase_invoice.py", "fun_name": "change_release_date", "commit_message": "style: format code with black", "code": "def change_release_date(name, release_date=None):\n\tif frappe.db.exists(\"Purchase Invoice\", name):\n\t\tpi = frappe.get_doc(\"Purchase Invoice\", name)\n\t\tpi.db_set(\"release_date\", release_date)\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 10, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 4, "token_counts": 39, "n_ast_nodes": 76, "n_identifiers": 10, "random_cut": "def change_release_date(name, release_date=None):\n\tif frappe.db.exists(\"Purchase Invoice\", name):\n\t\tpi = frappe.get_doc(\"Purchase Invoice\", name)\n\t\tpi.db_set(\"rel" }, { "id": 241568, "commit_id": "650c710efacd633fa283955145342bb64063c883", "repo": "lightning", "path": "tests/strategies/test_deepspeed_strategy.py", "file_name": "test_deepspeed_strategy.py", "fun_name": "test_deepspeed_defaults", "commit_message": "Rename training plugin test files & names to strategy (#11303)", "code": "def test_deepspeed_defaults(tmpdir):\n \n strategy = DeepSpeedStrategy()\n assert strategy.config is not None\n assert isinstance(strategy.config[\"zero_optimization\"], dict)\n\n\n@RunIf(min_gpus=1, deepspeed=True, standalone=True)", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "@RunIf(min_gpus=1, deepspeed=True, standalone=True)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 27, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 73, "n_identifiers": 11, "random_cut": "def test_deepspeed_defaults(tmpdir):\n \n strategy = DeepSpeedStrategy()\n assert strategy.config is not None\n assert isinstance(strategy.con" }, { "id": 201058, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/annotations/tests.py", "file_name": "tests.py", "fun_name": "test_alias_annotation_expression", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_alias_annotation_expression(self):\n qs = Book.objects.alias(\n is_book_alias=Value(1),\n ).annotate(is_book=Coalesce(\"is_book_alias\", 0))\n self.assertIs(hasattr(qs.first(), 
\"is_book_alias\"), False)\n for book in qs:\n with self.subTest(book=book):\n self.assertEqual(book.is_book, 1)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 83, "n_words": 19, "vocab_size": 19, "complexity": 2, "nloc": 8, "token_counts": 75, "n_ast_nodes": 122, "n_identifiers": 17, "random_cut": "def test_alias_annotation_expression(self):\n qs = Book.objects.alias(\n " }, { "id": 41906, "commit_id": "2b9f85b71d1dd48e9aaa61d7f3a2b8109b80044d", "repo": "seaborn", "path": "tests/_core/test_plot.py", "file_name": "test_plot.py", "fun_name": "test_anonymous_title", "commit_message": "Remove private attribute access in compound legend (#2878)", "code": "def test_anonymous_title(self, xy):\n\n p = Plot(**xy, color=[\"a\", \"b\", \"c\", \"d\"]).add(MockMark()).plot()\n legend, = p._figure.legends\n assert legend.get_title().get_text() == \"\"\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 37, "n_words": 17, "vocab_size": 16, "complexity": 1, "nloc": 4, "token_counts": 57, "n_ast_nodes": 99, "n_identifiers": 14, "random_cut": "def test_anonymous_title(self, xy):\n" }, { "id": 338359, "commit_id": "074d8d5a5a131501cc0a2a690cc14337395dcb09", "repo": "accelerate", "path": "src/accelerate/test_utils/scripts/test_distributed_data_loop.py", "file_name": "test_distributed_data_loop.py", "fun_name": "test_can_join_uneven_inputs", "commit_message": "Add `join_uneven_inputs` context manager to Accelerator (#820)\n\n* Add test for join context manager\r\n\r\n* Add join_uneven_inputs context manager\r\n\r\n* Format\r\n\r\n* add conditional import for join\r\n\r\n* Replace bare yield with nullcontext\r\n\r\n* Update accelerator to maintain references to dataloaders\r\n\r\n* add override option to join context manager\r\n\r\n* format\r\n\r\n* Add minimal docstring\r\n\r\n* updates based on initial feedback\r\n\r\n* remove launcher used for local testing from test script\r\n\r\n* fix quality issues\r\n\r\n* DEBUG: try resetting accelerator state to fix test\r\n\r\n* Revert \"DEBUG: try resetting accelerator state to fix test\"\r\n\r\nThis reverts commit a13a56ea8e084cad72317cd451a176a2d3fa5dff.\r\n\r\n* Reset state after accelerator tests\r\n\r\n* Update src/accelerate/accelerator.py\r\n\r\nCo-authored-by: Zachary Mueller \r\n\r\n* Warn if at least one iterable dataset seen\r\n\r\n* remove launcher used for local test running\r\n\r\nCo-authored-by: Zachary Mueller ", "code": "def test_can_join_uneven_inputs():\n accelerator = create_accelerator(even_batches=False)\n\n model = torch.nn.Linear(1, 1)\n ddp_model = accelerator.prepare(model)\n\n dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)\n\n batch_idxs = []\n with accelerator.join_uneven_inputs([ddp_model]):\n for batch_idx, batch in enumerate(dl):\n output = ddp_model(batch[0].float())\n loss = output.sum()\n loss.backward()\n batch_idxs.append(batch_idx)\n\n accelerator.wait_for_everyone()\n\n if accelerator.process_index == 0:\n assert batch_idxs == [0, 1]\n elif accelerator.process_index == 1:\n assert batch_idxs == [0]\n\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 144, "n_words": 53, "vocab_size": 40, "complexity": 4, "nloc": 17, "token_counts": 134, "n_ast_nodes": 215, "n_identifiers": 27, "random_cut": "def 
test_can_join_uneven_inputs():\n accelerator = create_accelerator(even_batches=False)\n\n model = torch.nn.Linear(1, 1)\n ddp_model = accelerator.prepare(model)\n\n dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)\n\n batch_idxs = []\n with accelerator.join_uneven_inputs([ddp_model]):\n for" }, { "id": 97752, "commit_id": "d65d6f52729c01f0108f4e077630409fab8844e2", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_data.py", "file_name": "test_organization_metric_data.py", "fun_name": "test_include_series", "commit_message": "ref(metrics): Update metrics names with enums in tests (#33201)\n\nReplaces metric names with enums as soon once we\r\nintroduce naming layer, these tests will probably\r\nbreak otherwise", "code": "def test_include_series(self):\n indexer.record(self.organization.id, \"session.status\")\n self.store_session(self.build_session(project_id=self.project.id, started=time.time() - 60))\n response = self.get_success_response(\n self.organization.slug,\n field=f\"sum({SessionMetricKey.SESSION.value})\",\n statsPeriod=\"1h\",\n interval=\"1h\",\n includeTotals=\"0\",\n )\n\n assert response.data[\"groups\"] == [\n {\"by\": {}, \"series\": {f\"sum({SessionMetricKey.SESSION.value})\": [1.0]}}\n ]\n\n response = self.get_success_response(\n self.organization.slug,\n field=f\"sum({SessionMetricKey.SESSION.value})\",\n statsPeriod=\"1h\",\n interval=\"1h\",\n includeSeries=\"0\",\n includeTotals=\"0\",\n )\n\n assert response.data[\"groups\"] == []\n\n\n@freeze_time((timezone.now() - timedelta(days=2)).replace(hour=3, minute=26))", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@freeze_time((timezone.now() - timedelta(days=2)).replace(hour=3, minute=26))", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 238, "n_words": 45, "vocab_size": 32, "complexity": 1, "nloc": 22, "token_counts": 147, "n_ast_nodes": 313, "n_identifiers": 32, "random_cut": "def test_include_series(self):\n indexer.record(self.organization.id, \"session.status\")\n self.store_session(self.build_session(project_id=self.project.id, started=time.time() - 60))\n response = self.get_success_response(\n self.organization.slug,\n field=f\"sum({SessionMetricKey.SESSION.value})\",\n statsPeriod=\"1h\",\n interval=\"1h\",\n includeTotals=\"0\",\n )\n\n assert response.data[\"groups\"] == [\n {\"by\": {}, \"series\": {f\"sum({SessionMetricKey.SESSION.value})\": [1.0]}}\n ]\n\n response = self.get_success_response(\n self.organization.slug,\n field=f\"sum({SessionMetric" }, { "id": 154843, "commit_id": "d86dda5094eba47840f42a21cf4b2c953e698960", "repo": "modin", "path": "modin/pandas/test/test_series.py", "file_name": "test_series.py", "fun_name": "test_str_islower", "commit_message": "TEST-#5040: Rework test_series using eval_general() (#5041)\n\nSigned-off-by: Vasily Litvinov ", "code": "def test_str_islower(data):\n modin_series, pandas_series = create_test_series(data)\n eval_general(modin_series, pandas_series, lambda series: series.str.islower())\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"data\", test_string_data_values, ids=test_string_data_keys)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 18, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 3, "token_counts": 30, "n_ast_nodes": 72, "n_identifiers": 15, "random_cut": "def 
test_str_islower(data):\n modin_series, pandas_series = create_test_series(data)\n eval_general(modin_series, pandas_series, lambda series: series.str.islower())\n\n\n@pytest.mark.parametrize(\"data\", test_string_data_values, ids=t" }, { "id": 152598, "commit_id": "435fd2112aee9a0e61408ac56663e41beea1e446", "repo": "stable-diffusion-webui", "path": "modules/swinir_model.py", "file_name": "swinir_model.py", "fun_name": "load_model", "commit_message": "Fixes, cleanup.", "code": "def load_model(self, path, scale=4):\n if \"http\" in path:\n dl_name = \"%s%s\" % (self.model_name.replace(\" \", \"_\"), \".pth\")\n filename = load_file_from_url(url=path, model_dir=self.model_path, file_name=dl_name, progress=True)\n else:\n filename = path\n if filename is None or not os.path.exists(filename):\n return None\n model = net(\n upscale=scale,\n in_chans=3,\n img_size=64,\n window_size=8,\n img_range=1.0,\n depths=[6, 6, 6, 6, 6, 6, 6, 6, 6],\n embed_dim=240,\n num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],\n mlp_ratio=2,\n upsampler=\"nearest+conv\",\n resi_connection=\"3conv\",\n )\n\n pretrained_model = torch.load(filename)\n model.load_state_dict(pretrained_model[\"params_ema\"], strict=True)\n if not cmd_opts.no_half:\n model = model.half()\n return model\n\n", "url": "https://github.com/AUTOMATIC1111/stable-diffusion-webui.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 317, "n_words": 79, "vocab_size": 53, "complexity": 5, "nloc": 26, "token_counts": 201, "n_ast_nodes": 292, "n_identifiers": 37, "random_cut": "def load_model(self, path, scale=4):\n if \"http\" in path:\n dl_name = \"%s%s\" % (self.model_name.replace(\" \", \"_\"), \".pth\")\n filename = load_file_from_url(url=path, model_dir=self.model_path, file_name=dl_name, progress=True)\n else:\n filename = path\n if filename is None or not os.path.exists(filename):\n return None\n model = net(\n upscale=scale,\n in_chans=3,\n img_size=64,\n window_size=8,\n img_range=1.0,\n depths=[6, 6, 6, 6, 6, 6, 6, 6, 6],\n embed_dim=240,\n num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],\n mlp_ratio=2,\n upsampler=\"nearest+conv\",\n resi_connection=\"3conv\",\n )\n\n pretrained_model = torch.load(filename)\n model.load_state_dict(pretrained_model[\"params_ema\"], strict=True)\n if not cmd_opts.no_half:\n model = model.half()\n return model\n\n" }, { "id": 10189, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "tests/unit/flow-orchestrate/test_flow_before_after.py", "file_name": "test_flow_before_after.py", "fun_name": "test_flow", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime 
for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling 
(#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot 
\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable 
executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* 
test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "def test_flow(protocol):\n docs = random_docs(10)\n f = Flow(protocol=protocol).add(name='p1')\n\n with f:\n f.index(docs)\n assert f.num_pods == 2\n assert f._pod_nodes['p1'].num_peas == 2\n assert f.num_peas == 3\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 59, "n_words": 23, "vocab_size": 17, "complexity": 1, "nloc": 8, "token_counts": 58, "n_ast_nodes": 98, "n_identifiers": 12, "random_cut": "def test_flow(protocol):\n docs = random_docs(10)\n " }, { "id": 260916, "commit_id": "c18460f78441f11b3e6c15c12238695fcfe3c872", "repo": "scikit-learn", "path": "sklearn/ensemble/_stacking.py", "file_name": "_stacking.py", "fun_name": "predict", "commit_message": "EHN Add multilabel classification support for `StackingClassifier` (#24146)\n\n* Add stacking multilabel functionality\n\n* Add underscore to a class attr\n\n* Remove model from base estimator in test_stacking\n\n* Remove scale in train/test split in test_stacking_classifier_multilabel\n\n* Add stack_method as a test parameter, change RandomForestClassifier to KNeighborsClassifier in test\n\n* Update Changelog\n\n* fix doc typos\n\n* predict_proba output will be concatenate this list in an array of shape n_samples, n_outputs * n_classes - 1. 
Update test.\n\n* Update sklearn/ensemble/_stacking.py\n\nCo-authored-by: Guillaume Lemaitre \n\n* Update doc/whats_new/v1.0.rst\n\nCo-authored-by: Guillaume Lemaitre \n\n* update whats_new\n\n* add passthrough test\n\n* update whats_new with current PR\n\n* Apply suggestions from code review\n\nCo-authored-by: Julien Jerphanion \n\n* update tests\n\n* Apply suggestion to update comments on `concatenate`\n\nCo-authored-by: Julien Jerphanion \n\n* parametrized the two tests into one\n\n* parametrized the two tests into one\n\n* strip the mysterious trailing _r\n\n* fix multilabel list scenario\n\n* add Guillaume's recommendations\n\n* add test for\n\n* some fix\n\n* split tests\n\n* fix flake8\n\n* add suggestions\n\n* Trigger CI\n\n* remove multiclass-multioutput from comments and docstrings\n\nCo-authored-by: Nicolas \nCo-authored-by: Nestor Navarro \nCo-authored-by: Nestor Navarro \nCo-authored-by: Guillaume Lemaitre \nCo-authored-by: Julien Jerphanion ", "code": "def predict(self, X, **predict_params):\n \n y_pred = super().predict(X, **predict_params)\n if isinstance(self._label_encoder, list):\n # Handle the multilabel-indicator case\n y_pred = np.array(\n [\n self._label_encoder[target_idx].inverse_transform(target)\n for target_idx, target in enumerate(y_pred.T)\n ]\n ).T\n else:\n y_pred = self._label_encoder.inverse_transform(y_pred)\n return y_pred\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 181, "n_words": 34, "vocab_size": 29, "complexity": 3, "nloc": 12, "token_counts": 81, "n_ast_nodes": 129, "n_identifiers": 16, "random_cut": "def predict(self, X, **predict_params):\n \n y_pred = super().predict(X, **predict_params)\n if isinstance(self._label_encoder, list):\n # Handle the multilabel-indicator case\n y_pred = np.array(\n [\n self._label_encoder[target_idx].inverse_transform(target)\n " }, { "id": 72782, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/api/v2/tests/test_pages.py", "file_name": "test_pages.py", "fun_name": "test_descendant_of_when_filtering_by_child_of_gives_error", "commit_message": "Reformat with black", "code": "def test_descendant_of_when_filtering_by_child_of_gives_error(self):\n response = self.get_response(descendant_of=6, child_of=5)\n content = json.loads(response.content.decode(\"UTF-8\"))\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n content,\n {\"message\": \"filtering by descendant_of with child_of is not supported\"},\n )\n\n # ORDERING\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 84, "n_words": 25, "vocab_size": 24, "complexity": 1, "nloc": 8, "token_counts": 57, "n_ast_nodes": 95, "n_identifiers": 12, "random_cut": "def test_descendant_of_when_filtering_by_child_of_gives_error(self):\n response = self.get_response(descendant_of=6, child_of=5)\n content = json.loads(response.content.decode(\"UTF-8\"))\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n content,\n {\"message\": \"filtering by descendant_of with child_of is not supported\"},\n )\n\n # ORDERING\n" }, { "id": 72620, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/widgets/button.py", "file_name": "button.py", "fun_name": "render", "commit_message": "Reformat with black", "code": "def render(self):\n attrs = {\n \"href\": self.url,\n 
\"class\": \" \".join(sorted(self.classes)),\n \"title\": self.label,\n }\n attrs.update(self.attrs)\n return format_html(\"{}
    \", flatatt(attrs), self.label)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 78, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 8, "token_counts": 57, "n_ast_nodes": 96, "n_identifiers": 11, "random_cut": "def render(self):\n attrs = {\n \"href\": self.url,\n \"class\": \" \".join(sorted(self.classes)),\n \"title\": self.label,\n }\n attrs.update(self.attrs)\n return f" }, { "id": 201868, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/basic/tests.py", "file_name": "tests.py", "fun_name": "test_does_not_exist", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_does_not_exist(self):\n # Django raises an Article.DoesNotExist exception for get() if the\n # parameters don't match any object.\n with self.assertRaisesMessage(\n ObjectDoesNotExist, \"Article matching query does not exist.\"\n ):\n Article.objects.get(\n id__exact=2000,\n )\n # To avoid dict-ordering related errors check only one lookup\n # in single assert.\n with self.assertRaises(ObjectDoesNotExist):\n Article.objects.get(pub_date__year=2005, pub_date__month=8)\n with self.assertRaisesMessage(\n ObjectDoesNotExist, \"Article matching query does not exist.\"\n ):\n Article.objects.get(\n pub_date__week_day=6,\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 231, "n_words": 62, "vocab_size": 46, "complexity": 1, "nloc": 15, "token_counts": 69, "n_ast_nodes": 121, "n_identifiers": 12, "random_cut": "def test_does_not_exist(self):\n # Django raises an Article.DoesNotExist exception for get() if the\n # parameters don't match any object.\n with self.assertRaisesMessage(\n ObjectDoesNotExist, \"Article matching query does not exist.\"\n ):\n Article.objects.get(\n id__exact=2000,\n )\n # To avoid dict-ordering related errors check only one lookup\n # in single assert.\n with self.assertRaises(ObjectDoesNotExist):\n Article.objects.get(pub_date__year=2005, pub_date__month=8)\n with self.assertRaisesMessage(\n ObjectDoesNotExist, \"Article matching query does not exist.\"\n ):\n " }, { "id": 101299, "commit_id": "2beceffad9b15c1fd78f06b9b272563321c5a41e", "repo": "faceswap", "path": "scripts/extract.py", "file_name": "extract.py", "fun_name": "_set_skip_list", "commit_message": "Data Augmentation update (#1263)\n\n- lib.detected_face\r\n - Subclass Masks for Landmark based masks\r\n - Add training mask propery + methods to DetectedFace\r\n - lib.training_training\r\n - subclass TrainingDataGenerator for training and preview data\r\n - Split cache into own module\r\n - Reduce thread count to 1 to prevent image corruption + data re-use\r\n - Process on largest model input/output size rather than stored image size\r\n - Size and crop masks during caching stage\r\n - Implement ring buffer for data flow\r\n - Fix preview reload bug\r\n - augmentation\r\n - typing\r\n - switch color aug order\r\n - better initialization\r\n - Fix warp + landmark warp to correctly apply at different image scales\r\n - Slightly improved warp caching\r\n - Don't store whether image is_preview. 
Handle all data as training images implicitly\r\n - plugins.trainer: Typing and fixes to work with trainingdata refactor", "code": "def _set_skip_list(self) -> None:\n \n if self._skip_num == 1 and not self._alignments.data:\n logger.debug(\"No frames to be skipped\")\n return\n skip_list = []\n for idx, filename in enumerate(self._images.file_list):\n if idx % self._skip_num != 0:\n logger.trace(\"Adding image '%s' to skip list due to \" # type: ignore\n \"extract_every_n = %s\", filename, self._skip_num)\n skip_list.append(idx)\n # Items may be in the alignments file if skip-existing[-faces] is selected\n elif os.path.basename(filename) in self._alignments.data:\n self._existing_count += 1\n logger.trace(\"Removing image: '%s' due to previously existing\", # type: ignore\n filename)\n skip_list.append(idx)\n if self._existing_count != 0:\n logger.info(\"Skipping %s frames due to skip_existing/skip_existing_faces.\",\n self._existing_count)\n logger.debug(\"Adding skip list: %s\", skip_list)\n self._images.add_skip_list(skip_list)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 368, "n_words": 97, "vocab_size": 69, "complexity": 7, "nloc": 26, "token_counts": 143, "n_ast_nodes": 242, "n_identifiers": 21, "random_cut": "def _set_skip_list(self) -> None:\n \n if self._skip_num == 1 and not self._al" }, { "id": 248183, "commit_id": "7fbf42499d92ec3c9a05d9f36ec5fecd1ab1f18c", "repo": "synapse", "path": "tests/server.py", "file_name": "server.py", "fun_name": "getPeer", "commit_message": "Use `getClientAddress` instead of `getClientIP`. (#12599)\n\ngetClientIP was deprecated in Twisted 18.4.0, which also added\r\ngetClientAddress. The Synapse minimum version for Twisted is\r\ncurrently 18.9.0, so all supported versions have the new API.", "code": "def getPeer(self):\n # We give an address so that getClientAddress/getClientIP returns a non null entry,\n # causing us to record the MAU\n return address.IPv4Address(\"TCP\", self._ip, 3423)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 46, "n_words": 26, "vocab_size": 25, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 31, "n_identifiers": 5, "random_cut": "def getPeer(self):\n # We give an address so that getClientAddress/getClientIP returns a non null entry,\n # causing us to record the MAU\n return address.IPv4Addre" }, { "id": 225061, "commit_id": "f1da904a7fae401c5f96ef6494bfd2bbfcb8c29e", "repo": "mkdocs", "path": "mkdocs/tests/config/config_options_tests.py", "file_name": "config_options_tests.py", "fun_name": "test_incorrect_type_error", "commit_message": "Refactor: use config_options module through a short alias 'c'", "code": "def test_incorrect_type_error(self):\n for cls in c.Dir, c.File, c.FilesystemObject:\n with self.subTest(cls):\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 27, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 13, "token_counts": 81, "n_ast_nodes": 45, "n_identifiers": 8, "random_cut": "def test_incorrect_type_error(self):\n for cls in c.Dir, c.File, c.FilesystemObject:\n with self.subTest(cls):\n" }, { "id": 276127, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/saved_model/save_impl.py", "file_name": "save_impl.py", "fun_name": 
"_replace_child_layer_functions", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _replace_child_layer_functions(layer, serialization_cache):\n \n # pylint: disable=protected-access\n original_fns = {}\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 9, "vocab_size": 9, "complexity": 6, "nloc": 22, "token_counts": 106, "n_ast_nodes": 23, "n_identifiers": 4, "random_cut": "def _replace_child_layer_functions(layer, serialization_cache):\n \n # pylint: disable=protected-access\n original_fns = {}\n" }, { "id": 274995, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/mixed_precision/layer_test.py", "file_name": "layer_test.py", "fun_name": "test_gradient", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_gradient(self, strategy_fn):\n x = tf.constant([1.0])\n with strategy_fn().scope() as strategy:\n with policy.policy_scope(\"mixed_float16\"):\n layer = mp_test_util.MultiplyLayer(assert_type=tf.float16)\n # Learning rate is small enough that if applied to a float16 variable,\n # the variable will not change. So this tests the learning rate is not\n # applied to a float16 value, but instead the float32 variable.\n opt = gradient_descent.SGD(2**-14)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 155, "n_words": 56, "vocab_size": 42, "complexity": 2, "nloc": 12, "token_counts": 119, "n_ast_nodes": 106, "n_identifiers": 18, "random_cut": "def test_gradient(self, strategy_fn):\n x = tf.constant([1.0])\n with strategy_fn().scope() as strategy:\n with policy.policy_scope(\"mixed_float16\"):\n layer = mp_test_util.MultiplyLayer(assert_type=tf.float16)\n # Learning rate is small enough that if applied to a float16 variable,\n # the variable will not change. So this tests the learning rate is not\n # applied to a float16 value, but instead " }, { "id": 261545, "commit_id": "fa4376a5815ce8b15f48f220bc353de0e06aa259", "repo": "scikit-learn", "path": "examples/mixture/plot_gmm_selection.py", "file_name": "plot_gmm_selection.py", "fun_name": "gmm_bic_score", "commit_message": "DOC Rework Gaussian Mixture example (#24721)\n\nCo-authored-by: Guillaume Lemaitre ", "code": "def gmm_bic_score(estimator, X):\n \n # Make it negative since GridSearchCV expects a score to maximize\n return -estimator.bic(X)\n\n\nparam_grid = {\n \"n_components\": range(1, 7),\n \"covariance_type\": [\"spherical\", \"tied\", \"diag\", \"full\"],\n}\ngrid_search = GridSearchCV(\n GaussianMixture(), param_grid=param_grid, scoring=gmm_bic_score\n)\ngrid_search.fit(X)\n\n# %%\n# Plot the BIC scores\n# -------------------\n#\n# To ease the plotting we can create a `pandas.DataFrame` from the results of\n# the cross-validation done by the grid search. 
We re-inverse the sign of the\n# BIC score to show the effect of minimizing it.\n\nimport pandas as pd\n\ndf = pd.DataFrame(grid_search.cv_results_)[\n [\"param_n_components\", \"param_covariance_type\", \"mean_test_score\"]\n]\ndf[\"mean_test_score\"] = -df[\"mean_test_score\"]\ndf = df.rename(\n columns={\n \"param_n_components\": \"Number of components\",\n \"param_covariance_type\": \"Type of covariance\",\n \"mean_test_score\": \"BIC score\",\n }\n)\ndf.sort_values(by=\"BIC score\").head()\n\n# %%\nimport seaborn as sns\n\nsns.catplot(\n data=df,\n kind=\"bar\",\n x=\"Number of components\",\n y=\"BIC score\",\n hue=\"Type of covariance\",\n)\nplt.show()\n\n# %%\n# In the present case, the model with 2 components and full covariance (which\n# corresponds to the true generative model) has the lowest BIC score and is\n# therefore selected by the grid search.\n#\n# Plot the best model\n# -------------------\n#\n# We plot an ellipse to show each Gaussian component of the selected model. For\n# such purpose, one needs to find the eigenvalues of the covariance matrices as\n# returned by the `covariances_` attribute. The shape of such matrices depends\n# on the `covariance_type`:\n#\n# - `\"full\"`: (`n_components`, `n_features`, `n_features`)\n# - `\"tied\"`: (`n_features`, `n_features`)\n# - `\"diag\"`: (`n_components`, `n_features`)\n# - `\"spherical\"`: (`n_components`,)\n\nfrom matplotlib.patches import Ellipse\nfrom scipy import linalg\n\ncolor_iter = sns.color_palette(\"tab10\", 2)[::-1]\nY_ = grid_search.predict(X)\n\nfig, ax = plt.subplots()\n\nfor i, (mean, cov, color) in enumerate(\n zip(\n grid_search.best_estimator_.means_,\n grid_search.best_estimator_.covariances_,\n color_iter,\n )\n):\n v, w = linalg.eigh(cov)\n if not np.any(Y_ == i):\n continue\n plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], 0.8, color=color)\n\n angle = np.arctan2(w[0][1], w[0][0])\n angle = 180.0 * angle / np.pi # convert to degrees\n v = 2.0 * np.sqrt(2.0) * np.sqrt(v)\n ellipse = Ellipse(mean, v[0], v[1], angle=180.0 + angle, color=color)\n ellipse.set_clip_box(fig.bbox)\n ellipse.set_alpha(0.5)\n ax.add_artist(ellipse)\n\nplt.title(\n f\"Selected GMM: {grid_search.best_params_['covariance_type']} model, \"\n f\"{grid_search.best_params_['n_components']} components\"\n)\nplt.axis(\"equal\")\nplt.show()\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 427, "n_words": 345, "vocab_size": 221, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 708, "n_identifiers": 70, "random_cut": "def gmm_bic_score(estimator, X):\n \n # Make it negative since GridSearchCV expects a score to maximize\n return -estimator.bic(X)\n\n\nparam_grid = {\n \"n_components\": range(1, 7),\n \"covariance_type\": [\"spherical\", \"tied\", \"diag\", \"full\"],\n}\ngrid_search = GridSearchCV(\n GaussianMixture(), param_grid=param_grid, scoring=gmm_bic_score\n)\ngrid_search.fit(X)\n\n# %%\n# Plot the BIC scores\n# -------------------\n#\n# To ease the plotting we can create a `pandas.DataFrame` from the results of\n# the cross-validation done by the grid search. 
We re-inverse the sign of the\n# BIC score to show the effect of minimizing it.\n\nimport pandas as pd\n\ndf = pd.DataFrame(grid_search.cv_results_)[\n [\"param_n_components\", \"param_covariance_type\", \"mean_test_score\"]\n]\ndf[\"mean_test_score\"] = -df[\"mean_test_score\"]\ndf = df.rename(\n columns={\n \"param_n_components\": \"Number of components\",\n \"param_covariance_type\": \"Type of covariance\",\n \"mean_test_score\": \"BIC score\",\n }\n)\ndf.sort_values(by=\"BIC score\").head()\n\n# %%\nimport seaborn as sns\n\nsns.catplot(\n data=df,\n kind=\"bar\",\n x=\"Number of components\",\n y=\"BIC score\",\n hue=\"Type of covariance\",\n)\nplt.show()\n\n# %%\n# In the present case, the model with 2 components and full covariance (which\n# corresponds to the true generative model) has the lowest BIC score and is\n# therefore selected by the grid search.\n#\n# Plot the best model\n# -------------------\n#\n# We plot an ellipse to show each Gaussian component of the selected model. For\n# such purpose, one needs to find the eigenvalues of the covariance matrices as\n# returned by the `covariances_` attribute. The shape of such matrices depends\n# on the `covariance_type`:\n#\n# - `\"full\"`: (`n_components`, `n_features`, `n_features`)\n# - `\"tied\"`: (`n_features`, `n_features`)\n# - `\"diag\"`: (`n_components`, `n_features`)\n# - `\"spherical\"`: (`n_components`,)\n\nfrom matplotlib.patches import Ellipse\nfrom scipy import linalg\n\ncolor_iter = sns.color_palette(\"tab10\", 2)[::-1]\nY_ = grid_search.predict(X)\n\nfig, ax = plt.subplots()\n\nfor i, (mean, cov, color) in enumerate(\n zip(\n grid_search.best_estimator_.means_,\n grid_search.best_estimator_.covariances_,\n color_iter,\n )\n):\n v, w = linalg.eigh(cov)\n if not np.any(Y_ == i):\n continue\n plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], 0.8, color=color)\n\n angle = np.arctan2(w[0][1], w[0][0])\n angle = 180.0 * angle / np.pi # convert to degrees\n v = 2.0 * np.sqrt(2.0) * np.sqrt(v)\n ellipse = Ellipse(mean, v[0], v[1], angle=180.0 + angle, color=c" }, { "id": 132526, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/tests/test_logger.py", "file_name": "test_logger.py", "fun_name": "testCSV", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def testCSV(self):\n config = {\"a\": 2, \"b\": 5, \"c\": {\"c\": {\"D\": 123}, \"e\": None}}\n t = Trial(evaluated_params=config, trial_id=\"csv\", logdir=self.test_dir)\n logger = CSVLoggerCallback()\n logger.on_trial_result(0, [], t, result(0, 4))\n logger.on_trial_result(1, [], t, result(1, 5))\n logger.on_trial_result(\n 2, [], t, result(2, 6, score=[1, 2, 3], hello={\"world\": 1})\n )\n\n logger.on_trial_complete(3, [], t)\n self._validate_csv_result()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 121, "n_words": 48, "vocab_size": 39, "complexity": 1, "nloc": 11, "token_counts": 143, "n_ast_nodes": 221, "n_identifiers": 17, "random_cut": "def testCSV(self):\n config = {\"a\": 2, \"b\": 5, \"c\": {\"c\": {\"D\": 123}, \"e\": None}}\n " }, { "id": 148463, "commit_id": "28011a39076d41e6f1f2182215cbcb420bcb3fa5", "repo": "freqtrade", "path": "tests/data/test_btanalysis.py", "file_name": "test_btanalysis.py", "fun_name": "test_analyze_trade_parallelism", "commit_message": "Update bt_results filename to new.json", "code": "def 
test_analyze_trade_parallelism(testdatadir):\n filename = testdatadir / \"backtest-result_new.json\"\n bt_data = load_backtest_data(filename)\n\n res = analyze_trade_parallelism(bt_data, \"5m\")\n assert isinstance(res, DataFrame)\n assert 'open_trades' in res.columns\n assert res['open_trades'].max() == 3\n assert res['open_trades'].min() == 0\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 49, "n_words": 29, "vocab_size": 23, "complexity": 1, "nloc": 8, "token_counts": 59, "n_ast_nodes": 102, "n_identifiers": 12, "random_cut": "def test_analyze_trade_parallelism(testdatadir):\n filename = testdatadir / \"backtest-result_new.json\"\n bt_data = load_backtest_data(filename)\n\n res = analyze_trade_parallelism(bt_data, \"5m\")\n assert isinstance(res, DataFrame)\n assert 'open_trades' in res.columns\n assert " }, { "id": 335006, "commit_id": "c3cc8eb23c8095217388d350409b454ea396c12b", "repo": "diffusers", "path": "src/diffusers/utils/logging.py", "file_name": "logging.py", "fun_name": "_get_default_logging_level", "commit_message": "changes comments and env vars in `utils/logging`\nremoves mentions of 🤗Transformers with 🤗Diffusers equivalent.", "code": "def _get_default_logging_level():\n \n env_level_str = os.getenv(\"DIFFUSERS_VERBOSITY\", None)\n if env_level_str:\n if env_level_str in log_levels:\n return log_levels[env_level_str]\n else:\n logging.getLogger().warning(\n f\"Unknown option DIFFUSERS_VERBOSITY={env_level_str}, \"\n f\"has to be one of: { ', '.join(log_levels.keys()) }\"\n )\n return _default_log_level\n\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 121, "n_words": 32, "vocab_size": 29, "complexity": 3, "nloc": 11, "token_counts": 45, "n_ast_nodes": 104, "n_identifiers": 11, "random_cut": "def _get_default_logging_level():\n \n env_level_str = os.getenv(\"DIFFUSERS_VERBOSITY\", None)\n if env_level_str:\n " }, { "id": 29971, "commit_id": "10356eb2845766684a13cb3adcbde2ad0e5e07ec", "repo": "saleor", "path": "saleor/graphql/meta/tests/test_meta_mutations.py", "file_name": "test_meta_mutations.py", "fun_name": "test_delete_public_metadata_for_checkout_by_token", "commit_message": "Move checkout metadata to separate model (#11264)\n\n* seperate checkout matadata to CheckoutMetadata model\r\n\r\n* change naming of checkout metadata class field\r\n\r\n* fix tests after rebase\r\n\r\n* add dataloaders,move resolving to checkout type instead of meta type, fix review remarks\r\n\r\n* fix tests\r\n\r\n* clen up migrations\r\n\r\n* add missing migration, add missing metdata filter for checkout\r\n\r\n* fixes for cases when checkout has no metadata_storage, cosmetic changes after review\r\n\r\n* fixes for cases when checkout has no metadata_storage, cosmetic changes after review\r\n\r\n* update changelog\r\n\r\n* move comment to proper place\r\n\r\n* delete index from state\r\n\r\n* add using helper to ensure metadata exists\r\n\r\n* fix migration, add get_or_create fo metadata calls\r\n\r\n* fix tests after rebase, fixes after review\r\n\r\n* fix in migration queryset", "code": "def test_delete_public_metadata_for_checkout_by_token(api_client, checkout):\n # given\n checkout.metadata_storage.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})\n checkout.metadata_storage.save(update_fields=[\"metadata\"])\n checkout_id = graphene.Node.to_global_id(\"Checkout\", checkout.pk)\n\n 
# when\n response = execute_clear_public_metadata_for_item(\n api_client, None, checkout.token, \"Checkout\"\n )\n\n # then\n assert item_without_public_metadata(\n response[\"data\"][\"deleteMetadata\"][\"item\"],\n checkout.metadata_storage,\n checkout_id,\n )\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 87, "n_words": 30, "vocab_size": 26, "complexity": 1, "nloc": 12, "token_counts": 80, "n_ast_nodes": 132, "n_identifiers": 18, "random_cut": "def test_delete_public_metadata_for_checkout_by_token(api_client, checkout):\n # given\n checkout.metadata_storage.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})\n checkout.metadata_storage.save(update_fields=[\"metadata\"])\n checkout_id = graphene.Node.to_global_id(\"Checkout\", checkout.pk)\n\n # when\n response = execute_clear_public_metadata_for_item(\n api_client, None, checkout.token, \"Checkout\"\n )\n\n # then\n assert item_without_public_metadata(\n response[\"data\"][\"deleteMetadata\"][\"item\"],\n checkout.metadata_storage,\n checkout_id,\n )\n\n" }, { "id": 274305, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/legacy_tf_layers/convolutional_test.py", "file_name": "convolutional_test.py", "fun_name": "testFunctionalConv1DNoReuse", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def testFunctionalConv1DNoReuse(self):\n with tf.Graph().as_default():\n length = 10\n data = tf.random.uniform((5, length, 3), seed=1)\n conv_layers.separable_conv1d(data, 32, 3)\n self.assertEqual(len(tf.compat.v1.trainable_variables()), 3)\n conv_layers.separable_conv1d(data, 32, 3)\n self.assertEqual(len(tf.compat.v1.trainable_variables()), 6)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 95, "n_words": 23, "vocab_size": 17, "complexity": 1, "nloc": 8, "token_counts": 97, "n_ast_nodes": 151, "n_identifiers": 17, "random_cut": "def testFunctionalConv1DNoReuse(self):\n with tf.Graph().as_defaul" }, { "id": 272727, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/normalization/batch_normalization_test.py", "file_name": "batch_normalization_test.py", "fun_name": "test_batchnorm_convnet", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_batchnorm_convnet(self):\n if tf.test.is_gpu_available(cuda_only=True):\n with self.session():\n model = keras.models.Sequential()\n norm = keras.layers.BatchNormalization(\n axis=1, input_shape=(3, 4, 4), momentum=0.8\n )\n model.add(norm)\n model.compile(\n loss=\"mse\",\n optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),\n run_eagerly=test_utils.should_run_eagerly(),\n )\n\n # centered on 5.0, variance 10.0\n x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))\n model.fit(x, x, epochs=4, verbose=0)\n out = model.predict(x)\n out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1))\n out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1))\n\n np.testing.assert_allclose(\n np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1\n )\n np.testing.assert_allclose(\n np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 446, "n_words": 75, "vocab_size": 56, 
"complexity": 2, "nloc": 24, "token_counts": 278, "n_ast_nodes": 383, "n_identifiers": 50, "random_cut": "def test_batchnorm_convnet(self):\n if tf.test.is_gpu_available(cuda_only=True):\n with self.session():\n model = keras.models.Sequential()\n norm = keras.layers.BatchNormalization(\n axis=1, input_shape=(3, 4, 4), momentum=0.8\n )\n model.add(norm)\n model.compile(\n loss=\"mse\",\n optimizer=tf" }, { "id": 162444, "commit_id": "fb62afd6f047aea7e88a6b0df00b49f78ba16e84", "repo": "yt-dlp", "path": "yt_dlp/extractor/musicdex.py", "file_name": "musicdex.py", "fun_name": "_return_info", "commit_message": "[Musicdex] Add extractors (#2421)\n\nCloses #2204\r\nAuthored by: Ashish0804", "code": "def _return_info(self, track_json, album_json, id):\n return {\n 'id': str(id),\n 'title': track_json.get('name'),\n 'track': track_json.get('name'),\n 'description': track_json.get('description'),\n 'track_number': track_json.get('number'),\n 'url': format_field(track_json, 'url', 'https://www.musicdex.org/%s'),\n 'duration': track_json.get('duration'),\n 'genre': [genre.get('name') for genre in track_json.get('genres') or []],\n 'like_count': track_json.get('likes_count'),\n 'view_count': track_json.get('plays'),\n 'artist': [artist.get('name') for artist in track_json.get('artists') or []],\n 'album_artist': [artist.get('name') for artist in album_json.get('artists') or []],\n 'thumbnail': format_field(album_json, 'image', 'https://www.musicdex.org/%s'),\n 'album': album_json.get('name'),\n 'release_year': try_get(album_json, lambda x: date_from_str(unified_strdate(x['release_date'])).year),\n 'extractor_key': MusicdexSongIE.ie_key(),\n 'extractor': 'MusicdexSong',\n }\n\n", "url": "https://github.com/yt-dlp/yt-dlp.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 267, "n_words": 67, "vocab_size": 55, "complexity": 7, "nloc": 20, "token_counts": 219, "n_ast_nodes": 389, "n_identifiers": 17, "random_cut": "def _return_info(self, track_json, album_json, id):\n return {\n 'id': str(id),\n 'title': track_json.get('name'),\n 'track': track_json.get('name'),\n 'description': track_json.get('description'),\n 'track_number': track_json.get('number'),\n 'url': format_field(track_json, 'url', 'https://www.musicdex.org/%s'),\n 'duration': track_json.get('duration'),\n 'genre': [genre.get('name') for genre in track_json.get('genres') or []],\n 'like_count': track_json.get('likes_count'),\n 'view_count': track_json.get('plays'),\n 'artist': [artist.get('name') for artist in track_json.get('artists') or []],\n 'album_artist': [artist.get('name') for artist in album_json.get('artists') or []],\n 'thumbnail': format_field(album_json, 'image', 'https://www.musicdex.org/%s'),\n 'album': album_json.get('name')," }, { "id": 95384, "commit_id": "45750ab53007ebae64d7a82d5020e65ab94b6da7", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_user_roles.py", "file_name": "test_user_roles.py", "fun_name": "test_lookup_self", "commit_message": "feat: Add endpoints for managing user roles (#30993)\n\n- Add endpoints for creating, updating, deleting user roles.\r\n- Add endpoints for assigning and unassigning roles from users.\r\n- Add permission validation to endpoints (permissions now must be known).\r\n- Add sudo/superuser requirements to various endpoints involving permissions.\r\n- Add standard audit logs to all permission related endpoints.\r\n\r\nAdditionally this cleans up the various permission endpoints, improving testing, adding various security (sudo, 
superuser-only).", "code": "def test_lookup_self(self):\n role = UserRole.objects.create(name=\"support\", permissions=[\"broadcasts.admin\"])\n role.users.add(self.user)\n role2 = UserRole.objects.create(name=\"admin\", permissions=[\"users.admin\"])\n role2.users.add(self.user)\n UserRole.objects.create(name=\"other\", permissions=[\"users.edit\"])\n resp = self.get_response(\"me\")\n assert resp.status_code == 200\n assert len(resp.data) == 2, resp.data\n role_names = [r[\"name\"] for r in resp.data]\n assert \"support\" in role_names\n assert \"admin\" in role_names\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 117, "n_words": 41, "vocab_size": 30, "complexity": 2, "nloc": 12, "token_counts": 126, "n_ast_nodes": 212, "n_identifiers": 19, "random_cut": "def test_lookup_self(self):\n role = UserRole.objects.create(name=\"support\", permissions=[\"broadcasts.admin\"])\n role.users.add(self.user)\n role2 = UserRole.objects.create(name=\"admin\", permissions=[\"users.admin\"])\n role2.users.add(self.user)\n UserRole.objects.create(name=\"other\", permissions=[\"users.edit\"])\n resp = self.get_response(\"me\")\n assert resp.status_code == 200\n assert len(resp.data) == 2, resp.data\n role_names = [r" }, { "id": 58792, "commit_id": "da5115381e62b084922641a8b1270806f695055f", "repo": "prefect", "path": "tests/orion/database/test_queries.py", "file_name": "test_queries.py", "fun_name": "test_get_runs_in_queue_limit", "commit_message": "Return a work queue ID", "code": "async def test_get_runs_in_queue_limit(self, session, db, fr_1, fr_2, fr_3):\n query = db.queries.get_scheduled_flow_runs_from_work_queues(\n db=db, limit_per_queue=1\n )\n result = await session.execute(query)\n runs = result.all()\n\n assert [r[0].id for r in runs] == [fr_1.id, fr_3.id]\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 75, "n_words": 30, "vocab_size": 28, "complexity": 2, "nloc": 7, "token_counts": 70, "n_ast_nodes": 103, "n_identifiers": 17, "random_cut": "async def test_get_runs_in_queue_limit(self, session, db, fr_1, fr_2, fr_3):\n query = db.queries.get_scheduled_flow_runs_from_work_queues(\n db=db, limit_per_queue=1\n )\n result = await session.execute(query)\n runs = result.all()\n\n assert " }, { "id": 45966, "commit_id": "e1134590973355549272b1f3a213dbfa29698df7", "repo": "airflow", "path": "airflow/cli/commands/dag_command.py", "file_name": "dag_command.py", "fun_name": "dag_list_import_errors", "commit_message": "Add `list-import-errors` to `airflow dags` command (#22084)\n\nThis will help users to see the dags with import error and enable scripts\r\nprocess the output", "code": "def dag_list_import_errors(args):\n \n dagbag = DagBag(process_subdir(args.subdir))\n data = []\n for filename, errors in dagbag.import_errors.items():\n data.append({\"filepath\": filename, \"error\": errors})\n AirflowConsole().print_as(\n data=data,\n output=args.output,\n )\n\n\n@cli_utils.action_cli\n@suppress_logs_and_warning", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "@cli_utils.action_cli\n@suppress_logs_and_warning", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 60, "n_words": 23, "vocab_size": 21, "complexity": 2, "nloc": 9, "token_counts": 65, "n_ast_nodes": 119, "n_identifiers": 18, "random_cut": "def dag_list_import_errors(args):\n \n dagbag = DagBag(process_subd" 
}, { "id": 114781, "commit_id": "e8a8d9b71deae2c291efb49ff11573285f3aec35", "repo": "mindsdb", "path": "mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", "file_name": "mysql_proxy.py", "fun_name": "process_query", "commit_message": "before integration tests", "code": "def process_query(self, sql):\n executor = Executor(\n session=self.session,\n sqlserver=self\n )\n\n executor.query_execute(sql)\n\n if executor.error is not None:\n resp = SQLAnswer(\n answer_type = ANSWER_TYPE.ERROR,\n error_code=executor.error['code'],\n error_message=executor.error['message']\n )\n elif executor.data is None:\n resp = SQLAnswer(\n answer_type = ANSWER_TYPE.OK,\n state_track=executor.state_track,\n )\n else:\n\n resp = SQLAnswer(\n answer_type=ANSWER_TYPE.TABLE,\n state_track=executor.state_track,\n columns=self.to_mysql_columns(executor.columns),\n data=executor.data,\n status=executor.server_status\n )\n return resp\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 334, "n_words": 48, "vocab_size": 31, "complexity": 3, "nloc": 26, "token_counts": 130, "n_ast_nodes": 197, "n_identifiers": 24, "random_cut": "def process_query(self, sql):\n executor = Executor(\n session=self.session,\n sqlserver=self\n )\n\n executor.query_execute(sql)\n\n if executor.error is not None:\n resp = SQLAnswer(\n answer_type = ANSWER_TYPE.ERROR,\n error_code=executor.error['code'],\n error_message=executor.error['message']\n )\n elif executor.data is None:\n resp = SQLAnswer(\n answer_type = ANSWER_TYPE.OK,\n state_track=executor.state_track,\n )\n else:\n\n resp = SQLAnswer(\n answer_type=ANSWER_TYPE.TABLE,\n state_track=executor.state_track,\n columns=self.to_mysql_columns(executor.columns),\n data=executor.data,\n stat" }, { "id": 171873, "commit_id": "e93ee07729afe0bc7661655755df6adad657c23b", "repo": "pandas", "path": "pandas/tests/frame/methods/test_to_csv.py", "file_name": "test_to_csv.py", "fun_name": "test_to_csv_empty", "commit_message": "BUG/API: Indexes on empty frames/series should be RangeIndex (#49637)\n\n* BUG/API: ndexes on empty frames/series should be RangeIndex, are Index[object]\r\n\r\n* fix black\r\n\r\n* fix window stuff\r\n\r\n* Add docs\r\n\r\n* double ticks\r\n\r\n* unneeded line\r\n\r\n* update thatsnew text\r\n\r\n* update whatsnew text\r\n\r\n* fix rst\r\n\r\n* Update doc/source/whatsnew/v2.0.0.rst\r\n\r\nCo-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>\r\n\r\nCo-authored-by: Terji Petersen \r\nCo-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>", "code": "def test_to_csv_empty(self):\n df = DataFrame(index=np.arange(10))\n result, expected = self._return_result_expected(df, 1000)\n tm.assert_frame_equal(result, expected, check_column_type=False)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 33, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 42, "n_ast_nodes": 65, "n_identifiers": 13, "random_cut": "def test_to_csv_empty(self):" }, { "id": 183561, "commit_id": "d14659c1a3760eade2dd3479b66eb8b2e7711db0", "repo": "textual", "path": "src/textual/_terminal_features.py", "file_name": "_terminal_features.py", "fun_name": "from_autodetect", "commit_message": "[terminal buffering] Add support for the \"mode 2026\"\n\nThat task is definitely way more complicated that it seemed to be 😅", "code": "def from_autodetect(cls) -> 
TerminalSupportedFeatures:\n \n\n # Using macOS, but not using the default terminal: let's assume we're on iTerm2\n iterm2_synchronized_update = (\n platform.system() == \"Darwin\"\n and os.environ.get(\"TERM_PROGRAM\", \"\") != \"Apple_Terminal\"\n )\n\n # Detecting \"mode2026\" is more complicated, as we have to use an async request/response\n # machinery with the terminal emulator - for now we should just assume it's not supported.\n # See the use of the Mode and ModeReportParameter classes in the Textual code to check this machinery.\n mode2026_synchronized_update = False\n\n return cls(\n iterm2_synchronized_update=iterm2_synchronized_update,\n mode2026_synchronized_update=mode2026_synchronized_update,\n )\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 200, "n_words": 86, "vocab_size": 71, "complexity": 2, "nloc": 18, "token_counts": 47, "n_ast_nodes": 87, "n_identifiers": 10, "random_cut": "def from_autodetect(cls) -> TerminalSupportedFeatures:\n \n\n # Using macOS, but not using the default terminal: let's assume we're on iTerm2\n iterm2_synchronized_update = (\n platform.system() == \"Darwin\"\n and os.environ.get(\"TERM_PROGRAM\", \"\") != \"Apple_Terminal\"\n )\n\n # Detecting \"mode2026\" is more complicated, as we have to use an async request/response\n # machinery with the terminal emulator - for now we should just assume it's not supported.\n # See the use of the Mode and ModeReportParameter classes in the Textual code to check this machinery.\n mode2026_synchronized_update = False\n\n return cls(\n iterm2_synchronized_update=iterm2_synchronized_update,\n mode2026_synchronized_update=mode2026_synchronized_update,\n )\n" }, { "id": 130471, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/autoscaler/_private/gcp/config.py", "file_name": "config.py", "fun_name": "_get_service_account", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _get_service_account(account, config, iam):\n project_id = config[\"provider\"][\"project_id\"]\n full_name = \"projects/{project_id}/serviceAccounts/{account}\" \"\".format(\n project_id=project_id, account=account\n )\n try:\n service_account = iam.projects().serviceAccounts().get(name=full_name).execute()\n except errors.HttpError as e:\n if e.resp.status != 404:\n raise\n service_account = None\n\n return service_account\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 88, "n_words": 32, "vocab_size": 27, "complexity": 3, "nloc": 12, "token_counts": 79, "n_ast_nodes": 134, "n_identifiers": 18, "random_cut": "def _get_service_account(account, config, iam):\n project_id = config[\"provider\"][\"project_id\"]\n full_name = \"projects/{project_id}/serviceAccounts/{account}\" \"\".format(\n project_id=project_id, account=account\n )\n try:\n service_account = iam.projects().serviceAccounts().get(name=full_name).execute()\n except errors.HttpError as e:\n if e.resp.status != 404:\n raise\n service_account = None\n\n return service_account\n\n" }, { "id": 170773, "commit_id": "a215264d472e79c48433fa3a04fa492abc41e38d", "repo": "pandas", "path": "pandas/tests/indexes/categorical/test_reindex.py", "file_name": "test_reindex.py", "fun_name": "test_reindex_list_non_unique_unused_category", "commit_message": "DEPR: 
Index.reindex with non-unique Index (#49485)", "code": "def test_reindex_list_non_unique_unused_category(self):\n msg = \"cannot reindex on an axis with duplicate labels\"\n ci = CategoricalIndex([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"b\", \"c\", \"d\"])\n with pytest.raises(ValueError, match=msg):\n ci.reindex([\"a\", \"c\"])\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 58, "n_words": 27, "vocab_size": 23, "complexity": 1, "nloc": 5, "token_counts": 56, "n_ast_nodes": 104, "n_identifiers": 11, "random_cut": "def test_reindex_list_non_unique_unused_category(self):\n msg = \"cannot reindex on an axis with duplicate labels\"\n ci = CategoricalIndex([\"a\", \"b\", \"c\", \"a\"], categorie" }, { "id": 177053, "commit_id": "afef7ebde11cbe9d92d6a98319fe431a219d9f8c", "repo": "networkx", "path": "networkx/utils/misc.py", "file_name": "misc.py", "fun_name": "choice", "commit_message": "make lazy_import private and remove its internal use (#5878)\n\n* make lazy_import private and remove its internal use\r\n\r\n* add back the non-lazy imports of numpy to utils.misc", "code": "def choice(self, seq):\n import numpy as np\n\n if isinstance(self._rng, np.random.Generator):\n idx = self._rng.integers(0, len(seq))\n else:\n idx = self._rng.randint(0, len(seq))\n return seq[idx]\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 70, "n_words": 21, "vocab_size": 18, "complexity": 2, "nloc": 7, "token_counts": 62, "n_ast_nodes": 96, "n_identifiers": 13, "random_cut": "def choice(self, seq):\n import numpy as np\n\n if isinstance(self._rng, np.random.Generator):\n idx = self." 
}, { "id": 89542, "commit_id": "e94d7cd092d813d88c2216fca3ca6bd48e0747a3", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_accept_organization_invite.py", "file_name": "test_accept_organization_invite.py", "fun_name": "test_not_needs_authentication", "commit_message": "chore(hybrid-cloud): use organization_slug in AcceptOrganizationInvite API (#42138)", "code": "def test_not_needs_authentication(self):\n self.login_as(self.user)\n\n om = OrganizationMember.objects.create(\n email=\"newuser@example.com\", token=\"abc\", organization=self.organization\n )\n for path in self._get_paths([om.id, om.token]):\n resp = self.client.get(path)\n assert resp.status_code == 200\n assert not resp.data[\"needsAuthentication\"]\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 96, "n_words": 25, "vocab_size": 23, "complexity": 2, "nloc": 9, "token_counts": 77, "n_ast_nodes": 123, "n_identifiers": 19, "random_cut": "def test_not_needs_authentication(self):\n self.login_as(self.user)\n" }, { "id": 74928, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/documents/widgets.py", "file_name": "widgets.py", "fun_name": "media", "commit_message": "Reformat with black", "code": "def media(self):\n return forms.Media(\n js=[\n versioned_static(\"wagtaildocs/js/document-chooser-modal.js\"),\n versioned_static(\"wagtaildocs/js/document-chooser.js\"),\n ]\n )\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 74, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 7, "token_counts": 25, "n_ast_nodes": 43, "n_identifiers": 6, "random_cut": "def media(self):\n return forms.Media(\n js=[\n versioned_static(\"wagtaildocs/js/document-chooser-modal.js\"),\n versioned_static(\"wagtaildocs/js/document-choose" }, { "id": 70088, "commit_id": "004288eac5b4ffbcced7149113150d7cc42df28e", "repo": "glances", "path": "glances/globals.py", "file_name": "globals.py", "fun_name": "json_dumps_dictlist", "commit_message": "Improve code quality", "code": "def json_dumps_dictlist(data, item):\n if isinstance(data, dict):\n try:\n return json_dumps({item: data[item]})\n except:\n return None\n elif isinstance(data, list):\n try:\n # Source:\n # http://stackoverflow.com/questions/4573875/python-get-index-of-dictionary-item-in-list\n return json_dumps({item: map(itemgetter(item), data)})\n except:\n return None\n else:\n return None\n", "url": "https://github.com/nicolargo/glances.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 140, "n_words": 31, "vocab_size": 20, "complexity": 5, "nloc": 13, "token_counts": 68, "n_ast_nodes": 110, "n_identifiers": 9, "random_cut": "def json_dumps_dictlist(data, item):\n if isinstance(" }, { "id": 163363, "commit_id": "37c33438837cbb7e41a949b44a20c82b82289498", "repo": "pandas", "path": "pandas/tests/indexing/test_loc.py", "file_name": "test_loc.py", "fun_name": "test_loc_setitem_uint8_upcast", "commit_message": "BUG: can_hold_element size checks on ints/floats (#45273)", "code": "def test_loc_setitem_uint8_upcast():\n # GH#26049\n\n df = DataFrame([1, 2, 3, 4], columns=[\"col1\"], dtype=\"uint8\")\n df.loc[2, \"col1\"] = 300 # value that can't be held in uint8\n\n # TODO: would be better to get uint16?\n expected = DataFrame([1, 2, 300, 4], columns=[\"col1\"], dtype=\"int64\")\n tm.assert_frame_equal(df, 
expected)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 60, "n_words": 42, "vocab_size": 33, "complexity": 1, "nloc": 5, "token_counts": 70, "n_ast_nodes": 111, "n_identifiers": 9, "random_cut": "def test_loc_setitem_uint8_upcast():\n # GH#26049\n\n" }, { "id": 296640, "commit_id": "d20a620590bc2df74eb2f9c0d35e1b7f12be5ded", "repo": "core", "path": "homeassistant/helpers/template.py", "file_name": "template.py", "fun_name": "__getitem__", "commit_message": "Make `this` variable available in template entities (#65201)\n\n* feat: make this variable available in template entities\r\n\r\nThis makes the variable `this` available in template entities.\r\nIt will simplify the use of self-referencing template entities.\r\nBecause, without this, we have to repeat the entity id every time.\r\nIf we can solve this without explicitly spelling the entity id,\r\ncode can be re-used much better.\r\n\r\nAs a side-effect, this will allow to use `variables`-like patterns,\r\nwhere attributes can be used as variables to calculate subsequent attributes or state.\r\n\r\nExample:\r\n```yaml\r\ntemplate:\r\n sensor:\r\n - name: test\r\n state: \"{{ this.attributes.test }}\"\r\n # not: \"{{ state_attr('sensor.test', 'test' }}\"\r\n attributes:\r\n test: \"{{ now() }}\"\r\n```\r\n\r\n* expose entity_id instead of this\r\n\r\n* add test\r\n\r\n* Refactor to expose this variable\r\n\r\n* Tweak repr dunder\r\n\r\nCo-authored-by: Erik ", "code": "def __getitem__(self, item):\n \n if item in _COLLECTABLE_STATE_ATTRIBUTES:\n # _collect_state inlined here for performance\n if self._collect and _RENDER_INFO in self._hass.data:\n self._hass.data[_RENDER_INFO].entities.add(self._entity_id)\n return getattr(self._state, item)\n if item == \"entity_id\":\n return self._entity_id\n if item == \"state_with_unit\":\n return self.state_with_unit\n raise KeyError\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 142, "n_words": 37, "vocab_size": 28, "complexity": 6, "nloc": 10, "token_counts": 72, "n_ast_nodes": 118, "n_identifiers": 15, "random_cut": "def __getitem__(self, item):\n \n if item in _COLLECTABLE_STATE_ATTRIBUTES:\n # _collect_state inlined here for performance\n if self._collect and _RENDER_INFO in self._hass.data:\n self._hass.data[_RENDER_INFO].entities.add(self._entity_id)\n return getattr(self._state, item)\n if item == \"entity_id\":\n return self._entity_id\n if item == \"state_with_unit\":\n return self.state_with_" }, { "id": 295258, "commit_id": "78e4d7e1ca8f49068d8f63f6c80bb3048f5ad8e8", "repo": "core", "path": "tests/components/hassio/test_sensor.py", "file_name": "test_sensor.py", "fun_name": "mock_all", "commit_message": "Add auto_update property to supervisor and addon update entities (#69055)", "code": "def mock_all(aioclient_mock, request):\n \n aioclient_mock.post(\"http://127.0.0.1/homeassistant/options\", json={\"result\": \"ok\"})\n aioclient_mock.get(\"http://127.0.0.1/supervisor/ping\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/supervisor/options\", json={\"result\": \"ok\"})\n aioclient_mock.get(\n \"http://127.0.0.1/info\",\n json={\n \"result\": \"ok\",\n \"data\": {\"supervisor\": \"222\", \"homeassistant\": \"0.110.0\", \"hassos\": None},\n },\n )\n aioclient_mock.get(\n \"http://127.0.0.1/store\",\n json={\n \"result\": \"ok\",\n \"data\": {\"addons\": [], 
\"repositories\": []},\n },\n )\n aioclient_mock.get(\n \"http://127.0.0.1/host/info\",\n json={\n \"result\": \"ok\",\n \"data\": {\n \"result\": \"ok\",\n \"data\": {\n \"chassis\": \"vm\",\n \"operating_system\": \"Debian GNU/Linux 10 (buster)\",\n \"kernel\": \"4.19.0-6-amd64\",\n },\n },\n },\n )\n aioclient_mock.get(\n \"http://127.0.0.1/core/info\",\n json={\"result\": \"ok\", \"data\": {\"version_latest\": \"1.0.0\", \"version\": \"1.0.0\"}},\n )\n aioclient_mock.get(\n \"http://127.0.0.1/os/info\",\n json={\"result\": \"ok\", \"data\": {\"version_latest\": \"1.0.0\", \"version\": \"1.0.0\"}},\n )\n aioclient_mock.get(\n \"http://127.0.0.1/supervisor/info\",\n json={\n \"result\": \"ok\",\n \"data\": {\n \"result\": \"ok\",\n \"version\": \"1.0.0\",\n \"version_latest\": \"1.0.0\",\n \"addons\": [\n {\n \"name\": \"test\",\n \"state\": \"started\",\n \"slug\": \"test\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"2.0.0\",\n \"version_latest\": \"2.0.1\",\n \"repository\": \"core\",\n \"url\": \"https://github.com/home-assistant/addons/test\",\n },\n {\n \"name\": \"test2\",\n \"state\": \"stopped\",\n \"slug\": \"test2\",\n \"installed\": True,\n \"update_available\": False,\n \"version\": \"3.1.0\",\n \"version_latest\": \"3.2.0\",\n \"repository\": \"core\",\n \"url\": \"https://github.com\",\n },\n ],\n },\n },\n )\n aioclient_mock.get(\n \"http://127.0.0.1/addons/test/stats\",\n json={\n \"result\": \"ok\",\n \"data\": {\n \"cpu_percent\": 0.99,\n \"memory_usage\": 182611968,\n \"memory_limit\": 3977146368,\n \"memory_percent\": 4.59,\n \"network_rx\": 362570232,\n \"network_tx\": 82374138,\n \"blk_read\": 46010945536,\n \"blk_write\": 15051526144,\n },\n },\n )\n aioclient_mock.get(\"http://127.0.0.1/addons/test/changelog\", text=\"\")\n aioclient_mock.get(\n \"http://127.0.0.1/addons/test/info\",\n json={\"result\": \"ok\", \"data\": {\"auto_update\": True}},\n )\n aioclient_mock.get(\"http://127.0.0.1/addons/test2/changelog\", text=\"\")\n aioclient_mock.get(\n \"http://127.0.0.1/addons/test2/info\",\n json={\"result\": \"ok\", \"data\": {\"auto_update\": False}},\n )\n aioclient_mock.get(\n \"http://127.0.0.1/ingress/panels\", json={\"result\": \"ok\", \"data\": {\"panels\": {}}}\n )\n\n\n@pytest.mark.parametrize(\n \"entity_id,expected\",\n [\n (\"sensor.home_assistant_operating_system_version\", \"1.0.0\"),\n (\"sensor.home_assistant_operating_system_newest_version\", \"1.0.0\"),\n (\"sensor.test_version\", \"2.0.0\"),\n (\"sensor.test_newest_version\", \"2.0.1\"),\n (\"sensor.test2_version\", \"3.1.0\"),\n (\"sensor.test2_newest_version\", \"3.2.0\"),\n (\"sensor.test_cpu_percent\", \"0.99\"),\n (\"sensor.test2_cpu_percent\", \"unavailable\"),\n (\"sensor.test_memory_percent\", \"4.59\"),\n (\"sensor.test2_memory_percent\", \"unavailable\"),\n ],\n)", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"entity_id,expected\",\n [\n (\"sensor.home_assistant_operating_system_version\", \"1.0.0\"),\n (\"sensor.home_assistant_operating_system_newest_version\", \"1.0.0\"),\n (\"sensor.test_version\", \"2.0.0\"),\n (\"sensor.test_newest_version\", \"2.0.1\"),\n (\"sensor.test2_version\", \"3.1.0\"),\n (\"sensor.test2_newest_version\", \"3.2.0\"),\n (\"sensor.test_cpu_percent\", \"0.99\"),\n (\"sensor.test2_cpu_percent\", \"unavailable\"),\n (\"sensor.test_memory_percent\", \"4.59\"),\n (\"sensor.test2_memory_percent\", \"unavailable\"),\n ],\n)", "n_ast_errors": 1, "ast_levels": 16, 
"n_whitespaces": 1473, "n_words": 220, "vocab_size": 117, "complexity": 1, "nloc": 104, "token_counts": 470, "n_ast_nodes": 1034, "n_identifiers": 10, "random_cut": "def mock_all(aioclient_mock, request):\n \n aioclient_mock.post(\"http://127.0.0.1/homeassistant/options\", json={\"result\": \"ok\"})\n aioclient_mock.get(\"http://127.0.0.1/supervisor/ping\", json={\"result\": \"ok\"})\n aioclient_mock.post(\"http://127.0.0.1/supervisor/options\", json={\"result\": \"ok\"})\n aioclient_mock.get(\n \"http://127.0.0.1/info\",\n json={\n \"result\": \"ok\",\n \"data\": {\"supervisor\": \"222\", \"homeassistant\": \"0.110.0\", \"hassos\": None},\n },\n )\n aioclient_mock.get(\n \"http://127.0.0.1/store\",\n json={\n \"result\": \"ok\",\n \"data\": {\"addons\": [], \"repositories\": []},\n },\n )\n aioclient_mock.get(\n \"http://127.0.0.1/host/info\",\n json={\n \"result\": \"ok\",\n \"data\": {\n \"result\": \"ok\",\n \"data\": {\n \"chassis\": \"vm\",\n \"operating_system\": \"Debian GNU/Linux 10 (buster)\",\n \"kernel\": \"4.19.0-6-amd64\",\n },\n },\n },\n )\n aioclient_mock.get(\n \"http://127.0.0.1/core/info\",\n json={\"result\": \"ok\", \"data\": {\"version_latest\": \"1.0.0\", \"version\": \"1.0.0\"}},\n )\n aioclient_mock.get(\n \"http://127.0.0.1/os/info\",\n json={\"result\": \"ok\", \"data\": {\"version_latest\": \"1.0.0\", \"version\": \"1.0.0\"}},\n )\n aioclient_mock.get(\n \"http://127.0.0.1/supervisor/info\",\n json={\n \"result\": \"ok\",\n \"data\": {\n \"result\": \"ok\",\n \"version\": \"1.0.0\",\n " }, { "id": 167587, "commit_id": "f538568afc2c76c2d738d32e3544cf9fe6742960", "repo": "pandas", "path": "pandas/_testing/asserters.py", "file_name": "asserters.py", "fun_name": "assert_is_sorted", "commit_message": "TYP: misc return type annotations (#47558)", "code": "def assert_is_sorted(seq) -> None:\n \n if isinstance(seq, (Index, Series)):\n seq = seq.values\n # sorting does not change precisions\n assert_numpy_array_equal(seq, np.sort(np.array(seq)))\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 38, "n_words": 19, "vocab_size": 19, "complexity": 2, "nloc": 5, "token_counts": 41, "n_ast_nodes": 67, "n_identifiers": 10, "random_cut": "def assert_is_sorted(seq) -> None:\n \n if isinstance(seq, (Index, Series)):\n seq = seq.values\n # sorting does not change precisions\n " }, { "id": 205149, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/postgresql/operations.py", "file_name": "operations.py", "fun_name": "subtract_temporals", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def subtract_temporals(self, internal_type, lhs, rhs):\n if internal_type == \"DateField\":\n lhs_sql, lhs_params = lhs\n rhs_sql, rhs_params = rhs\n params = (*lhs_params, *rhs_params)\n return \"(interval '1 day' * (%s - %s))\" % (lhs_sql, rhs_sql), params\n return super().subtract_temporals(internal_type, lhs, rhs)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 94, "n_words": 37, "vocab_size": 32, "complexity": 2, "nloc": 7, "token_counts": 58, "n_ast_nodes": 90, "n_identifiers": 11, "random_cut": "def subtract_temporals(self, internal_type, lhs, rhs):\n " }, { "id": 36752, "commit_id": "81ac45f85c35244831f11f73c09ea10eee4f953a", "repo": "transformers", "path": "src/transformers/training_args.py", "file_name": 
"training_args.py", "fun_name": "local_process_index", "commit_message": "update smddp api to v1.4.0 (#16371)\n\n* update smddp api to v1.4.0\r\n\r\n* Update src/transformers/trainer.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/trainer.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* address comments\r\n\r\n* fix style\r\n\r\n* remove unused import\r\n\r\n* fix indent\r\n\r\n* disable style check for import\r\n\r\n* fix space\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def local_process_index(self):\n \n if is_torch_tpu_available():\n return xm.get_local_ordinal()\n elif is_sagemaker_mp_enabled():\n return smp.local_rank()\n elif is_sagemaker_dp_enabled():\n return dist.get_rank()\n elif self.local_rank != -1:\n return self.local_rank\n return 0\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 108, "n_words": 22, "vocab_size": 15, "complexity": 5, "nloc": 10, "token_counts": 53, "n_ast_nodes": 92, "n_identifiers": 11, "random_cut": "def local_process_index(self):\n \n if is_torch_tpu_available():\n return xm.get_local_ordinal()\n elif is_sagemaker_mp_enabled():\n return smp.local_rank()\n " }, { "id": 178615, "commit_id": "a470b75c8e045312ea22dbfb6c5fc6702835b31c", "repo": "Nuitka", "path": "nuitka/freezer/Standalone.py", "file_name": "Standalone.py", "fun_name": "getScanDirectories", "commit_message": "macOS: Massive improvements for dependency scans\n\n* Was not recursively scanning dependencies and therefore could\n miss some of them.\n\n* Made internal functions private.\n\n* Make sure to pass proper \"package\" value to DLL scans, so it\n can include the needed directories.\n\n* Do not mutate information of DLL map, it is used later for\n other things and we now detect errors in that.", "code": "def getScanDirectories(package_name, original_dir):\n # Many cases, pylint: disable=too-many-branches\n\n cache_key = package_name, original_dir\n\n if cache_key in _scan_dir_cache:\n return _scan_dir_cache[cache_key]\n\n scan_dirs = [sys.prefix]\n\n if package_name is not None:\n scan_dirs.extend(_getPackageSpecificDLLDirectories(package_name))\n\n if original_dir is not None:\n scan_dirs.append(original_dir)\n scan_dirs.extend(getSubDirectories(original_dir))\n\n if (\n Utils.isWin32Windows()\n and package_name is not None\n and package_name.isBelowNamespace(\"win32com\")\n ):\n pywin32_dir = getPyWin32Dir()\n\n if pywin32_dir is not None:\n scan_dirs.append(pywin32_dir)\n\n for path_dir in os.environ[\"PATH\"].split(\";\"):\n if not os.path.isdir(path_dir):\n continue\n\n if areSamePaths(path_dir, os.path.join(os.environ[\"SYSTEMROOT\"])):\n continue\n if areSamePaths(path_dir, os.path.join(os.environ[\"SYSTEMROOT\"], \"System32\")):\n continue\n if areSamePaths(path_dir, os.path.join(os.environ[\"SYSTEMROOT\"], \"SysWOW64\")):\n continue\n\n scan_dirs.append(path_dir)\n\n result = []\n\n # Remove directories that hold no DLLs.\n for scan_dir in scan_dirs:\n sys.stdout.flush()\n\n # These are useless, but plenty.\n if os.path.basename(scan_dir) == \"__pycache__\":\n continue\n\n scan_dir = getDirectoryRealPath(scan_dir)\n\n # No DLLs, no use.\n if not any(entry[1].lower().endswith(\".dll\") for entry in listDir(scan_dir)):\n continue\n\n result.append(os.path.realpath(scan_dir))\n\n _scan_dir_cache[cache_key] = result\n 
return result\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 390, "n_words": 125, "vocab_size": 73, "complexity": 17, "nloc": 39, "token_counts": 286, "n_ast_nodes": 473, "n_identifiers": 37, "random_cut": "def getScanDirectories(package_name, original_dir):\n # Many cases, pylint: disable=too-many-branches\n\n cache_key = package_name, original_dir\n\n if cache_key in _scan_dir_cache:\n return _scan_dir_cache[cache_key]\n\n scan_dirs = [sys.prefix]\n\n if package_name is not None:\n scan_dirs.extend(_getPackageSpecificDLLDirectories(package_name))\n\n if original_dir is not None:\n scan_dirs.append(original_dir)\n scan_dirs.extend(getSubDirectories(original_dir))\n\n if (\n Utils.isWin32Windows()\n and package_name is not None\n and package_name.isBelowNamespace(\"win32com\")\n ):\n pywin32_dir = getPyWin32Dir()\n\n if pywin32_dir is not None:\n scan_dirs.append(pywin32_dir)\n\n for path_dir in os.environ[\"PATH\"].split(\";\"):\n if not os.path.isdir(path_dir):\n continue\n\n if areSamePaths(path_dir, os.path.join(os.environ[\"SYSTEMROOT\"])):\n continue\n if areSamePaths(path_dir, os.path.join(os.environ[\"SYSTEMROOT\"], \"System32\")):\n continue\n if areSamePaths(path_dir, os.path.join(os.environ[\"SYSTEMROOT\"], \"SysWOW64\")):\n continue\n\n scan_dirs.append(path_dir)\n\n result = []\n\n # Remove directories that hold no DLLs.\n for scan_dir in scan_dirs:\n sys.stdout.flush()\n\n # These are useless, but plenty.\n if os.path.basename(scan_dir) == \"__pycache__\":\n continue\n\n scan_dir = getDirectoryRealPath(scan_dir)\n\n # No DLLs, no use.\n if not any(en" }, { "id": 241622, "commit_id": "dbf1acd5a553ffc1546734be164cc89cef2b741d", "repo": "lightning", "path": "pytorch_lightning/plugins/environments/lsf_environment.py", "file_name": "lsf_environment.py", "fun_name": "_get_node_rank", "commit_message": "Modify LSFEnvironment to use more reliable environment variable (#10825)\n\n\r\nCo-authored-by: thomas chaton \r\nCo-authored-by: Carlos Mocholí \r\nCo-authored-by: Adrian Wälchli \r\nCo-authored-by: Jirka Borovec ", "code": "def _get_node_rank(self) -> int:\n \n hosts = self._read_hosts()\n count: Dict[str, int] = {}\n for host in hosts:\n if host not in count:\n count[host] = len(count)\n return count[socket.gethostname()]\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 87, "n_words": 26, "vocab_size": 21, "complexity": 3, "nloc": 12, "token_counts": 55, "n_ast_nodes": 88, "n_identifiers": 12, "random_cut": "def _get_node_rank(self) -> int:\n \n hosts = self._read_hosts()\n count: Dict[str, int] = {}\n for host in hosts:\n if host not in count:\n count[host] = len(count)\n " }, { "id": 156987, "commit_id": "142de2608df2494bf11e08038aadddb544b4500c", "repo": "dask", "path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "fun_name": "test_median_approximate", "commit_message": "Add `DataFrame` and `Series` `median` method (#9483)", "code": "def test_median_approximate(method):\n df = pd.DataFrame({\"x\": range(100), \"y\": range(100, 200)})\n ddf = dd.from_pandas(df, npartitions=10)\n if PANDAS_GT_110:\n assert_eq(\n ddf.median_approximate(method=method),\n df.median(),\n atol=1,\n )\n else:\n result = ddf.median_approximate(method=method)\n expected = df.median()\n assert ((result - expected).abs() < 1).all().compute()\n\n", "url": 
"https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 112, "n_words": 33, "vocab_size": 30, "complexity": 2, "nloc": 13, "token_counts": 107, "n_ast_nodes": 173, "n_identifiers": 20, "random_cut": "def test_median_approximate(method):\n df = pd.DataFrame({\"x\": range(100), \"y\": range(100, 200)})\n ddf = dd.from_pandas(d" }, { "id": 163316, "commit_id": "ad9d42a4c847eb9f341dd6743466a4bed70a0a6e", "repo": "pandas", "path": "pandas/tests/series/test_constructors.py", "file_name": "test_constructors.py", "fun_name": "test_constructor_coerce_float_fail", "commit_message": "BUG: Series(floatlike, dtype=intlike) inconsistent with non-ndarray data (#45142)", "code": "def test_constructor_coerce_float_fail(self, any_int_numpy_dtype):\n # see gh-15832\n # Updated: make sure we treat this list the same as we would treat\n # the equivalent ndarray\n vals = [1, 2, 3.5]\n\n res = Series(vals, dtype=any_int_numpy_dtype)\n expected = Series(np.array(vals), dtype=any_int_numpy_dtype)\n tm.assert_series_equal(res, expected)\n alt = Series(np.array(vals)) # i.e. we ignore the dtype kwd\n tm.assert_series_equal(alt, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 115, "n_words": 51, "vocab_size": 38, "complexity": 1, "nloc": 7, "token_counts": 70, "n_ast_nodes": 107, "n_identifiers": 13, "random_cut": "def test_constructor_coerce_float_fail(self, any_int_numpy_dtype):\n # see gh-15832\n # Updated: make sure we treat this list the same as we would treat\n # the equivalent ndarray\n vals = [1, 2, 3.5]\n\n res = Series(vals, dtype=any_in" }, { "id": 41583, "commit_id": "6b61a26a462effaea1c80518e98185abb12174ed", "repo": "seaborn", "path": "seaborn/tests/_core/test_subplots.py", "file_name": "test_subplots.py", "fun_name": "test_row_faceted_x_paired", "commit_message": "Begin removal of data/layers as Plotter attributes", "code": "def test_row_faceted_x_paired(self):\n\n x = [\"f\", \"s\"]\n key = \"a\"\n order = list(\"abc\")\n facet_spec = {\"variables\": {\"row\": key}, \"row_order\": order}\n s = Subplots({}, facet_spec, {\"x\": x})\n\n assert s.n_subplots == len(order) * len(x)\n assert s.subplot_spec[\"ncols\"] == len(x)\n assert s.subplot_spec[\"nrows\"] == len(order)\n assert s.subplot_spec[\"sharex\"] == \"col\"\n assert s.subplot_spec[\"sharey\"] is True\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 116, "n_words": 47, "vocab_size": 34, "complexity": 1, "nloc": 11, "token_counts": 107, "n_ast_nodes": 188, "n_identifiers": 12, "random_cut": "def test_row_faceted_x_paired(self):\n\n x = [\"f\", \"s\"]\n key = \"a\"\n order = list(\"abc\")\n facet_spec = {\"variables\": {\"row\": key}, \"row_order\": order}\n s = Subplots({}, facet_spec, {\"x\": x})\n\n assert s.n_subplots == len(order) * le" }, { "id": 17716, "commit_id": "101076f6f891ddfbcc5ba99a7858decf58565537", "repo": "ccxt", "path": "python/ccxt/async_support/bittrex.py", "file_name": "bittrex.py", "fun_name": "fetch_markets", "commit_message": "1.72.42\n\n[ci skip]", "code": "async def fetch_markets(self, params={}):\n response = await self.publicGetMarkets(params)\n #\n # [\n # {\n # \"symbol\":\"LTC-BTC\",\n # \"baseCurrencySymbol\":\"LTC\",\n # \"quoteCurrencySymbol\":\"BTC\",\n # \"minTradeSize\":\"0.01686767\",\n # \"precision\":8,\n # \"status\":\"ONLINE\", 
# \"OFFLINE\"\n # \"createdAt\":\"2014-02-13T00:00:00Z\"\n # },\n # {\n # \"symbol\":\"VDX-USDT\",\n # \"baseCurrencySymbol\":\"VDX\",\n # \"quoteCurrencySymbol\":\"USDT\",\n # \"minTradeSize\":\"300.00000000\",\n # \"precision\":8,\n # \"status\":\"ONLINE\", # \"OFFLINE\"\n # \"createdAt\":\"2019-05-23T00:41:21.843Z\",\n # \"notice\":\"USDT has swapped to an ERC20-based token as of August 5, 2019.\"\n # }\n # ]\n #\n result = []\n for i in range(0, len(response)):\n market = response[i]\n baseId = self.safe_string(market, 'baseCurrencySymbol')\n quoteId = self.safe_string(market, 'quoteCurrencySymbol')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n status = self.safe_string(market, 'status')\n result.append({\n 'id': self.safe_string(market, 'symbol'),\n 'symbol': base + '/' + quote,\n 'base': base,\n 'quote': quote,\n 'settle': None,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': None,\n 'type': 'spot',\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'active': (status == 'ONLINE'),\n 'contract': False,\n 'linear': None,\n 'inverse': None,\n 'contractSize': None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'price': self.safe_integer(market, 'precision', 8),\n 'amount': int('8'),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': self.safe_number(market, 'minTradeSize'),\n 'max': None,\n },\n 'price': {\n 'min': None,\n 'max': None,\n },\n 'cost': {\n 'min': None,\n 'max': None,\n },\n },\n 'info': market,\n })\n return result\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1488, "n_words": 196, "vocab_size": 115, "complexity": 2, "nloc": 59, "token_counts": 297, "n_ast_nodes": 537, "n_identifiers": 21, "random_cut": "async def fetch_markets(self, params={}):\n response = await self.publicGetMarkets(params)\n #\n # [\n # {\n # \"symbol\":\"LTC-BTC\",\n # \"baseCurrencySymbol\":\"LTC\",\n # \"quot" }, { "id": 205432, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/base.py", "file_name": "base.py", "fun_name": "_get_next_or_previous_by_FIELD", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):\n if not self.pk:\n raise ValueError(\"get_next/get_previous cannot be used on unsaved objects.\")\n op = \"gt\" if is_next else \"lt\"\n order = \"\" if is_next else \"-\"\n param = getattr(self, field.attname)\n q = Q((field.name, param), (f\"pk__{op}\", self.pk), _connector=Q.AND)\n q = Q(q, (f\"{field.name}__{op}\", param), _connector=Q.OR)\n qs = (\n self.__class__._default_manager.using(self._state.db)\n .filter(**kwargs)\n .filter(q)\n .order_by(\"%s%s\" % (order, field.name), \"%spk\" % order)\n )\n try:\n return qs[0]\n except IndexError:\n raise self.DoesNotExist(\n \"%s matching query does not exist.\" % self.__class__._meta.object_name\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 249, "n_words": 77, "vocab_size": 61, "complexity": 5, "nloc": 20, "token_counts": 164, "n_ast_nodes": 275, "n_identifiers": 30, "random_cut": "def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):\n if not self.pk:\n raise ValueError(\"get_next/get_previous cannot be used on 
unsaved objects.\")\n op = \"gt\" if is_next else \"lt\"\n order = \"\" if is_next else \"-\"\n param = getatt" }, { "id": 100305, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "lib/gui/analysis/event_reader.py", "file_name": "event_reader.py", "fun_name": "_parse_outputs", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def _parse_outputs(self, event):\n \n serializer = get_serializer(\"json\")\n struct = event.summary.value[0].tensor.string_val[0]\n\n config = serializer.unmarshal(struct)[\"config\"]\n model_outputs = self._get_outputs(config)\n\n for side_outputs, side in zip(model_outputs, (\"a\", \"b\")):\n logger.debug(\"side: '%s', outputs: '%s'\", side, side_outputs)\n layer_name = side_outputs[0][0]\n\n output_config = next(layer for layer in config[\"layers\"]\n if layer[\"name\"] == layer_name)[\"config\"]\n layer_outputs = self._get_outputs(output_config)\n for output in layer_outputs: # Drill into sub-model to get the actual output names\n loss_name = output[0][0]\n if loss_name[-2:] not in (\"_a\", \"_b\"): # Rename losses to reflect the side output\n new_name = f\"{loss_name.replace('_both', '')}_{side}\"\n logger.debug(\"Renaming loss output from '%s' to '%s'\", loss_name, new_name)\n loss_name = new_name\n if loss_name not in self._loss_labels:\n logger.debug(\"Adding loss name: '%s'\", loss_name)\n self._loss_labels.append(loss_name)\n logger.debug(\"Collated loss labels: %s\", self._loss_labels)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 386, "n_words": 108, "vocab_size": 75, "complexity": 7, "nloc": 21, "token_counts": 196, "n_ast_nodes": 345, "n_identifiers": 30, "random_cut": "def _parse_outputs(self, event):\n \n serializer = get_serializer(\"json\")\n struct = event.summary.value[0].tensor.string_val[0]\n\n config = serializer.unmarshal(struct)[\"config\"]\n model_outputs = self._get_outputs(config)\n\n for side_outputs, side in zip(model_outputs, (\"a\", \"b\")):\n logger.debug(\"side: '%s', outputs: '%s'\", side, side_outputs)\n layer_name = side_outputs[0][0]\n\n output_config = next(layer for layer in config[\"layers\"]\n if layer[\"name\"] == layer_name)[\"config\"]\n layer_outputs = self._get_outputs(output_config)\n for output in layer_outputs: # Drill into sub-model to get the actual output names\n loss_name = output[0][0]\n if loss_name[-2:] not in (\"_a\", \"_b\"): # Rename losses to reflect the side output\n new_name = f\"{loss_nam" }, { "id": 44093, "commit_id": "8dabce8887f02216c1037be35e80c214edcbadfe", "repo": "airflow", "path": 
"airflow/models/baseoperator.py", "file_name": "baseoperator.py", "fun_name": "get_serialized_fields", "commit_message": "Add `--map-index` parameter to task CLI commands (#20980)", "code": "def get_serialized_fields(cls):\n if cls.__serialized_fields is None:\n fields_dict = attr.fields_dict(cls)\n cls.__serialized_fields = frozenset(\n fields_dict.keys()\n - {\n 'dag',\n 'deps',\n 'inherits_from_dummy_operator',\n 'is_mapped',\n 'operator_extra_links',\n 'upstream_task_ids',\n 'task_type',\n }\n | {'template_fields'}\n )\n return cls.__serialized_fields\n\n\n# TODO: Deprecate for Airflow 3.0\nChainable = Union[DependencyMixin, Sequence[DependencyMixin]]\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 275, "n_words": 38, "vocab_size": 34, "complexity": 2, "nloc": 17, "token_counts": 57, "n_ast_nodes": 116, "n_identifiers": 11, "random_cut": "def get_serialized_fields(cls):\n if cls.__serialized_fields is None:\n fields_dict = attr.fields_dict(cls)\n cls.__serialized_fields = frozenset(\n fields_dict.keys()\n - {\n 'dag',\n 'deps',\n 'inherits_from_dummy_operator',\n 'is_ma" }, { "id": 181367, "commit_id": "20057aa946b6711ae2928eb0a81c8f00c3b0a7a9", "repo": "gradio", "path": "test/test_components.py", "file_name": "test_components.py", "fun_name": "test_video_postprocess_converts_to_playable_format", "commit_message": "Create fewer temp files and make them consistently-named (#2758)\n\n* tmp files\r\n\r\n* components\r\n\r\n* changes\r\n\r\n* temp_file_sets\r\n\r\n* TempFileManager class\r\n\r\n* added file manager\r\n\r\n* internal functions\r\n\r\n* tests\r\n\r\n* formatting\r\n\r\n* changes\r\n\r\n* video tests\r\n\r\n* added tests for File\r\n\r\n* cheetah image\r\n\r\n* formatting\r\n\r\n* tests for upload button\r\n\r\n* temp files\r\n\r\n* formatting\r\n\r\n* changelog\r\n\r\n* fixed audio\r\n\r\n* tmp files\r\n\r\n* tmp files\r\n\r\n* gallery\r\n\r\n* deprecated type=file\r\n\r\n* fixing tests\r\n\r\n* patch os.path.exists\r\n\r\n* fixed test_video_postprocess_converts_to_playable_format\r\n\r\n* fixed tests\r\n\r\n* changelog\r\n\r\n* fix tests\r\n\r\n* formatting\r\n\r\n* added a download_if_needed\r\n\r\n* formatting\r\n\r\n* fixed download\r\n\r\n* fixed gallery demo\r\n\r\n* fix tests\r\n\r\n* version\r\n\r\n* fix for mac\r\n\r\n* consolidate", "code": "def test_video_postprocess_converts_to_playable_format(self):\n test_file_dir = pathlib.Path(pathlib.Path(__file__).parent, \"test_files\")\n # This file has a playable container but not playable codec\n with tempfile.NamedTemporaryFile(\n suffix=\"bad_video.mp4\", delete=False\n ) as tmp_not_playable_vid:\n bad_vid = str(test_file_dir / \"bad_video_sample.mp4\")\n assert not processing_utils.video_is_playable(bad_vid)\n shutil.copy(bad_vid, tmp_not_playable_vid.name)\n _ = gr.Video().postprocess(tmp_not_playable_vid.name)\n # The original video gets converted to .mp4 format\n full_path_to_output = pathlib.Path(tmp_not_playable_vid.name).with_suffix(\n \".mp4\"\n )\n assert processing_utils.video_is_playable(str(full_path_to_output))\n\n # This file has a playable codec but not a playable container\n with tempfile.NamedTemporaryFile(\n suffix=\"playable_but_bad_container.mkv\", delete=False\n ) as tmp_not_playable_vid:\n bad_vid = str(test_file_dir / \"playable_but_bad_container.mkv\")\n assert not processing_utils.video_is_playable(bad_vid)\n shutil.copy(bad_vid, tmp_not_playable_vid.name)\n _ = 
gr.Video().postprocess(tmp_not_playable_vid.name)\n full_path_to_output = pathlib.Path(tmp_not_playable_vid.name).with_suffix(\n \".mp4\"\n )\n assert processing_utils.video_is_playable(str(full_path_to_output))\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 357, "n_words": 92, "vocab_size": 47, "complexity": 1, "nloc": 24, "token_counts": 184, "n_ast_nodes": 314, "n_identifiers": 25, "random_cut": "def test_video_postprocess_converts_to_playable_format(self):\n test_file_dir = pathlib.Path(pathlib.Path(__file__).parent, \"test_files\")\n # This file has a playable container but not playable codec\n with tempfile.NamedTemporaryFile(\n suffix=\"bad_video.mp4\", delete=False\n ) as tmp_not_playable_vid:\n bad_vid = str(test_file_dir / \"bad_video_sample.mp4\")\n assert not processing_utils.video_is_playable(bad_vid)\n shutil.copy(bad_vid, tmp_not_playable_vid.name)\n _ = gr.Video().postprocess(tmp_not_playable_vid.name)\n # The original video gets converted to .mp4 format\n full_path_to_output = pathlib.Path(tmp_not_playable_vid.name).with_suffix(\n \".mp4\"\n )\n assert processing_utils.video_is_playable(str(full_path_to_output))\n\n # This file has a playable codec but not a playable container\n with tempfile.NamedTemporaryFile(\n suffix=\"playable_but_bad_container.mkv\", delete=False\n ) as tmp_not_playable_vid:\n bad_vid = str(test_file_dir / \"playable_but_bad_container.mkv\")\n assert not processing_utils.video_is_playable(bad_vid)\n shutil.copy(bad_vid, tmp_not_playable_vid.name)\n _ = gr.Video().postprocess(tmp_not" }, { "id": 166606, "commit_id": "c50b745a99e644e7c7552f196f49f6269b79e258", "repo": "pandas", "path": "pandas/tests/io/parser/test_python_parser_only.py", "file_name": "test_python_parser_only.py", "fun_name": "test_index_col_false_and_header_none", "commit_message": "REGR: index_col False and header=None inferring index names in some cases (#47139)", "code": "def test_index_col_false_and_header_none(python_parser_only):\n # GH#46955\n parser = python_parser_only\n data = \n result = parser.read_csv(StringIO(data), sep=\",\", header=None, index_col=False)\n expected = DataFrame({0: [0.5, 0.1], 1: [0.03, 0.2]})\n tm.assert_frame_equal(result, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 43, "n_words": 25, "vocab_size": 22, "complexity": 1, "nloc": 9, "token_counts": 72, "n_ast_nodes": 100, "n_identifiers": 14, "random_cut": "def test_index_col_false_and_header_none(python_parser_only):\n # GH#46955\n parser = python" }, { "id": 138013, "commit_id": "8e680c483ce326cefc62e44f68ab1a6948b1c3d2", "repo": "ray", "path": "rllib/utils/replay_buffers/tests/test_prioritized_replay_buffer_replay_buffer_api.py", "file_name": "test_prioritized_replay_buffer_replay_buffer_api.py", "fun_name": "_generate_data", "commit_message": "[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). 
(#28369)", "code": "def _generate_data(self):\n return SampleBatch(\n {\n SampleBatch.T: [np.random.random((4,))],\n SampleBatch.ACTIONS: [np.random.choice([0, 1])],\n SampleBatch.REWARDS: [np.random.rand()],\n SampleBatch.OBS: [np.random.random((4,))],\n SampleBatch.NEXT_OBS: [np.random.random((4,))],\n SampleBatch.TERMINATEDS: [np.random.choice([False, True])],\n SampleBatch.TRUNCATEDS: [np.random.choice([False, False])],\n }\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 164, "n_words": 24, "vocab_size": 21, "complexity": 1, "nloc": 12, "token_counts": 136, "n_ast_nodes": 199, "n_identifiers": 14, "random_cut": "def _generate_data(self):\n return SampleBatch(\n {\n SampleBatch.T: [np.random.random((4,))],\n SampleBatch.ACTIONS: [np.random.choice([0, 1])],\n SampleBatch.REWARDS: [np.random.rand()],\n SampleBatch.OBS: [np.random.random((4,))],\n SampleBatch.NEXT_OBS: [np.random.random((4,))],\n SampleBatch.TERMINATEDS: [np.random.choice([False, True])],\n SampleBatch.TRUNCATEDS: [np.random.choice([False, False])],\n }\n" }, { "id": 258457, "commit_id": "e4015289e0eeb390190ce0d051cee756bc5ecb33", "repo": "scikit-learn", "path": "sklearn/metrics/cluster/_unsupervised.py", "file_name": "_unsupervised.py", "fun_name": "silhouette_samples", "commit_message": "FIX Support integers in silhouette_score for precomputed distances (#22108)\n\nCo-authored-by: Guillaume Lemaitre ", "code": "def silhouette_samples(X, labels, *, metric=\"euclidean\", **kwds):\n \n X, labels = check_X_y(X, labels, accept_sparse=[\"csc\", \"csr\"])\n\n # Check for non-zero diagonal entries in precomputed distance matrix\n if metric == \"precomputed\":\n error_msg = ValueError(\n \"The precomputed distance matrix contains non-zero \"\n \"elements on the diagonal. 
Use np.fill_diagonal(X, 0).\"\n )\n if X.dtype.kind == \"f\":\n atol = np.finfo(X.dtype).eps * 100\n if np.any(np.abs(np.diagonal(X)) > atol):\n raise ValueError(error_msg)\n elif np.any(np.diagonal(X) != 0): # integral dtype\n raise ValueError(error_msg)\n\n le = LabelEncoder()\n labels = le.fit_transform(labels)\n n_samples = len(labels)\n label_freqs = np.bincount(labels)\n check_number_of_labels(len(le.classes_), n_samples)\n\n kwds[\"metric\"] = metric\n reduce_func = functools.partial(\n _silhouette_reduce, labels=labels, label_freqs=label_freqs\n )\n results = zip(*pairwise_distances_chunked(X, reduce_func=reduce_func, **kwds))\n intra_clust_dists, inter_clust_dists = results\n intra_clust_dists = np.concatenate(intra_clust_dists)\n inter_clust_dists = np.concatenate(inter_clust_dists)\n\n denom = (label_freqs - 1).take(labels, mode=\"clip\")\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n intra_clust_dists /= denom\n\n sil_samples = inter_clust_dists - intra_clust_dists\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)\n # nan values are for clusters of size 1, and should be 0\n return np.nan_to_num(sil_samples)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 333, "n_words": 147, "vocab_size": 105, "complexity": 5, "nloc": 33, "token_counts": 284, "n_ast_nodes": 473, "n_identifiers": 46, "random_cut": "def silhouette_samples(X, labels, *, metric=\"euclidean\", **kwds):\n \n X, labels = check_X_y(X, labels, accept_sparse=[\"csc\", \"csr\"])\n\n # Check for non-zero diagonal entries in precomputed distance matrix\n if metric == \"precomputed\":\n error_msg = ValueError(\n \"The precomputed distance matrix contains non-zero \"\n \"elements on the diagonal. 
Use np.fill_diagonal(X, 0).\"\n )\n if X.dtype.kind == \"f\":\n atol = np.finfo(X.dtype).eps * 100\n if np.any(np.abs(np.diagonal(X)) > atol):\n raise ValueError(error_msg)\n elif np.any(np.diagonal(X) != 0): # integral dtype\n raise ValueError(error_msg)\n\n le = LabelEncoder()\n labels " }, { "id": 37466, "commit_id": "e952e049b4fbb5d3e2ba6a140f10fb4049dd8654", "repo": "transformers", "path": "tests/speech_encoder_decoder/test_modeling_flax_speech_encoder_decoder.py", "file_name": "test_modeling_flax_speech_encoder_decoder.py", "fun_name": "get_pretrained_model_and_inputs", "commit_message": "use scale=1.0 in floats_tensor called in speech model testers (#17007)\n\nCo-authored-by: ydshieh ", "code": "def get_pretrained_model_and_inputs(self):\n model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(\n \"facebook/wav2vec2-large-lv60\", \"gpt2-medium\"\n )\n batch_size = 13\n input_values = floats_tensor([batch_size, 512], scale=1.0)\n attention_mask = random_attention_mask([batch_size, 512])\n decoder_input_ids = ids_tensor([batch_size, 4], model.config.decoder.vocab_size)\n decoder_attention_mask = random_attention_mask([batch_size, 4])\n inputs = {\n \"inputs\": input_values,\n \"attention_mask\": attention_mask,\n \"decoder_input_ids\": decoder_input_ids,\n \"decoder_attention_mask\": decoder_attention_mask,\n }\n\n return model, inputs\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 168, "n_words": 44, "vocab_size": 36, "complexity": 1, "nloc": 16, "token_counts": 96, "n_ast_nodes": 150, "n_identifiers": 18, "random_cut": "def get_pretrained_model_and_inputs(self):\n model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(\n \"facebook/wav2vec2-large-lv60\", \"gpt2-medium\"\n )\n batch_size = 13\n input_values = floats_tensor([batch_size, 512], scale=1.0)\n" }, { "id": 88244, "commit_id": "4821e6846b007cce0092f43141e4b436beb2bedc", "repo": "sentry", "path": "tests/sentry/relay/test_config.py", "file_name": "test_config.py", "fun_name": "test_project_config_setattr", "commit_message": "test: Add missing tests to sentry/relay/config/__init__.py [TET-504] (#41058)\n\nThis PR increase code coverage from ~82% upto 98% in\r\nsentry/relay/config/__init__.py.\r\n\r\ncodecov [report](https://app.codecov.io/gh/getsentry/sentry/pull/41058):\r\n\r\n\"image\"\r\nsrc=\"https://user-images.githubusercontent.com/1374633/200516881-ed23da43-37df-4fc2-b291-310fc13f0ff5.png\"", "code": "def test_project_config_setattr(default_project):\n project_cfg = ProjectConfig(default_project)\n with pytest.raises(Exception) as exc_info:\n project_cfg.foo = \"bar\"\n assert str(exc_info.value) == \"Trying to change read only ProjectConfig object\"\n\n\n@pytest.mark.django_db", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@pytest.mark.django_db", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 37, "n_words": 23, "vocab_size": 22, "complexity": 1, "nloc": 5, "token_counts": 35, "n_ast_nodes": 74, "n_identifiers": 13, "random_cut": "def test_project_config_setattr(default_project):\n project_cfg = ProjectConfig(default_project)\n with pytest.raises(Ex" }, { "id": 270220, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/distribute/distribute_coordinator_utils.py", "file_name": "distribute_coordinator_utils.py", "fun_name": "_is_chief", "commit_message": "Reformatting the codebase with 
black.\n\nPiperOrigin-RevId: 450093126", "code": "def _is_chief(self):\n \n if not self._cluster_spec or self._task_type in [\n _TaskType.CHIEF,\n _TaskType.EVALUATOR,\n None,\n ]:\n return True\n\n # If not local and chief not in the cluster_spec, use the first worker as\n # chief.\n if (\n _TaskType.CHIEF not in self._cluster_spec.jobs\n and self._task_type == _TaskType.WORKER\n and self._task_id == 0\n ):\n return True\n return False\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 195, "n_words": 51, "vocab_size": 36, "complexity": 6, "nloc": 14, "token_counts": 63, "n_ast_nodes": 99, "n_identifiers": 10, "random_cut": "def _is_chief(self):\n \n if not self._cluster_spec or self._task_type in [\n " }, { "id": 69051, "commit_id": "70c4117c22df5ab3efda0be842452cabc2f9aab9", "repo": "erpnext", "path": "erpnext/patches/v14_0/remove_india_localisation.py", "file_name": "remove_india_localisation.py", "fun_name": "unlink_custom_fields", "commit_message": "fix: unlink custom fields in patch", "code": "def unlink_custom_fields():\n\tfrappe.db.set_value(\n\t\t\"Custom Field\",\n\t\t{\"dt\": \"Item\", \"fieldname\": \"gst_hsn_code\"},\n\t\t{\"fieldtype\": \"Data\", \"options\": \"\"},\n\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 8, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 6, "token_counts": 33, "n_ast_nodes": 66, "n_identifiers": 4, "random_cut": "def unlink_custom_fields():\n\tfrappe.db.set_value(\n\t\t\"Custom Field\",\n\t\t{\"dt\": \"Item\", \"fieldname\": \"gst_hsn_code\"},\n\t\t{\"fieldtype\": \"Data\", \"options\": \"\"},\n\t)\n" }, { "id": 258935, "commit_id": "1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe", "repo": "scikit-learn", "path": "sklearn/mixture/tests/test_gaussian_mixture.py", "file_name": "test_gaussian_mixture.py", "fun_name": "test_gaussian_suffstat_sk_spherical", "commit_message": "MNT Update black to stable version (#22474)", "code": "def test_gaussian_suffstat_sk_spherical():\n # computing spherical covariance equals to the variance of one-dimension\n # data after flattening, n_components=1\n rng = np.random.RandomState(0)\n n_samples, n_features = 500, 2\n\n X = rng.rand(n_samples, n_features)\n X = X - X.mean()\n resp = np.ones((n_samples, 1))\n nk = np.array([n_samples])\n xk = X.mean()\n covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X, nk, xk, 0)\n covars_pred_spherical2 = np.dot(X.flatten().T, X.flatten()) / (\n n_features * n_samples\n )\n assert_almost_equal(covars_pred_spherical, covars_pred_spherical2)\n\n # check the precision computation\n precs_chol_pred = _compute_precision_cholesky(covars_pred_spherical, \"spherical\")\n assert_almost_equal(covars_pred_spherical, 1.0 / precs_chol_pred**2)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 130, "n_words": 76, "vocab_size": 58, "complexity": 1, "nloc": 15, "token_counts": 135, "n_ast_nodes": 211, "n_identifiers": 24, "random_cut": "def test_gaussian_suffstat_sk_spherical():\n # computing spherical covariance equals to the variance of one-dimension\n # data after flattening, n_components=1\n rng = np.random.RandomState(0)\n n_samples, n_features = 500, 2\n\n X = rng.rand(n_samples, n_features)\n X = X - X.mean()\n resp = np.ones((n_samples, 1))\n nk = 
np.array([n_samples])\n xk = X.mean()\n covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X, nk, xk, 0)\n covars_pred_spherical2 = np.dot(X.flatten().T, X.flatten()) / (\n n_features * n_samples\n )\n assert_almost_equal(covars_pred_spherical, covars_pred_spherical2)\n\n # check the precisio" }, { "id": 46172, "commit_id": "3452f7ce45607af04bade5e5edebaa18fdc13819", "repo": "airflow", "path": "tests/utils/test_db.py", "file_name": "test_db.py", "fun_name": "test_offline_upgrade_revision", "commit_message": "Enhance `db upgrade` args (#22102)\n\nMake `db upgrade` args more like `db downgrade`.\r\n\r\n```\r\nusage: airflow db upgrade [-h] [--from-revision FROM_REVISION] [--from-version FROM_VERSION] [-r REVISION]\r\n [-s] [-n VERSION]\r\n\r\nUpgrade the schema of the metadata database. To print but not execute commands, use option ``--show-sql-only``. If using options ``--from-revision`` or ``--from-version``, you must also use ``--show-sql-only``, because if actually *running* migrations, we should only migrate from the *current* revision.\r\n\r\noptional arguments:\r\n -h, --help show this help message and exit\r\n --from-revision FROM_REVISION\r\n (Optional) If generating sql, may supply a *from* revision\r\n --from-version FROM_VERSION\r\n (Optional) If generating sql, may supply a *from* version\r\n -r REVISION, --revision REVISION\r\n (Optional) The airflow revision to upgrade to. Note: must provide either `--revision` or `--version`.\r\n -s, --show-sql-only Don't actually run migrations; just print out sql scripts for offline migration. Required if using either `--from-version` or `--from-version`.\r\n -n VERSION, --version VERSION\r\n (Optional) The airflow version to upgrade to. Note: must provide either `--revision` or `--version`.\r\n```", "code": "def test_offline_upgrade_revision(self, from_revision, to_revision):\n with mock.patch('airflow.utils.db.settings.engine.dialect'):\n with mock.patch('alembic.command.upgrade') as mock_alembic_upgrade:\n upgradedb(from_revision=from_revision, to_revision=to_revision, show_sql_only=True)\n mock_alembic_upgrade.assert_called_once_with(mock.ANY, f\"{from_revision}:{to_revision}\", sql=True)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 55, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 56, "n_ast_nodes": 101, "n_identifiers": 12, "random_cut": "def test_offline_upgrade_revision(self, from_revision, to_revision):\n with mock.p" }, { "id": 139298, "commit_id": "bc8742792cde5be62e22add01686a9c539e0f465", "repo": "ray", "path": "python/ray/tune/trial.py", "file_name": "trial.py", "fun_name": "should_stop", "commit_message": "[Tune] Logging of bad results dict keys (#23954)\n\n[User complains](https://discuss.ray.io/t/which-attributes-can-be-used-in-checkpoint-score-attr-when-using-tune-run/5826) about logging on failure of locating `checkpoint_score_attr ` in results dict not being informative.\r\nI propose that we log the actual results dict keys and extended stopping criteria, which imho should not log the whole result dict as this might contain tensors.\r\n\r\nMaybe there are other similar cases in tune library, in which I don't know my way around that good.", "code": "def should_stop(self, result):\n \n if result.get(DONE):\n return True\n\n for criteria, stop_value in self.stopping_criterion.items():\n if criteria not in result:\n raise TuneError(\n \"Stopping criteria {} not provided in 
result dict. Keys \"\n \"are {}.\".format(criteria, list(result.keys()))\n )\n elif isinstance(criteria, dict):\n raise ValueError(\n \"Stopping criteria is now flattened by default. \"\n \"Use forward slashes to nest values `key1/key2/key3`.\"\n )\n elif result[criteria] >= stop_value:\n return True\n return False\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 285, "n_words": 62, "vocab_size": 48, "complexity": 6, "nloc": 17, "token_counts": 83, "n_ast_nodes": 140, "n_identifiers": 16, "random_cut": "def should_stop(self, result):\n \n if result.get(DONE):\n return True\n\n for criteria, stop_value in self.stopping_criterion.items():\n if criteria not in result:\n raise TuneError(\n \"Stopping criteria {} not provided in result dict. Keys \"\n \"are {}.\".format(criteria, list(result.keys()))\n " }, { "id": 116204, "commit_id": "12c18196c71dee5b16b7c8ddcfe9a0bdffbf8440", "repo": "mindsdb", "path": "mindsdb/migrations/versions/2022-08-25_6a54ba55872e_view_integration.py", "file_name": "2022-08-25_6a54ba55872e_view_integration.py", "fun_name": "downgrade", "commit_message": "removed integration_id from view (is already absent in db.py)", "code": "def downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('view', schema=None) as batch_op:\n batch_op.add_column(sa.Column('integration_id', sa.INTEGER(), autoincrement=False, nullable=False))\n batch_op.create_foreign_key('fk_integration_id', 'integration', ['integration_id'], ['id'])\n\n # ### end Alembic commands ###\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 54, "n_words": 32, "vocab_size": 26, "complexity": 1, "nloc": 4, "token_counts": 59, "n_ast_nodes": 105, "n_identifiers": 12, "random_cut": "def downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('view', schema=None) as batch_op:\n batch_op.add_column(sa.Column('integration_id', sa.INTEGER(), autoincrement=False, nullable=False))\n batch_op.create_foreign_key('fk_integrati" }, { "id": 139805, "commit_id": "a25235a2c2d8882b7793cb7e5010764aa4adc999", "repo": "ray", "path": "python/ray/tune/tests/test_sync.py", "file_name": "test_sync.py", "fun_name": "_prepareDirForTestSyncRemoteTask", "commit_message": "[tune] Fast path for `sync_dir_between_nodes` (#24958)\n\nThis PR adds a fast path for `sync_dir_between_nodes` that gets triggered if both source IP and target IP are the same. 
It uses simple `shutil` operations instead of packing and unpacking to improve performance.", "code": "def _prepareDirForTestSyncRemoteTask(self):\n temp_source = tempfile.mkdtemp()\n temp_up_target = tempfile.mkdtemp()\n temp_down_target = tempfile.mkdtemp()\n self.addCleanup(shutil.rmtree, temp_source)\n self.addCleanup(shutil.rmtree, temp_up_target)\n self.addCleanup(shutil.rmtree, temp_down_target)\n\n os.makedirs(os.path.join(temp_source, \"A\", \"a1\"))\n os.makedirs(os.path.join(temp_source, \"A\", \"a2\"))\n os.makedirs(os.path.join(temp_source, \"B\", \"b1\"))\n with open(os.path.join(temp_source, \"level_0.txt\"), \"wt\") as fp:\n fp.write(\"Level 0\\n\")\n with open(os.path.join(temp_source, \"A\", \"level_a1.txt\"), \"wt\") as fp:\n fp.write(\"Level A1\\n\")\n with open(os.path.join(temp_source, \"A\", \"a1\", \"level_a2.txt\"), \"wt\") as fp:\n fp.write(\"Level A2\\n\")\n with open(os.path.join(temp_source, \"B\", \"level_b1.txt\"), \"wt\") as fp:\n fp.write(\"Level B1\\n\")\n return temp_source, temp_up_target, temp_down_target\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 207, "n_words": 66, "vocab_size": 35, "complexity": 1, "nloc": 19, "token_counts": 221, "n_ast_nodes": 393, "n_identifiers": 17, "random_cut": "def _prepareDirForTestSyncRemoteTask(self):\n temp_source = tempfile.mkdtemp()\n temp_up_target = tempfile.mkdtemp()\n temp_down_target = tempfile.mkdtemp()\n self.addCleanup(shutil.rmtree, temp_source)\n self.addCleanup(shutil.rmtree, temp_up_target)\n self.addCleanup(shutil.rmtree, temp_down_target)\n\n os.makedirs(os.path.join(temp_source, \"A\", \"a1\"))\n " }, { "id": 198727, "commit_id": "4c22fad1d280711c2a868cabb7d2dbd90c1ac052", "repo": "sympy", "path": "sympy/printing/tests/test_julia.py", "file_name": "test_julia.py", "fun_name": "test_1_over_x_and_sqrt", "commit_message": "Add extra spaces in julia_code() printing.", "code": "def test_1_over_x_and_sqrt():\n # 1.0 and 0.5 would do something different in regular StrPrinter,\n # but these are exact in IEEE floating point so no different here.\n assert julia_code(1/x) == '1 ./ x'\n assert julia_code(x**-1) == julia_code(x**-1.0) == '1 ./ x'\n assert julia_code(1/sqrt(x)) == '1 ./ sqrt(x)'\n assert julia_code(x**-S.Half) == julia_code(x**-0.5) == '1 ./ sqrt(x)'\n assert julia_code(sqrt(x)) == 'sqrt(x)'\n assert julia_code(x**S.Half) == julia_code(x**0.5) == 'sqrt(x)'\n assert julia_code(1/pi) == '1 / pi'\n assert julia_code(pi**-1) == julia_code(pi**-1.0) == '1 / pi'\n assert julia_code(pi**-0.5) == '1 / sqrt(pi)'\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 116, "n_words": 84, "vocab_size": 46, "complexity": 1, "nloc": 10, "token_counts": 138, "n_ast_nodes": 221, "n_identifiers": 7, "random_cut": "def test_1_over_x_and_sqrt():\n # 1.0 and 0.5 would do something different in regular StrPrinter,\n # but these are exact in IEEE floating point so no different here.\n assert julia_code(1/x) == '1 ./ x'\n assert julia_code(x**-1) == julia_code(x**-1.0) == '1 ./ x'\n assert julia_code(1/sqrt(x)) == '1 ./ sqrt(x)'\n assert julia_code(x**-S.Half) == julia_code(x**-0.5) == '1 ./ sqrt(x)'\n assert julia_code(sqrt(x)) == 'sqrt(x)'\n assert julia_code(x**S.Half) == julia_code(x**0." 
}, { "id": 311520, "commit_id": "58b8c30221a6f6e5acbbe98b7e3298b03fb741f5", "repo": "core", "path": "tests/components/homekit_controller/test_lock.py", "file_name": "test_lock.py", "fun_name": "test_switch_change_lock_state", "commit_message": "Improve homekit_controller tests (#65266)", "code": "async def test_switch_change_lock_state(hass, utcnow):\n \n helper = await setup_test_component(hass, create_lock_service)\n\n await hass.services.async_call(\n \"lock\", \"lock\", {\"entity_id\": \"lock.testdevice\"}, blocking=True\n )\n helper.async_assert_service_values(\n ServicesTypes.LOCK_MECHANISM,\n {\n CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 1,\n },\n )\n\n await hass.services.async_call(\n \"lock\", \"unlock\", {\"entity_id\": \"lock.testdevice\"}, blocking=True\n )\n helper.async_assert_service_values(\n ServicesTypes.LOCK_MECHANISM,\n {\n CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 0,\n },\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 147, "n_words": 39, "vocab_size": 23, "complexity": 1, "nloc": 20, "token_counts": 95, "n_ast_nodes": 158, "n_identifiers": 14, "random_cut": "async def test_switch_change_lock_state(hass, utcnow):\n \n helper = await setup_test_component(hass, create_lock_service)\n\n await hass.services.async_call(\n \"lock\", \"lock\", {\"entity_id\": \"lock.testdevice\"}, blocking=True\n )\n helper.async_assert_service_values(" }, { "id": 285321, "commit_id": "9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/stocks/dark_pool_shorts/test_finra_model.py", "file_name": "test_finra_model.py", "fun_name": "test_getATSdata", "commit_message": "Here we merge all API Refactor related branches (#2236)\n\n* Update api.py\r\n\r\n* Updated forex menu\r\n\r\n* refactor ycrv command\r\n\r\n* refactor ycrv command black\r\n\r\n* refactor ecocal command\r\n\r\n* Minh changes\r\n\r\n* Adding space to test pushing\r\n\r\n* title fix ecocal df\r\n\r\n* get economic calendar annotation\r\n\r\n* fix investingcom tests\r\n\r\n* refactor index command\r\n\r\n* refactor overview command\r\n\r\n* give defaults to wsj view function args\r\n\r\n* rename date args investincom\r\n\r\n* refacto bigmac command\r\n\r\n* fix ecocal typo\r\n\r\n* refactor rtps command\r\n\r\n* alphavantage gdp\r\n\r\n* alphavantage gdp per capita\r\n\r\n* alphavantage cpi\r\n\r\n* alphavantage tyld\r\n\r\n* alphavantage inf\r\n\r\n* refactor macro command\r\n\r\n* refactor macro command w helpers\r\n\r\n* refactor treasury command\r\n\r\n* fix macro on terminal\r\n\r\n* treasury labels\r\n\r\n* refactor maturities\r\n\r\n* update treasury maturities doc strings\r\n\r\n* refactor get economic calendar finhub\r\n\r\n* refactor map command api\r\n\r\n* display map filter choices\r\n\r\n* route economy api to performance map\r\n\r\n* route economy api to performance map\r\n\r\n* display group choices on valuation command\r\n\r\n* refactor performance and valuation commands\r\n\r\n* refactor spectrum model and view\r\n\r\n* add choices to spectrum controller\r\n\r\n* delete image after view\r\n\r\n* fix model tests finviz\r\n\r\n* fix finciz view tests\r\n\r\n* refactor futures\r\n\r\n* fix some tests\r\n\r\n* fix more tests\r\n\r\n* fix controller test\r\n\r\n* refactor fred series notes\r\n\r\n* update fred notes docstring\r\n\r\n* refacto fred series ids\r\n\r\n* fix pred and qa when empty datasets\r\n\r\n* refactor fred\r\n\r\n* uncomment 
stuff\r\n\r\n* refacto get series data\r\n\r\n* fix some tests\r\n\r\n* set defaults on args\r\n\r\n* refactor fred yield curve\r\n\r\n* black\r\n\r\n* fix spell and remove ecocal names\r\n\r\n* fix linting\r\n\r\n* linting\r\n\r\n* pylint fix\r\n\r\n* change dangerous defaults\r\n\r\n* Working through crypto fixes (#2256)\r\n\r\n* Working through crypto fixes\r\n\r\n* Continued adding crypto stuff\r\n\r\n* Added crypto overview\r\n\r\n* Added test fixes\r\n\r\n* Added fixtures\r\n\r\n* Fixed tests\r\n\r\n* Fixed charting issue\r\n\r\n* Removed broken APIs\r\n\r\n* Final adjustments\r\n\r\n* Added test fixes\r\n\r\n* map get groups and get ycrv countries into old api\r\n\r\n* exposed econdb helper funcs\r\n\r\n* remove helpers\r\n\r\n* refactor search indices\r\n\r\n* linting\r\n\r\n* refactor arg currency\r\n\r\n* pylint from currency\r\n\r\n* Started switching crpyto ascending to ascend\r\n\r\n* Merging\r\n\r\n* Portfolio model arguements, params, and docstring\r\n\r\n* Refactored for etf commands (#2292)\r\n\r\n* Refactored for etf commands\r\n\r\n* Fixed tests\r\n\r\n* Added load command\r\n\r\n* Fixed menu\r\n\r\n* Portfolio logic fixes\r\n\r\n* Added econometrics (#2260)\r\n\r\n* Added econometrics\r\n\r\n* Fixed tests\r\n\r\n* Simplified API\r\n\r\n* Added test fixes\r\n\r\n* Added test csv\r\n\r\n* Allowed examples to be loaded\r\n\r\n* Fund refactor (#2291)\r\n\r\n* Fund refactor\r\n\r\n* Changed fund_name and fund to name\r\n\r\n* Changed ascending to ascend\r\n\r\n* Stock menu refactoring for easier API usage (#2194)\r\n\r\n* Stocks refactoring for easier API usage\r\n\r\n* Linting\r\n\r\n* Refactor newly added features\r\n\r\n* Linting\r\n\r\n* Fixing tests\r\n\r\n* Refactor common files used by stocks menu\r\n\r\n* Fixing flake8\r\n\r\n* Fix linting and tests\r\n\r\n* Linting\r\n\r\n* Fix flake8\r\n\r\n* refactor insider_data\r\n\r\n* refactor mentions\r\n\r\n* refactor watchlist\r\n\r\n* refactor sentiment\r\n\r\n* refactor sentiment\r\n\r\n* fix yahoofinance tests\r\n\r\n* refactor load and candle\r\n\r\n* refactor get_news and display_news\r\n\r\n* refactor stocks.ins.act\r\n\r\n* candle default matplotlib\r\n\r\n* fix yahoofinance_view tests\r\n\r\n* fix ark model tests\r\n\r\n* fix ark view tests\r\n\r\n* fix business insider model\r\n\r\n* fix business insider view\r\n\r\n* refactor csimarket model\r\n\r\n* fix tests csi market model\r\n\r\n* update dd controller\r\n\r\n* fix get suppliers tests\r\n\r\n* fix dd controller tests\r\n\r\n* fix finhub tests\r\n\r\n* fix finviz tests\r\n\r\n* fix fmp tests\r\n\r\n* fix marketwatch tests\r\n\r\n* corrected argument keywords in test_bt_model\r\n\r\n* corrected argument keywords in test_bt_view\r\n\r\n* refactor fa controller\r\n\r\n* refactor marketwatch view\r\n\r\n* refactor gov controller\r\n\r\n* fix tests fa av\r\n\r\n* fix tests elect\r\n\r\n* fix dcf tests\r\n\r\n* fix polygon tests\r\n\r\n* fix fmp tests\r\n\r\n* fix quiverquant tests\r\n\r\n* fix yahoofinance fa tests\r\n\r\n* fix more fa tests\r\n\r\n* fix insider tests\r\n\r\n* fix more tests\r\n\r\n* fix more tests\r\n\r\n* fix options tests\r\n\r\n* fix stock gov tests\r\n\r\n* fix tests test_ba_controller\r\n\r\n* fix tests for test_finviz_compare_model.py\r\n\r\n* fixed 2 tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fix final tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* Fix 
tests\r\n\r\n* black\r\n\r\n* forgot to black tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* flakefix\r\n\r\n* Tests + code : Stocks / Discovery\r\n\r\n* fix tests\r\n\r\n* added recorder\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* black\r\n\r\n* black\r\n\r\n* remove unused imports\r\n\r\n* refactor display raw\r\n\r\n* sia dicts fix\r\n\r\n* pylint\r\n\r\n* linting\r\n\r\n* remove dangerous default\r\n\r\n* fix tests\r\n\r\n* fix beta model test\r\n\r\n* black\r\n\r\n* skip screener qa test\r\n\r\n* change sector path to sectors\r\n\r\n* update tests readme\r\n\r\n* fix metric defaults\r\n\r\n* black\r\n\r\n* substitute lost ticker\r\n\r\n* defaults cpic\r\n\r\n* another round on sia\r\n\r\n* refactor cramer\r\n\r\n* reduce default tweets on sentiment\r\n\r\n* refactor yf hist, corr, volume\r\n\r\n* arkorders default\r\n\r\n* refactor income, balance, cashflow\r\n\r\n* refacto scorr, screener, getfinnhub\r\n\r\n* refactor stockgrid\r\n\r\n* ibkr refactor\r\n\r\n* another round on stockgrid\r\n\r\n* add dividens end point\r\n\r\n* refactor discovery endpoints\r\n\r\n* update docstrings with similar input\r\n\r\n* refactor messages\r\n\r\n* refactor ba\r\n\r\n* refactor regioons\r\n\r\n* refactor twitter sentiment\r\n\r\n* refactor hist\r\n\r\n* refactor regions\r\n\r\n* give default to timeframe\r\n\r\n* refactor bunch of defaults and arg names\r\n\r\n* remove leftover imports\r\n\r\n* refactor vwap\r\n\r\n* let tests run\r\n\r\n* fix tests\r\n\r\n* fix stock tests\r\n\r\n* fix stockanalysis tests\r\n\r\n* flake\r\n\r\n* MYPY\r\n\r\n* Made important changes\r\n\r\n* added fixes\r\n\r\n* Fixed big issue\r\n\r\n* Added fixes to tests\r\n\r\n* fix qa tests\r\n\r\n* fix tests\r\n\r\n* fix 1 more test\r\n\r\n* last stocks failing\r\n\r\n* fix crypto test\r\n\r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: montezdesousa \r\nCo-authored-by: hjoaquim \r\nCo-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com>\r\nCo-authored-by: colin99d \r\n\r\n* fix portfolio tests\r\n\r\n* change period to window\r\n\r\n* update ca docstrings\r\n\r\n* refactor get_similar_companies func\r\n\r\n* Fixed\r\n\r\n* Update CI\r\n\r\n* Update CI 2\r\n\r\n* Update CI 3\r\n\r\n* Update dependencies\r\n\r\nCo-authored-by: colin99d \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: montezdesousa \r\nCo-authored-by: James Simmons \r\nCo-authored-by: Theodore Aptekarev \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com>\r\nCo-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com>\r\nCo-authored-by: northern-64bit <75195383+northern-64bit@users.noreply.github.com>\r\nCo-authored-by: hjoaquim ", "code": "def test_getATSdata(recorder):\n df_ats, d_ats_reg = finra_model.getATSdata(\n limit=2,\n tier_ats=\"T1\",\n )\n\n d_ats_reg = {k: round(v, 9) for k, v in d_ats_reg.items()}\n\n recorder.capture_list([df_ats, d_ats_reg])\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 46, "n_words": 21, "vocab_size": 19, "complexity": 2, "nloc": 7, "token_counts": 54, "n_ast_nodes": 84, "n_identifiers": 13, "random_cut": "def test_getATSdata(recorder):\n df_ats, d_ats_reg = finra_model.getATSdata(\n limit=2,\n tier_ats=\"T1\",\n )\n\n d_ats_reg = {k: round(v, 9) 
for k, v in d_ats_reg.items()}\n\n recorder.capture_list([df" }, { "id": 122481, "commit_id": "1cead779a3abd18066195919fc5693a15cfa9070", "repo": "jax", "path": "jax/_src/lax/linalg.py", "file_name": "linalg.py", "fun_name": "triangular_solve_shape_rule", "commit_message": "Add support for Hessenberg and tridiagonal matrix reductions on CPU.\n\n* Implement jax.scipy.linalg.hessenberg and jax.lax.linalg.hessenberg.\n* Export what was previously jax._src.lax.linalg.orgqr as jax.lax.linalg.householder_product, since it can be used with some minor tweaks to compute the unitary matrix of a Hessenberg reduction.\n* Implement jax.lax.linalg.tridiagonal, which is the symmetric (Hermitian) equivalent of Hessenberg reduction.\n\nNone of these primitives are differentiable at the moment.\n\nPiperOrigin-RevId: 487224934", "code": "def _triangular_solve_shape_rule(a, b, *, left_side=False, **unused_kwargs):\n if a.ndim < 2:\n msg = \"triangular_solve requires a.ndim to be at least 2, got {}.\"\n raise TypeError(msg.format(a.ndim))\n if b.ndim < 2:\n msg = \"triangular_solve requires b.ndim to be at least 2, got {}.\"\n raise TypeError(msg.format(b.ndim))\n if a.shape[-1] != a.shape[-2]:\n msg = (\"triangular_solve requires the last two dimensions of a to be equal \"\n \"in size, got a.shape of {}.\")\n raise TypeError(msg.format(a.shape))\n if a.shape[:-2] != b.shape[:-2]:\n msg = (\"triangular_solve requires both arguments to have the same number \"\n \"of dimensions and equal batch dimensions, got {} and {}.\")\n raise TypeError(msg.format(a.shape, b.shape))\n common_dim = -2 if left_side else -1\n if a.shape[-1] != b.shape[common_dim]:\n msg = \"Incompatible shapes for arguments to triangular_solve: {} and {}.\"\n raise TypeError(msg.format(a.shape, b.shape))\n return b.shape\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 180, "n_words": 124, "vocab_size": 68, "complexity": 7, "nloc": 20, "token_counts": 184, "n_ast_nodes": 299, "n_identifiers": 11, "random_cut": "def _triangular_solve_shape_rule(a, b, *, left_side=False, **unused_kwargs):\n if a.ndim < 2:\n msg = \"triangular_solve requires a.ndim to be at least 2, got {}.\"\n raise TypeError(msg.format(a.ndim))\n if b.ndim < 2:\n msg = \"triangular_solve requires b.ndim to be at least 2, got {}.\"\n raise TypeError(msg.format(b.ndim))\n if a.shape[-1] != a.shape[-2]:\n msg = (\"triangular_solve requires the last two dimensions of a to be equal \"\n \"in size, got a.shape of {}.\")\n raise TypeError(msg.format(a.shape))\n if a.shape[:-2] != b.shape[:-2]:\n msg = (\"triangular_solve requires both arguments to have the same number \"\n \"of dimensions and equal batch dimensions, got {} and {}.\")\n raise TypeError(msg.format(a.shape, b.shape))\n common_dim = -2 if left_side else -1\n if a.shape[-1] != b.shape[common_dim]:\n msg = \"Incompatible shapes for arguments to triangular_solve: {} and {}.\"\n raise TypeError(msg.format(a.shape, b.shape))\n return b.shape\n" }, { "id": 156051, "commit_id": "cccb9d8d8e33a891396b1275c2448c352ef40c27", "repo": "dask", "path": "dask/array/percentile.py", "file_name": "percentile.py", "fun_name": "merge_percentiles", "commit_message": "absolufy-imports - No relative - PEP8 (#8796)\n\nConversation in https://github.com/dask/distributed/issues/5889", "code": "def merge_percentiles(finalq, qs, vals, method=\"lower\", Ns=None, raise_on_nan=True):\n \n from dask.array.utils import array_safe\n\n if isinstance(finalq, 
Iterator):\n finalq = list(finalq)\n finalq = array_safe(finalq, like=finalq)\n qs = list(map(list, qs))\n vals = list(vals)\n if Ns is None:\n vals, Ns = zip(*vals)\n Ns = list(Ns)\n\n L = list(zip(*[(q, val, N) for q, val, N in zip(qs, vals, Ns) if N]))\n if not L:\n if raise_on_nan:\n raise ValueError(\"No non-trivial arrays found\")\n return np.full(len(qs[0]) - 2, np.nan)\n qs, vals, Ns = L\n\n # TODO: Perform this check above in percentile once dtype checking is easy\n # Here we silently change meaning\n if vals[0].dtype.name == \"category\":\n result = merge_percentiles(\n finalq, qs, [v.codes for v in vals], method, Ns, raise_on_nan\n )\n import pandas as pd\n\n return pd.Categorical.from_codes(result, vals[0].categories, vals[0].ordered)\n if not np.issubdtype(vals[0].dtype, np.number):\n method = \"nearest\"\n\n if len(vals) != len(qs) or len(Ns) != len(qs):\n raise ValueError(\"qs, vals, and Ns parameters must be the same length\")\n\n # transform qs and Ns into number of observations between percentiles\n counts = []\n for q, N in zip(qs, Ns):\n count = np.empty_like(finalq, shape=len(q))\n count[1:] = np.diff(array_safe(q, like=q[0]))\n count[0] = q[0]\n count *= N\n counts.append(count)\n\n # Sort by calculated percentile values, then number of observations.\n combined_vals = np.concatenate(vals)\n combined_counts = array_safe(np.concatenate(counts), like=combined_vals)\n sort_order = np.argsort(combined_vals)\n combined_vals = np.take(combined_vals, sort_order)\n combined_counts = np.take(combined_counts, sort_order)\n\n # percentile-like, but scaled by total number of observations\n combined_q = np.cumsum(combined_counts)\n\n # rescale finalq percentiles to match combined_q\n finalq = array_safe(finalq, like=combined_vals)\n desired_q = finalq * sum(Ns)\n\n # the behavior of different interpolation methods should be\n # investigated further.\n if method == \"linear\":\n rv = np.interp(desired_q, combined_q, combined_vals)\n else:\n left = np.searchsorted(combined_q, desired_q, side=\"left\")\n right = np.searchsorted(combined_q, desired_q, side=\"right\") - 1\n np.minimum(left, len(combined_vals) - 1, left) # don't exceed max index\n lower = np.minimum(left, right)\n upper = np.maximum(left, right)\n if method == \"lower\":\n rv = combined_vals[lower]\n elif method == \"higher\":\n rv = combined_vals[upper]\n elif method == \"midpoint\":\n rv = 0.5 * (combined_vals[lower] + combined_vals[upper])\n elif method == \"nearest\":\n lower_residual = np.abs(combined_q[lower] - desired_q)\n upper_residual = np.abs(combined_q[upper] - desired_q)\n mask = lower_residual > upper_residual\n index = lower # alias; we no longer need lower\n index[mask] = upper[mask]\n rv = combined_vals[index]\n else:\n raise ValueError(\n \"interpolation method can only be 'linear', 'lower', \"\n \"'higher', 'midpoint', or 'nearest'\"\n )\n return rv\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 834, "n_words": 366, "vocab_size": 221, "complexity": 18, "nloc": 68, "token_counts": 615, "n_ast_nodes": 963, "n_identifiers": 70, "random_cut": "def merge_percentiles(finalq, qs, vals, method=\"lower\", Ns=None, raise_on_nan=True):\n \n from dask.array.utils import array_safe\n\n if isinstance(finalq, Iterator):\n finalq = list(finalq)\n finalq = array_safe(finalq, like=finalq)\n qs = list(map(list, qs))\n vals = list(vals)\n if Ns is None:\n vals, Ns = zip(*vals)\n Ns = list(Ns)\n\n L = list(zip(*[(q, val, N) for q, 
val, N in zip(qs, vals, Ns) if N]))\n if not L:\n if raise_on_nan:\n raise ValueError(\"No non-trivial arrays found\")\n return np.full(len(qs[0]) - 2, np.nan)\n qs, vals, Ns = L\n\n # TODO: Perform this check above in percentile once dtype checking is easy\n # Here we silently change meaning\n if vals[0].dtype.name == \"category\":\n result = merge_percentiles(\n finalq, qs, [v.codes for v in vals], method, Ns, raise_on_nan\n " }, { "id": 80321, "commit_id": "a4a3ba65d736045733cb49430d7076b73aec23bb", "repo": "awx", "path": "awx/main/tasks/receptor.py", "file_name": "receptor.py", "fun_name": "get_receptor_sockfile", "commit_message": "Refactored tasks.py to a package\n--- Added 3 new sub-package : awx.main.tasks.system , awx.main.tasks.jobs , awx.main.tasks.receptor\n--- Modified the functional tests and unit tests accordingly", "code": "def get_receptor_sockfile():\n with open(__RECEPTOR_CONF, 'r') as f:\n data = yaml.safe_load(f)\n for section in data:\n for entry_name, entry_data in section.items():\n if entry_name == 'control-service':\n if 'filename' in entry_data:\n return entry_data['filename']\n else:\n raise RuntimeError(f'Receptor conf {__RECEPTOR_CONF} control-service entry does not have a filename parameter')\n else:\n raise RuntimeError(f'Receptor conf {__RECEPTOR_CONF} does not have control-service entry needed to get sockfile')\n\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 164, "n_words": 56, "vocab_size": 42, "complexity": 5, "nloc": 12, "token_counts": 69, "n_ast_nodes": 130, "n_identifiers": 12, "random_cut": "def get_receptor_sockfile():\n with open(__RECEPTOR_CONF, 'r') as f:\n data = yaml.safe_load(f)\n for section in data:\n for entry_name, entry_data in sectio" }, { "id": 189034, "commit_id": "f1f299527634a425cb34b621d6201fa9172d3529", "repo": "psutil", "path": "psutil/tests/test_linux.py", "file_name": "test_linux.py", "fun_name": "test_parse_smaps_mocked", "commit_message": "[Linux] Speedup `Process.full_memory_info()` (#2108)\n\n`Process.memory_full_info()` (reporting proecss USS/PSS/Swap memory) now reads ``/proc/pid/smaps_rollup`` instead of ``/proc/pids/smaps`` which makes it 5 times faster.\r\n\r\nWithout patch:\r\n```\r\n~/svn/psutil {linux-smaps-rollup}$ python3 -m timeit -s \"import psutil; p = psutil.Process()\" \"p.memory_full_info()\"\r\n500 loops, best of 5: 518 usec per loop\r\n```\r\n\r\nWith patch (5 times faster):\r\n```\r\n~/svn/psutil {linux-smaps-rollup}$ python3 -m timeit -s \"import psutil; p = psutil.Process()\" \"p.memory_full_info()\"\r\n2000 loops, best of 5: 111 usec per loop\r\n```\r\n\r\n----\r\n\r\n`make test-memleaks` suite, who heavily rely on `Process.memory_full_info()`, also received a nice speedup:\r\n\r\nBefore patch:\r\n\r\n```\r\n$ make test-memleaks\r\n----------------------------------------------------------------------\r\nRan 99 tests in 1.646s\r\n\r\nOK (skipped=9)\r\nSUCCESS\r\n```\r\n\r\nAfter patch:\r\n\r\n```\r\n$ make test-memleaks\r\n----------------------------------------------------------------------\r\nRan 99 tests in 1.195s\r\n\r\nOK (skipped=9)\r\nSUCCESS\r\n```", "code": "def test_parse_smaps_mocked(self):\n # See: https://github.com/giampaolo/psutil/issues/1222\n with mock_open_content(\n \"/proc/%s/smaps\" % os.getpid(),\n textwrap.dedent().encode()) as m:\n p = psutil._pslinux.Process(os.getpid())\n uss, pss, swap = p._parse_smaps()\n assert m.called\n self.assertEqual(uss, (6 + 7 + 14) * 1024)\n 
self.assertEqual(pss, 3 * 1024)\n self.assertEqual(swap, 15 * 1024)\n\n # On PYPY file descriptors are not closed fast enough.", "url": "https://github.com/giampaolo/psutil.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 153, "n_words": 49, "vocab_size": 42, "complexity": 1, "nloc": 32, "token_counts": 95, "n_ast_nodes": 159, "n_identifiers": 19, "random_cut": "def test_parse_smaps_mocked(self):\n # See: https://github.com/giampaolo/psutil/issues/1222\n with mock_open_content(\n \"/proc/%s/smaps\" % os.getpid(),\n textwrap.dedent().encode()) as m:\n p = psutil._pslinux.Process(os.getpid())\n uss, pss, swap = p._parse_smaps()\n assert m.called\n " }, { "id": 134380, "commit_id": "182744bbd151c166b8028355eae12a5da63fb3cc", "repo": "ray", "path": "rllib/algorithms/algorithm_config.py", "file_name": "algorithm_config.py", "fun_name": "__getitem__", "commit_message": "[RLlib] AlgorithmConfig: Next steps (volume 01); Algos, RolloutWorker, PolicyMap, WorkerSet use AlgorithmConfig objects under the hood. (#29395)", "code": "def __getitem__(self, item):\n # TODO: Uncomment this once all algorithms use AlgorithmConfigs under the\n # hood (as well as Ray Tune).\n # if log_once(\"algo_config_getitem\"):\n # logger.warning(\n # \"AlgorithmConfig objects should NOT be used as dict! \"\n # f\"Try accessing `{item}` directly as a property.\"\n # )\n item = self._translate_special_keys(item)\n return getattr(self, item)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 135, "n_words": 52, "vocab_size": 44, "complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 41, "n_identifiers": 5, "random_cut": "def __getitem__(self, item):\n # TODO: Uncomment this once all algorithms use AlgorithmConfigs under the\n # hood (as well as Ray Tune).\n # if log_once(\"algo_config_getitem\"):\n # logger.warning(\n # \"AlgorithmConfig " }, { "id": 280516, "commit_id": "b3f12f1acc0a599e9aa61349c7e1b4e3afcdd932", "repo": "keras", "path": "keras/saving/experimental/serialization_lib_test.py", "file_name": "serialization_lib_test.py", "fun_name": "test_custom_fn", "commit_message": "Support lambdas in new serialization.\n\nPiperOrigin-RevId: 491075544", "code": "def test_custom_fn(self):\n obj = {\"activation\": custom_fn}\n serialized, _, reserialized = self.roundtrip(\n obj, custom_objects={\"custom_fn\": custom_fn}\n )\n self.assertEqual(serialized, reserialized)\n\n # Test inside layer\n dense = keras.layers.Dense(1, activation=custom_fn)\n dense.build((None, 2))\n _, new_dense, _ = self.roundtrip(\n dense, custom_objects={\"custom_fn\": custom_fn}\n )\n x = tf.random.normal((2, 2))\n y1 = dense(x)\n _ = new_dense(x)\n new_dense.set_weights(dense.get_weights())\n y2 = new_dense(x)\n self.assertAllClose(y1, y2, atol=1e-5)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 179, "n_words": 53, "vocab_size": 37, "complexity": 1, "nloc": 17, "token_counts": 140, "n_ast_nodes": 218, "n_identifiers": 27, "random_cut": "def test_custom_fn(self):\n obj = {\"activation\": custom_fn}\n serialized, _, reserialized = self.roun" }, { "id": 107475, "commit_id": "f156db08eee54d285ab0fb4e031e48d078ba6aa3", "repo": "matplotlib", "path": "lib/matplotlib/axes/_base.py", "file_name": "_base.py", "fun_name": "tick_params", "commit_message": "DOC: More cleanup axes -> Axes", "code": "def 
tick_params(self, axis='both', **kwargs):\n \n _api.check_in_list(['x', 'y', 'both'], axis=axis)\n if axis in ['x', 'both']:\n xkw = dict(kwargs)\n xkw.pop('left', None)\n xkw.pop('right', None)\n xkw.pop('labelleft', None)\n xkw.pop('labelright', None)\n self.xaxis.set_tick_params(**xkw)\n if axis in ['y', 'both']:\n ykw = dict(kwargs)\n ykw.pop('top', None)\n ykw.pop('bottom', None)\n ykw.pop('labeltop', None)\n ykw.pop('labelbottom', None)\n self.yaxis.set_tick_params(**ykw)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 202, "n_words": 42, "vocab_size": 29, "complexity": 3, "nloc": 16, "token_counts": 141, "n_ast_nodes": 248, "n_identifiers": 13, "random_cut": "def tick_params(self, axis='both', **kwargs):\n \n _api.check_in_list(['x', 'y', 'both'], axis=axis)\n if axis in ['x', 'both']:\n xkw = dict(kwargs)\n xkw.pop('left', None)\n xkw.pop('right', None)\n xkw.pop('labelleft', None)\n xkw.pop('labelright', None)\n self.xaxis.set_tick_params(**xkw)\n if axis in ['y', 'both']:\n ykw = dict(kwargs)\n ykw.pop('top', None)\n ykw.pop('bottom', None)\n ykw.pop('labeltop', None)\n ykw.pop('labelbottom', None)\n self.yaxis.set_tick_params(**ykw)\n" }, { "id": 266163, "commit_id": "977b79ecee4d1d8054c0fd9528c563376fe3bcd9", "repo": "netbox", "path": "netbox/dcim/views.py", "file_name": "views.py", "fun_name": "render", "commit_message": "Check that device has a platform set before rendering napalm tab", "code": "def render(self, instance):\n # Display NAPALM tabs only for devices which meet certain requirements\n if not (\n instance.status == 'active' and\n instance.primary_ip and\n instance.platform and\n instance.platform.napalm_driver\n ):\n return None\n return super().render(instance)\n\n\n@register_model_view(Device, 'status')", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "@register_model_view(Device, 'status')", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 114, "n_words": 33, "vocab_size": 30, "complexity": 5, "nloc": 9, "token_counts": 42, "n_ast_nodes": 82, "n_identifiers": 10, "random_cut": "def render(self, instance):\n # Display NAPALM tabs only for devices which meet certain requirements\n if not (\n instance.status == 'active' and\n instance.primary_ip and\n instance.platform and\n instance.platform.napalm_driver\n " }, { "id": 97305, "commit_id": "f9dcd325304b37e3bff3869c1589354755e9300e", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_details.py", "file_name": "test_organization_metric_details.py", "fun_name": "test_same_entity_multiple_metric_ids", "commit_message": "ref(metrics-indexer): Change bulk_record, record signatures (#32811)\n\n* ref(metrics-indexer): Change bulk_record, record signatures", "code": "def test_same_entity_multiple_metric_ids(self):\n \n self.store_session(\n self.build_session(\n project_id=self.project.id,\n started=(time.time() // 60) * 60,\n status=\"ok\",\n release=\"foobar@2.0\",\n errors=2,\n )\n )\n response = self.get_response(\n self.organization.slug,\n \"derived_metric.multiple_metrics\",\n )\n assert response.status_code == 404\n assert response.json()[\"detail\"] == (\n \"Not all the requested metrics or the constituent metrics in \"\n \"['derived_metric.multiple_metrics'] have data in the dataset\"\n )\n org_id = self.organization.id\n self._send_buckets(\n [\n {\n \"org_id\": org_id,\n \"project_id\": self.project.id,\n \"metric_id\": 
indexer.record(org_id, \"metric_foo_doe\"),\n \"timestamp\": int(time.time()),\n \"tags\": {\n resolve_weak(\"release\"): indexer.record(org_id, \"foo\"),\n },\n \"type\": \"c\",\n \"value\": 1,\n \"retention_days\": 90,\n },\n ],\n entity=\"metrics_counters\",\n )\n response = self.get_success_response(\n self.organization.slug,\n \"derived_metric.multiple_metrics\",\n )\n assert response.data == {\n \"name\": \"derived_metric.multiple_metrics\",\n \"type\": \"numeric\",\n \"operations\": [],\n \"unit\": \"percentage\",\n \"tags\": [{\"key\": \"release\"}],\n }\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 680, "n_words": 100, "vocab_size": 75, "complexity": 1, "nloc": 48, "token_counts": 216, "n_ast_nodes": 382, "n_identifiers": 27, "random_cut": "def test_same_entity_multiple_metric_ids(self):\n \n self.store_session(\n self.build_session(\n project_id=self.project.id,\n started=(time.time() // 60) * 60,\n status=\"ok\",\n release=\"foobar@2.0\",\n errors=2,\n )\n )\n response = self.get_response(\n self.organization.slug,\n \"derived_metric.multiple_metrics\",\n )\n assert response.status_code == 404\n assert response.json()[\"detail\"] == (\n \"Not all the requested m" }, { "id": 132509, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/tests/test_integration_mlflow.py", "file_name": "test_integration_mlflow.py", "fun_name": "testMlFlowMixinConfig", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def testMlFlowMixinConfig(self):\n clear_env_vars()\n trial_config = {\"par1\": 4, \"par2\": 9.0}\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 22, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 17, "token_counts": 126, "n_ast_nodes": 35, "n_identifiers": 4, "random_cut": "def testMlFlowMixinConfig(self):\n clear_env_vars()\n trial_config = {\"par1\": 4, \"par2\": 9.0}\n" }, { "id": 189968, "commit_id": "7a801707b6dc373fae21af192214620ce0a53380", "repo": "manim", "path": "manim/mobject/text/code_mobject.py", "file_name": "code_mobject.py", "fun_name": "_ensure_valid_file", "commit_message": "Always expand user when validating file-paths (#2885)\n\nThe users are expanded when providing a file-path for the following\r\nmobjects:\r\n- CodeMobjects\r\n- ImageMobjects\r\n- SVGMobjects", "code": "def _ensure_valid_file(self):\n \n if self.file_name is None:\n raise Exception(\"Must specify file for Code\")\n possible_paths = [\n os.path.join(os.path.join(\"assets\", \"codes\"), self.file_name),\n os.path.expanduser(self.file_name),\n ]\n for path in possible_paths:\n if os.path.exists(path):\n self.file_path = path\n return\n error = (\n f\"From: {os.getcwd()}, could not find {self.file_name} at either \"\n + f\"of these locations: {possible_paths}\"\n )\n raise OSError(error)\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 202, "n_words": 50, "vocab_size": 44, "complexity": 4, "nloc": 16, "token_counts": 90, "n_ast_nodes": 170, "n_identifiers": 14, "random_cut": "def _ensure_valid_file(self):\n \n if self.file_name is None:\n raise Exception(\"Must specify file for Code\")\n possible_paths = [\n os.path.join(os.path.join(\"assets\", \"codes\"), 
self.file_name),\n os.path.expanduser(self.file_name),\n ]\n for path in possible_paths:\n if os.path.exists(path):\n self.file_path = path\n return\n " }, { "id": 250659, "commit_id": "bbc65e5f375693ec32d50b805cdb369a607f0b67", "repo": "mitmproxy", "path": "mitmproxy/tools/web/master.py", "file_name": "master.py", "fun_name": "running", "commit_message": "clean up initialization mess\n\nWe now manage the eventloop ourselves no matter which tool.", "code": "async def running(self):\n # Register tornado with the current event loop\n tornado.ioloop.IOLoop.current()\n\n # Add our web app.\n http_server = tornado.httpserver.HTTPServer(self.app)\n http_server.listen(self.options.web_port, self.options.web_host)\n\n self.log.info(\n f\"Web server listening at http://{self.options.web_host}:{self.options.web_port}/\",\n )\n\n return await super().running()\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 98, "n_words": 32, "vocab_size": 31, "complexity": 1, "nloc": 8, "token_counts": 61, "n_ast_nodes": 122, "n_identifiers": 17, "random_cut": "async def running(self):\n # Register tornado with the current event loop\n tornado.ioloop.IOLoop.current()\n\n # Add our web app.\n http_server = tornado.httpserver.HTTPServer(self.app)\n http_server.listen(self.options.web_port, self.options.web_host)\n\n self.log.info(\n f\"Web server listening at http://{self.options.web_host}:{self.options.web_port}/\",\n )\n\n r" }, { "id": 156375, "commit_id": "00572071d15e7e8cfc20d8342b00aabadf0d2102", "repo": "dask", "path": "dask/dataframe/io/tests/test_parquet.py", "file_name": "test_parquet.py", "fun_name": "test_pathlib_path", "commit_message": "Change `to_parquet` default to `write_metadata_file=None` (#8988)\n\n* Refactor to_parquet\r\n\r\nA bit of refactoring before changing the default of\r\n`write_metadata_file` to `None` in `to_parquet`.\r\n\r\n- Simplify implementation\r\n- Don't include file metadata in `write_partition` calls if it's not\r\nneeded\r\n- Everything needed to support implementing `write_metadata_file=None`\r\nas default *except* changing the value (to ensure tests pass).\r\n\r\n* Fixup failing parquet tests\r\n\r\nMost of the failures are due to divisions not being known by default\r\nanymore, since they're only known by default if a `_metadata` file is\r\npresent.\r\n\r\n* Respond to feedback", "code": "def test_pathlib_path(tmpdir, engine):\n import pathlib\n\n df = pd.DataFrame({\"x\": [4, 5, 6, 1, 2, 3]})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=2)\n path = pathlib.Path(str(tmpdir))\n ddf.to_parquet(path, engine=engine)\n ddf2 = dd.read_parquet(path, engine=engine)\n assert_eq(ddf, ddf2, check_divisions=False)\n\n\n@FASTPARQUET_MARK", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "@FASTPARQUET_MARK", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 56, "n_words": 34, "vocab_size": 29, "complexity": 1, "nloc": 9, "token_counts": 95, "n_ast_nodes": 149, "n_identifiers": 22, "random_cut": "def test_pathlib_path(tmpdir, engine):\n import pathlib\n\n df = pd.DataFrame({\"x\": [4, 5, 6, 1, 2, 3]})\n df.index.name = \"index\"\n ddf = dd.from_pan" }, { "id": 116604, "commit_id": "5f7345439b9317659e36eaa296aa7f7607ef7e79", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/lightwood_handler/lightwood_handler/lightwood_handler.py", "file_name": "lightwood_handler.py", "fun_name": "_retrain", "commit_message": "file storage 
factory", "code": "def _retrain(self, statement):\n model_name = statement.name.parts[-1]\n\n base_predictor_record = get_model_record(\n name=model_name,\n ml_handler_name='lightwood',\n company_id=self.company_id,\n active=True\n )\n\n if base_predictor_record is None:\n return Response(\n RESPONSE_TYPE.ERROR,\n error_message=f\"Error: model '{model_name}' does not exists!\"\n )\n\n new_predictor_record = db.Predictor(\n company_id=self.company_id,\n name=base_predictor_record.name,\n integration_id=base_predictor_record.integration_id,\n data_integration_id=base_predictor_record.data_integration_id,\n fetch_data_query=base_predictor_record.fetch_data_query,\n mindsdb_version=mindsdb_version,\n lightwood_version=lightwood_version,\n to_predict=base_predictor_record.to_predict,\n learn_args=base_predictor_record.learn_args,\n data={'name': base_predictor_record.name},\n active=False,\n status=PREDICTOR_STATUS.GENERATING\n )\n db.session.add(new_predictor_record)\n db.session.commit()\n\n data_handler_meta = self.handler_controller.get_by_id(base_predictor_record.data_integration_id)\n data_handler = self.handler_controller.get_handler(data_handler_meta['name'])\n ast = self.parser(base_predictor_record.fetch_data_query, dialect=self.dialect)\n response = data_handler.query(ast)\n if response.type == RESPONSE_TYPE.ERROR:\n return response\n\n new_predictor_record.training_data_columns_count = len(response.data_frame.columns)\n new_predictor_record.training_data_rows_count = len(response.data_frame)\n db.session.commit()\n\n predictor_storage = self.storage_factory(new_predictor_record.id)\n\n p = HandlerProcess(\n run_update,\n new_predictor_record.id,\n response.data_frame,\n self.company_id,\n str(predictor_storage.folder_path)\n )\n p.start()\n\n return Response(RESPONSE_TYPE.OK)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 528, "n_words": 88, "vocab_size": 68, "complexity": 3, "nloc": 48, "token_counts": 284, "n_ast_nodes": 435, "n_identifiers": 58, "random_cut": "def _retrain(self, statement):\n model_name = statement.name.parts[-1]\n\n base_predictor_record = get_model_record(\n name=model_name,\n ml_handler_name='lightwood',\n company_id=self.company_id,\n active=True\n )\n\n if base_predictor_record is None:\n return Response(\n RESPONSE_TYPE.ERROR,\n error_message=f\"Error: model '{model_name}' does not exists!\"\n )\n\n new_predictor_record = db.Predictor(\n company_id=self.company_id,\n name=base_predictor_record.name,\n integration_id=base_predictor_record.integration_id,\n data_integration_id=base_predictor_record.data_integration_id,\n fetch_data_query=base_predictor_record.fetch_data_query,\n mindsdb_version=mindsdb_version,\n lightwood_version=lightwood_version,\n to_predict=base_predictor_record.to_predict,\n learn_args=base_predictor_record.learn_args,\n data={'name': base_predictor_record.name},\n active=False,\n status=PREDICTOR_STATUS.GENERATING\n )\n db.session.add(new_predictor_record)\n db.session.commit()\n\n data_handler_meta = self.handler_controller.get_by_id(base_predictor_record.data_integration_id)\n data_handler = self.handler_controller.get_handler(data_handler_meta['name'])\n ast = self.parser(base_predictor_record.fetch_data_query, dialect=self.dialect)\n response = data_handler.query(ast)\n if response.type == RESPONSE_TYPE.ERROR:\n return response\n\n new_predictor_record.training_data_columns_count = len(response.data_frame.columns)\n 
new_predictor_record.training_data_rows_count = len(response.data_frame)\n db.session.commit()\n\n predictor_storage = self.storage_factory(new_predictor_record.id)\n\n p = HandlerProcess(\n run_update,\n new_predictor_record.id,\n response.data_frame,\n self.company_id,\n str(predictor_storage.folder_path)\n )\n p.start()\n\n " }, { "id": 89481, "commit_id": "0f6923d5dc11e8552e6620dd14596f1d4efe630c", "repo": "sentry", "path": "tests/sentry/issues/test_issue_occurrence.py", "file_name": "test_issue_occurrence.py", "fun_name": "test", "commit_message": "feat(issue_platform): Create a function to support saving an issue occurrence and related data (#42120)\n\nThis introduces a function for saving an `IssueOccurrence` based on\r\n`IssueOccurrenceData`, as well as an related event data. In future prs\r\nit will also handle creating/updating the related issue and passing this\r\ninformation to eventstream.", "code": "def test(self) -> None:\n occurrence = self.build_occurrence()\n occurrence.save(self.project.id)\n fetched_occurrence = IssueOccurrence.fetch(occurrence.id, self.project.id)\n assert fetched_occurrence is not None\n self.assert_occurrences_identical(occurrence, fetched_occurrence)\n\n\n@region_silo_test", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@region_silo_test", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 53, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 6, "token_counts": 53, "n_ast_nodes": 87, "n_identifiers": 12, "random_cut": "def test(self) -> None:\n occurrence = self.build_occurrence()\n occurrence.save(self.project.id)\n fetched_occurrence = IssueOccurrence.fetch(occurrence.id, self.project.id)\n assert fetched_occurrence is not None\n self.assert_occurrences_identical(occurrence, fetched" }, { "id": 76820, "commit_id": "defa7f8ed248354e57e90f3f5d31466de43c73f9", "repo": "wagtail", "path": "wagtail/admin/panels.py", "file_name": "panels.py", "fun_name": "bind_to", "commit_message": "Deprecate EditHandler.bind_to", "code": "def bind_to(self, model=None, instance=None, request=None, form=None):\n warn(\n \"The %s.bind_to() method has been replaced by bind_to_model(model) and get_bound_panel(instance=instance, request=request, form=form)\"\n % type(self).__name__,\n category=RemovedInWagtail219Warning,\n stacklevel=2,\n )\n return self.get_bound_panel(instance=instance, request=request, form=form)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 92, "n_words": 28, "vocab_size": 27, "complexity": 1, "nloc": 8, "token_counts": 58, "n_ast_nodes": 84, "n_identifiers": 13, "random_cut": "def bind_to(self, model=None, instance=None, request=None, form=None):\n warn(\n \"The %s.bind_to() method has been replaced by bind_to_model(model) and get_bound_panel(instance=instance, request=request, form=form)\"\n % type(self).__name__,\n category=RemovedInWagtail219Warning,\n stacklevel=2,\n )\n return self.get_bound_panel(instance=insta" }, { "id": 254724, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/backend/test/case/node/det.py", "file_name": "det.py", "fun_name": "export_2d", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* 
Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def export_2d() -> None:\n node = onnx.helper.make_node(\n 'Det',\n inputs=['x'],\n outputs=['y'],\n )\n\n x = np.arange(4).reshape(2, 2).astype(np.float32)\n y = np.linalg.det(x) # expect -2\n expect(node, inputs=[x], outputs=[y],\n name='test_det_2d')\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 107, "n_words": 25, "vocab_size": 23, "complexity": 1, "nloc": 10, "token_counts": 81, "n_ast_nodes": 131, "n_identifiers": 18, "random_cut": "def export_2d() -> None:\n node = onnx.helper.make_node(\n 'Det',\n " }, { "id": 85320, "commit_id": "d6bc97675d194a4d336ed4444d48c4a4fb349255", "repo": "sentry", "path": "tests/acceptance/test_project_settings_sampling.py", "file_name": "test_project_settings_sampling.py", "fun_name": "store_outcomes", "commit_message": "ref(sampling): Remove skip from acceptance tests - (#38440)", "code": "def store_outcomes(self, outcome, num_times=1):\n outcomes = []\n for _ in range(num_times):\n outcome_copy = outcome.copy()\n outcome_copy[\"timestamp\"] = outcome_copy[\"timestamp\"].strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n outcomes.append(outcome_copy)\n\n assert (\n requests.post(\n settings.SENTRY_SNUBA + \"/tests/outcomes/insert\", data=json.dumps(outcomes)\n ).status_code\n == 200\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 137, "n_words": 29, "vocab_size": 27, "complexity": 2, "nloc": 12, "token_counts": 76, "n_ast_nodes": 125, "n_identifiers": 19, "random_cut": "def store_outcomes(self, outcome, num_times=1):\n outcomes = []\n for _ in range(num_times):\n outcome_copy = outcome.copy()\n outcome_copy[\"timestamp\"] = outcome_copy[\"timestamp\"].strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n outcomes.append(outcome_copy)\n\n assert (\n re" }, { "id": 23840, "commit_id": "9816aebdb078ec14ca3141560f2431981c8948f5", "repo": "PaddleOCR", "path": "ppocr/data/imaug/rec_img_aug.py", "file_name": "rec_img_aug.py", "fun_name": "__call__", "commit_message": "add rec vitstr algorithm.", "code": "def __call__(self, data):\n img = data['image']\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n image_shape = self.image_shape\n if self.padding:\n imgC, imgH, imgW = image_shape\n # todo: change to 0 and modified image shape\n h = img.shape[0]\n w = img.shape[1]\n ratio = w / float(h)\n if math.ceil(imgH * ratio) > imgW:\n resized_w = imgW\n else:\n resized_w = int(math.ceil(imgH * ratio))\n resized_image = cv2.resize(img, (resized_w, imgH))\n norm_img = np.expand_dims(resized_image, -1)\n norm_img = norm_img.transpose((2, 0, 1))\n resized_image = norm_img.astype(np.float32) / 128. 
- 1.\n padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)\n padding_im[:, :, 0:resized_w] = resized_image\n data['image'] = padding_im\n return data\n if self.resize_type == 'PIL':\n image_pil = Image.fromarray(np.uint8(img))\n img = image_pil.resize(self.image_shape, self.inter_type)\n img = np.array(img)\n if self.resize_type == 'OpenCV':\n img = cv2.resize(img, self.image_shape)\n norm_img = np.expand_dims(img, -1)\n norm_img = norm_img.transpose((2, 0, 1))\n if self.scale:\n data['image'] = norm_img.astype(np.float32) / 128. - 1.\n else:\n data['image'] = norm_img.astype(np.float32) / 255.\n return data\n\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 477, "n_words": 140, "vocab_size": 76, "complexity": 6, "nloc": 34, "token_counts": 321, "n_ast_nodes": 496, "n_identifiers": 40, "random_cut": "def __call__(self, data):\n img = data['image']\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n image_shape = self.image_shape\n if self.padding:\n imgC, imgH, imgW = image_shape\n # todo: change to 0 and modified image shape\n h = img.shape[0]\n w = img.shape[1]\n ratio = w / float(h)\n if math.ceil(imgH * ratio) > imgW:\n resized_w = imgW\n else:\n resized_w = int(math.ceil(imgH * ratio))\n resized_image = cv2.resize(img, (resized_w, imgH))\n norm_img = np.expand_dims(resized_image, -1)\n norm_img = norm_img.transpose((2, 0, 1))\n resized_image = norm_img.astype(np.float32) / 128. - 1.\n padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)\n padding_im[:, :, 0:resized_w] = resized_image\n data['image'] = padding_im\n return data\n if self.resize_type == 'PIL':\n image_pil = Image.fromarray(np.uint8(img))\n img = image_pil.resize(self.image_shape, self.inter_type)\n " }, { "id": 110170, "commit_id": "8ef4e017f8a95db8704728a5fffd2c0384afc525", "repo": "matplotlib", "path": "lib/matplotlib/offsetbox.py", "file_name": "offsetbox.py", "fun_name": "_get_packed_offsets", "commit_message": "Don't pass unused xdescent to _get_packed_offsets.\n\nInstead of passing a list of (widths, xdescents) where xdescent is\nunused, just pass a list of widths. 
This helper is private so we just\nneed to adjust the call sites and tests with no deprecation.\n\nThis patch is preliminary work for some further cleanup on the offsetbox\nmodule.", "code": "def _get_packed_offsets(widths, total, sep, mode=\"fixed\"):\n r\n _api.check_in_list([\"fixed\", \"expand\", \"equal\"], mode=mode)\n\n if mode == \"fixed\":\n offsets_ = np.cumsum([0] + [w + sep for w in widths])\n offsets = offsets_[:-1]\n if total is None:\n total = offsets_[-1] - sep\n return total, offsets\n\n elif mode == \"expand\":\n # This is a bit of a hack to avoid a TypeError when *total*\n # is None and used in conjugation with tight layout.\n if total is None:\n total = 1\n if len(widths) > 1:\n sep = (total - sum(widths)) / (len(widths) - 1)\n else:\n sep = 0\n offsets_ = np.cumsum([0] + [w + sep for w in widths])\n offsets = offsets_[:-1]\n return total, offsets\n\n elif mode == \"equal\":\n maxh = max(widths)\n if total is None:\n if sep is None:\n raise ValueError(\"total and sep cannot both be None when \"\n \"using layout mode 'equal'\")\n total = (maxh + sep) * len(widths)\n else:\n sep = total / len(widths) - maxh\n offsets = (maxh + sep) * np.arange(len(widths))\n return total, offsets\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 420, "n_words": 163, "vocab_size": 79, "complexity": 11, "nloc": 74, "token_counts": 231, "n_ast_nodes": 381, "n_identifiers": 18, "random_cut": "def _get_packed_offsets(widths, total, sep, mode=\"fixed\"):\n r\n _api.check_in_list([\"fixed\", \"expand\", \"equal\"], mode=mode)\n\n if mode == \"fixed\":\n offsets_ = np.cumsum([0] + [w + sep for w in widths])\n offsets = offsets_[:-1]\n if total is None:\n total = offsets_[-1] - sep\n return total, offsets\n\n elif mode == \"expand\":\n # This is a bit of a hack to avoid a TypeError when *total*\n # is None and used in conjugation with tight layout.\n if total is None:\n total = 1\n if len(widths) > 1:\n sep = (total - sum(widths)) / (len(widths) - 1)\n else:\n sep = 0\n offsets_ = np.cumsum([0] + [w + sep for w in widths])\n offsets = offsets_[:-1]\n return total, offsets\n\n elif mode == \"equal\":\n maxh = max(widths)\n if total is None:\n if sep is None:\n raise ValueError(\"total and sep cannot both be None when \"\n \"using la" }, { "id": 181790, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/tpot_tests.py", "file_name": "tpot_tests.py", "fun_name": "test_fit_7", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_fit_7():\n \n tpot_obj = TPOTRegressor(\n random_state=42,\n population_size=1,\n offspring_size=2,\n generations=1,\n verbosity=0\n )\n tpot_obj.fit(pretest_X_reg, pretest_y_reg)\n\n assert isinstance(tpot_obj._optimized_pipeline, creator.Individual)\n assert not (tpot_obj._start_datetime is None)\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 74, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 11, "token_counts": 57, "n_ast_nodes": 87, "n_identifiers": 16, "random_cut": "def test_fit_7():\n \n tpot_obj = TPOTRegressor(\n random_state=42,\n population_size=1,\n offspring_size=2,\n generations=1,\n verbosity=0\n )\n tpot_obj.fit(pretest_X_reg, pretest_y_reg)\n\n assert isinstance(tpot_obj._optimized_pipeline, 
creator.Individual)\n assert not (tpot_obj._start_datetime is None)\n\n" }, { "id": 190916, "commit_id": "301124c5b377fa56b940d298900dbc5816dbc24e", "repo": "thumbor", "path": "tests/engines/test_gif.py", "file_name": "test_gif.py", "fun_name": "test_errors_on_gifsicle_should_not_raises_errors_when_output", "commit_message": "Reformat to 80 chars and mypy.ini", "code": "def test_errors_on_gifsicle_should_not_raises_errors_when_output(self):\n engine = Engine(self.context)\n with open(\n join(STORAGE_PATH, \"SmallFullColourGIF.gif\"), \"rb\"\n ) as image_file:\n buffer = image_file.read()\n\n engine.load(buffer, \".gif\")\n result = engine.run_gifsicle(\"--some-invalid-opt\")\n expect(result).Not.to_be_null()\n", "url": "https://github.com/thumbor/thumbor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 85, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 9, "token_counts": 61, "n_ast_nodes": 109, "n_identifiers": 17, "random_cut": "def test_errors_on_gifsicle_should_not_raises_errors_when_output(self):\n engine = Engine(self.context)\n with open(\n join(STORAGE_PATH, \"SmallFullColourGIF.gif\"), \"rb\"\n ) as image_file:\n buffer = image_file.read()\n\n engine.load(buffer, \".gif\")\n result = engine.run_gifsicle(\"--some-invalid-o" }, { "id": 209506, "commit_id": "08b1f9d67c8e716fd44036a027bdc90dcb9fcfdf", "repo": "scapy", "path": "scapy/contrib/http2.py", "file_name": "http2.py", "fun_name": "h2i", "commit_message": "E275 - Missing whitespace after keyword (#3711)\n\nCo-authored-by: Alexander Aring \r\nCo-authored-by: Anmol Sarma \r\nCo-authored-by: antoine.torre \r\nCo-authored-by: Antoine Vacher \r\nCo-authored-by: Arnaud Ebalard \r\nCo-authored-by: atlowl <86038305+atlowl@users.noreply.github.com>\r\nCo-authored-by: Brian Bienvenu \r\nCo-authored-by: Chris Packham \r\nCo-authored-by: CQ \r\nCo-authored-by: Daniel Collins \r\nCo-authored-by: Federico Maggi \r\nCo-authored-by: Florian Maury \r\nCo-authored-by: _Frky <3105926+Frky@users.noreply.github.com>\r\nCo-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com>\r\nCo-authored-by: gpotter2 \r\nCo-authored-by: Guillaume Valadon \r\nCo-authored-by: Hao Zheng \r\nCo-authored-by: Haresh Khandelwal \r\nCo-authored-by: Harri Hämäläinen \r\nCo-authored-by: hecke \r\nCo-authored-by: Jan Romann \r\nCo-authored-by: Jan Sebechlebsky \r\nCo-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com>\r\nCo-authored-by: jockque <38525640+jockque@users.noreply.github.com>\r\nCo-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com>\r\nCo-authored-by: Keith Scott \r\nCo-authored-by: Kfir Gollan \r\nCo-authored-by: Lars Munch \r\nCo-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com>\r\nCo-authored-by: Leonard Crestez \r\nCo-authored-by: Marcel Patzlaff \r\nCo-authored-by: Martijn Thé \r\nCo-authored-by: Martine Lenders \r\nCo-authored-by: Michael Farrell \r\nCo-authored-by: Michał Mirosław \r\nCo-authored-by: mkaliszan \r\nCo-authored-by: mtury \r\nCo-authored-by: Neale Ranns \r\nCo-authored-by: Octavian Toader \r\nCo-authored-by: Peter Eisenlohr \r\nCo-authored-by: Phil \r\nCo-authored-by: Pierre Lalet \r\nCo-authored-by: Pierre Lorinquer \r\nCo-authored-by: piersoh <42040737+piersoh@users.noreply.github.com>\r\nCo-authored-by: plorinquer \r\nCo-authored-by: pvinci \r\nCo-authored-by: Rahul Jadhav \r\nCo-authored-by: Robin Jarry \r\nCo-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com>\r\nCo-authored-by: rperez 
\r\nCo-authored-by: Sabrina Dubroca \r\nCo-authored-by: Sebastian Baar \r\nCo-authored-by: sebastien mainand \r\nCo-authored-by: smehner1 \r\nCo-authored-by: speakinghedge \r\nCo-authored-by: Steven Van Acker \r\nCo-authored-by: Thomas Faivre \r\nCo-authored-by: Tran Tien Dat \r\nCo-authored-by: Wael Mahlous \r\nCo-authored-by: waeva <74464394+waeva@users.noreply.github.com>\r\n\r\nCo-authored-by: Alexander Aring \r\nCo-authored-by: Anmol Sarma \r\nCo-authored-by: antoine.torre \r\nCo-authored-by: Antoine Vacher \r\nCo-authored-by: Arnaud Ebalard \r\nCo-authored-by: atlowl <86038305+atlowl@users.noreply.github.com>\r\nCo-authored-by: Brian Bienvenu \r\nCo-authored-by: Chris Packham \r\nCo-authored-by: CQ \r\nCo-authored-by: Daniel Collins \r\nCo-authored-by: Federico Maggi \r\nCo-authored-by: Florian Maury \r\nCo-authored-by: _Frky <3105926+Frky@users.noreply.github.com>\r\nCo-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com>\r\nCo-authored-by: gpotter2 \r\nCo-authored-by: Guillaume Valadon \r\nCo-authored-by: Hao Zheng \r\nCo-authored-by: Haresh Khandelwal \r\nCo-authored-by: Harri Hämäläinen \r\nCo-authored-by: hecke \r\nCo-authored-by: Jan Romann \r\nCo-authored-by: Jan Sebechlebsky \r\nCo-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com>\r\nCo-authored-by: jockque <38525640+jockque@users.noreply.github.com>\r\nCo-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com>\r\nCo-authored-by: Keith Scott \r\nCo-authored-by: Kfir Gollan \r\nCo-authored-by: Lars Munch \r\nCo-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com>\r\nCo-authored-by: Leonard Crestez \r\nCo-authored-by: Marcel Patzlaff \r\nCo-authored-by: Martijn Thé \r\nCo-authored-by: Martine Lenders \r\nCo-authored-by: Michael Farrell \r\nCo-authored-by: Michał Mirosław \r\nCo-authored-by: mkaliszan \r\nCo-authored-by: mtury \r\nCo-authored-by: Neale Ranns \r\nCo-authored-by: Octavian Toader \r\nCo-authored-by: Peter Eisenlohr \r\nCo-authored-by: Phil \r\nCo-authored-by: Pierre Lalet \r\nCo-authored-by: Pierre Lorinquer \r\nCo-authored-by: piersoh <42040737+piersoh@users.noreply.github.com>\r\nCo-authored-by: pvinci \r\nCo-authored-by: Rahul Jadhav \r\nCo-authored-by: Robin Jarry \r\nCo-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com>\r\nCo-authored-by: rperez \r\nCo-authored-by: Sabrina Dubroca \r\nCo-authored-by: Sebastian Baar \r\nCo-authored-by: sebastien mainand \r\nCo-authored-by: smehner1 \r\nCo-authored-by: Steven Van Acker \r\nCo-authored-by: Thomas Faivre \r\nCo-authored-by: Tran Tien Dat \r\nCo-authored-by: Wael Mahlous \r\nCo-authored-by: waeva <74464394+waeva@users.noreply.github.com>", "code": "def h2i(self, pkt, x):\n # type: (Optional[packet.Packet], Optional[int]) -> Optional[int]\n \n assert not isinstance(x, six.integer_types) or x >= 0\n return x\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 48, "n_words": 20, "vocab_size": 19, "complexity": 2, "nloc": 3, "token_counts": 26, "n_ast_nodes": 42, "n_identifiers": 7, "random_cut": "def h2i(self, pkt, x):\n # type: (Optional[packet.Packet], Optional[int]) -> Optional[int]\n \n assert not isinstance(x, six.integer_types) or x >= 0\n return" }, { "id": 117345, "commit_id": "9ce5a21dd6359fd7e8ebf78051ce9e97bd195ec9", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/crate_handler/crate_handler.py", "file_name": "crate_handler.py", "fun_name": "disconnect", "commit_message": "ML 
handler supbrocess (#3377)\n\n* log -> logger\r\n\r\ndividing components:\r\n\r\napp initialize\r\n parse args\r\n set env.MINDSDB_CONFIG_PATH\r\n\r\nconfig\r\n requiers env.MINDSDB_CONFIG_PATH\r\n sets\r\n env.MINDSDB_DB_CON\r\n Config() - makes initialization\r\n\r\nlog\r\n uses config\r\n initialize_log - makes initialization\r\n\r\ndatabase\r\n uses env.MINDSDB_DB_CON\r\n have init() method\r\n\r\nfile storage\r\n uses config\r\n\r\n* partial sync for model storage interfaces\r\n\r\n* ml handler in subprocess interface\r\n\r\n* fix delete model\r\n\r\n* fix: model with error in success status\r\n\r\n* fix: remove hf predictor\r\n\r\n* fix pg handler\r\n\r\n* MLHandlerPersistWrapper keeps wrapper process opened\r\n\r\n* predictor with error keeps 'success' status\r\n\r\n#3362\r\n\r\n* lock for sending tasks to subprocess one by one\r\n\r\n* check target of predictor before run learn in subproccess\r\n\r\n* fix check target\r\n\r\n* fix: json_ai override and problem definition generation\r\n\r\n* fix None case\r\n\r\n* folder for ml handler tests\r\n\r\n* fix: add timeseries_settings key to learn_args\r\n\r\n* fixes in lw_handler\r\n\r\n* fix: del join_learn_process\r\n\r\n* tests for LW handler\r\n\r\n* finish unit test for LW\r\n\r\n* changes in tests:\r\n- set-project -> to base class\r\n- return of ml handler is dataframe\r\n- draft for project structure test\r\n\r\n* merge from staging\r\n\r\n* create_validation method to check learn params before send to subprocess\r\nfixes of HF\r\nfixed version of transformers in HF requirements\r\n\r\nCo-authored-by: Patricio Cerda Mardini ", "code": "def disconnect(self):\n \n\n if self.is_connected is False:\n return\n try:\n self.connection.close()\n self.is_connected=False\n except Exception as e:\n log.logger.error(f\"Error while disconnecting to CrateDB, {e}\")\n\n return \n\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 101, "n_words": 21, "vocab_size": 20, "complexity": 3, "nloc": 9, "token_counts": 43, "n_ast_nodes": 80, "n_identifiers": 10, "random_cut": "def disconnect(self):\n \n\n if self.is_connected is False:\n return\n try:\n self.connection.close()\n self.is_connected=False\n except Exception as e:\n log.logger.error(f\"Error while disconnecting to CrateDB, {e}\")\n" }, { "id": 29308, "commit_id": "d90be220d6b687d08153934a51354011a3cb5ca1", "repo": "saleor", "path": "saleor/graphql/product/tests/queries/test_products_query.py", "file_name": "test_products_query.py", "fun_name": "test_sort_products", "commit_message": "Split test_product.py and test_variant.py into multiple files (#11173)\n\n* Split test_product.py into multiple files\r\n\r\n* Split test_variant.py into multiple files", "code": "def test_sort_products(user_api_client, product, channel_USD):\n product.updated_at = datetime.utcnow()\n product.save()\n\n product.pk = None\n product.slug = \"second-product\"\n product.updated_at = datetime.utcnow()\n product.save()\n ProductChannelListing.objects.create(\n product=product,\n channel=channel_USD,\n is_published=True,\n visible_in_listings=True,\n )\n variant = ProductVariant.objects.create(product=product, sku=\"1234\")\n ProductVariantChannelListing.objects.create(\n variant=variant,\n channel=channel_USD,\n price_amount=Decimal(20),\n cost_price_amount=Decimal(2),\n currency=channel_USD.currency_code,\n )\n product.pk = None\n product.slug = \"third-product\"\n product.updated_at = datetime.utcnow()\n product.save()\n 
ProductChannelListing.objects.create(\n product=product,\n channel=channel_USD,\n is_published=True,\n visible_in_listings=True,\n )\n variant_second = ProductVariant.objects.create(product=product, sku=\"12345\")\n ProductVariantChannelListing.objects.create(\n variant=variant_second,\n channel=channel_USD,\n currency=channel_USD.currency_code,\n )\n variables = {\"channel\": channel_USD.slug}\n query = SORT_PRODUCTS_QUERY\n\n # Test sorting by PRICE, ascending\n sort_by = \"{field: PRICE, direction: ASC}\"\n asc_price_query = query % {\"sort_by_product_order\": sort_by}\n response = user_api_client.post_graphql(asc_price_query, variables)\n content = get_graphql_content(response)\n edges = content[\"data\"][\"products\"][\"edges\"]\n assert len(edges) == 2\n price1 = edges[0][\"node\"][\"pricing\"][\"priceRangeUndiscounted\"][\"start\"][\"gross\"][\n \"amount\"\n ]\n price2 = edges[1][\"node\"][\"pricing\"][\"priceRangeUndiscounted\"][\"start\"][\"gross\"][\n \"amount\"\n ]\n assert price1 < price2\n\n # Test sorting by PRICE, descending\n sort_by = \"{field: PRICE, direction:DESC}\"\n desc_price_query = query % {\"sort_by_product_order\": sort_by}\n response = user_api_client.post_graphql(desc_price_query, variables)\n content = get_graphql_content(response)\n edges = content[\"data\"][\"products\"][\"edges\"]\n price1 = edges[0][\"node\"][\"pricing\"][\"priceRangeUndiscounted\"][\"start\"][\"gross\"][\n \"amount\"\n ]\n price2 = edges[1][\"node\"][\"pricing\"][\"priceRangeUndiscounted\"][\"start\"][\"gross\"][\n \"amount\"\n ]\n assert price1 > price2\n\n # Test sorting by MINIMAL_PRICE, ascending\n sort_by = \"{field: MINIMAL_PRICE, direction:ASC}\"\n asc_price_query = query % {\"sort_by_product_order\": sort_by}\n response = user_api_client.post_graphql(asc_price_query, variables)\n content = get_graphql_content(response)\n edges = content[\"data\"][\"products\"][\"edges\"]\n price1 = edges[0][\"node\"][\"pricing\"][\"priceRange\"][\"start\"][\"gross\"][\"amount\"]\n price2 = edges[1][\"node\"][\"pricing\"][\"priceRange\"][\"start\"][\"gross\"][\"amount\"]\n assert price1 < price2\n\n # Test sorting by MINIMAL_PRICE, descending\n sort_by = \"{field: MINIMAL_PRICE, direction:DESC}\"\n desc_price_query = query % {\"sort_by_product_order\": sort_by}\n response = user_api_client.post_graphql(desc_price_query, variables)\n content = get_graphql_content(response)\n edges = content[\"data\"][\"products\"][\"edges\"]\n price1 = edges[0][\"node\"][\"pricing\"][\"priceRange\"][\"start\"][\"gross\"][\"amount\"]\n price2 = edges[1][\"node\"][\"pricing\"][\"priceRange\"][\"start\"][\"gross\"][\"amount\"]\n assert price1 > price2\n\n # Test sorting by DATE, ascending\n asc_date_query = query % {\"sort_by_product_order\": \"{field: DATE, direction:ASC}\"}\n response = user_api_client.post_graphql(asc_date_query, variables)\n content = get_graphql_content(response)\n date_0 = content[\"data\"][\"products\"][\"edges\"][0][\"node\"][\"updatedAt\"]\n date_1 = content[\"data\"][\"products\"][\"edges\"][1][\"node\"][\"updatedAt\"]\n assert parse_datetime(date_0) < parse_datetime(date_1)\n\n # Test sorting by DATE, descending\n desc_date_query = query % {\"sort_by_product_order\": \"{field: DATE, direction:DESC}\"}\n response = user_api_client.post_graphql(desc_date_query, variables)\n content = get_graphql_content(response)\n date_0 = content[\"data\"][\"products\"][\"edges\"][0][\"node\"][\"updatedAt\"]\n date_1 = content[\"data\"][\"products\"][\"edges\"][1][\"node\"][\"updatedAt\"]\n assert 
parse_datetime(date_0) > parse_datetime(date_1)\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 660, "n_words": 290, "vocab_size": 89, "complexity": 1, "nloc": 92, "token_counts": 746, "n_ast_nodes": 1297, "n_identifiers": 45, "random_cut": "def test_sort_products(user_api_client, product, channel_USD):\n product.updated_at = datetime.utcnow()\n product.save()\n\n product.pk = None\n product.slug = \"second-product\"\n product.updated_at = datetime.utcnow()\n product.save()\n ProductChannelListing.objects.create(\n product=product,\n channel=channel_USD,\n is_published=True,\n visible_in_listings=True,\n )\n variant = ProductVariant.objects.create(product=product, sku=\"1234\")\n ProductVariantChannelListing.objects.create(\n variant=variant,\n channel=channel_USD,\n price_amount=Decimal(20),\n cost_price_amount=Decimal(2),\n currency=channel_USD.currency_code,\n )\n product.pk = None\n product.slug = \"third-product\"\n product.updated_at = datetime.utcnow()\n product.save()\n ProductChannelListing.objects.create(\n product=product,\n channel=channel_USD,\n is_published=True,\n visible_in_listings=True,\n )\n variant_second = ProductVariant.objects.create(product=product, sku=\"12345\")\n ProductVariantChannelListing.objects.create(\n variant=variant_second,\n channel=channel_USD,\n currency=channel_USD.currency_code,\n )\n variables = {\"channel\": channel_USD.slug}\n query = SORT_PRODUCTS_QUERY\n\n # Test sorting by PRICE, ascending\n sort_by = \"{field: PRICE, direction: ASC}\"\n asc_price_query = query % {\"sort_by_product_order\": sort_by}\n response = user_api_client.post_graphql(asc_price_query, variables)\n content = get_graphql_content(response)\n edges = content[\"data\"][\"products\"][\"edges\"]\n assert len(edges) == 2\n price1 = edges[0][\"node\"][\"pricing\"][\"priceRangeUndiscounted\"][\"start\"][\"gross\"][\n \"amount\"\n ]\n price2 = edges[1][\"node\"][\"pricing\"][\"priceRangeUndiscounted\"][\"start\"][\"gross\"][\n \"amount\"\n ]\n assert price1 < price2\n\n # Test sorting by PRICE, descending\n sort_by = \"{field: PRICE, direction:DESC}\"\n desc_price_query = query % {\"sort_by_product_order\": sort_by}\n response = user_api_client.post_graphql(desc_price_query, variables)\n content = get_graphql_content(response)\n edges = content[\"data\"][\"products\"][\"edges\"]\n price1 = edges[0][\"node\"][\"pricing\"][\"priceRangeUndiscounted\"][\"start\"][\"gross\"][\n \"amount\"\n ]\n price2 = edges[1][\"node\"][\"pricing\"][\"priceRangeUndiscounted\"][\"start\"][\"gross\"][\n \"amount\"\n ]\n assert price1 > price2\n\n # Test sorting by MINIMAL_PRICE, ascending\n sort_by = \"{field: MINIMAL_PRICE, direction:ASC}\"\n asc_price_query = query % {\"sort_by_product_order\": sort_by}\n response = user_api_client.post_graphql(asc_price_query, variables)\n content = get_graphql_content(response)\n edges = content[\"data\"][\"products\"][\"edges\"]\n price1 = edges[0][\"node\"][\"pricing\"][\"priceRange\"][\"start\"][\"gross\"][\"amount\"]\n price2 = edges[1][\"node\"][\"pricing\"][\"priceRange\"][\"start\"][\"gross\"][\"amount\"]\n assert price1 < price2\n\n # Test sorting by MINIMAL_PRICE, descending\n sort_by = \"{field: MINIMAL_PRICE, direction:DESC}\"\n desc_price_query = query % {\"sort_by_product_order\": sort_by}\n response = user_api_client.post_graphql(desc_price_query, variables)\n content = get_graphql_content(response)\n edges = content[\"data\"][\"products\"][\"edges\"]\n 
price1 = edges[0][\"node\"][\"pricing\"][\"priceRange\"][\"start\"][\"gross\"][\"amount\"]\n price2 = edges[1][\"node\"][\"pricing\"][\"priceRange\"][\"start\"][\"gross\"][\"amount\"]\n assert price1 > price2\n\n # Test sorting by DATE, ascending\n asc_date_query = query % {\"sort_by_product_order\": \"{field: DATE, direction:ASC}\"}\n response = user_api_client.post_graphql(asc_date_query, variables)\n content = get_graphql_content(response)\n date_0 = conten" }, { "id": 247699, "commit_id": "1da0f79d5455b594f2aa989106a672786f5b990f", "repo": "synapse", "path": "tests/rest/client/test_relations.py", "file_name": "test_relations.py", "fun_name": "test_edit_thread", "commit_message": "Refactor relations tests (#12232)\n\n* Moves the relation pagination tests to a separate class.\r\n* Move the assertion of the response code into the `_send_relation` helper.\r\n* Moves some helpers into the base-class.", "code": "def test_edit_thread(self) -> None:\n \n\n # Create a thread and edit the last event.\n channel = self._send_relation(\n RelationTypes.THREAD,\n \"m.room.message\",\n content={\"msgtype\": \"m.text\", \"body\": \"A threaded reply!\"},\n )\n threaded_event_id = channel.json_body[\"event_id\"]\n\n new_body = {\"msgtype\": \"m.text\", \"body\": \"I've been edited!\"}\n channel = self._send_relation(\n RelationTypes.REPLACE,\n \"m.room.message\",\n content={\"msgtype\": \"m.text\", \"body\": \"foo\", \"m.new_content\": new_body},\n parent_id=threaded_event_id,\n )\n\n # Fetch the thread root, to get the bundled aggregation for the thread.\n channel = self.make_request(\n \"GET\",\n f\"/rooms/{self.room}/event/{self.parent_id}\",\n access_token=self.user_token,\n )\n self.assertEqual(200, channel.code, channel.json_body)\n\n # We expect that the edit message appears in the thread summary in the\n # unsigned relations section.\n relations_dict = channel.json_body[\"unsigned\"].get(\"m.relations\")\n self.assertIn(RelationTypes.THREAD, relations_dict)\n\n thread_summary = relations_dict[RelationTypes.THREAD]\n self.assertIn(\"latest_event\", thread_summary)\n latest_event_in_thread = thread_summary[\"latest_event\"]\n self.assertEqual(latest_event_in_thread[\"content\"][\"body\"], \"I've been edited!\")\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 357, "n_words": 107, "vocab_size": 74, "complexity": 1, "nloc": 27, "token_counts": 176, "n_ast_nodes": 324, "n_identifiers": 23, "random_cut": "def test_edit_thread(self) -> None:\n \n\n # Create a thread and edit the last event.\n channel = self._send_relation(\n RelationTypes.THREAD,\n \"m.room.message\",\n content={\"msgtype\": \"m.text\", \"body\": \"A threaded reply!\"},\n )\n threaded_event_id = channel.json_body[\"event_id\"]\n\n new_body = {\"msgtype\": \"m.text\", \"body\": \"I've been edited!\"}\n channel = self._send_relation(\n RelationTypes.REPLACE,\n \"m.room.message\",\n content={\"msgtype\": \"m.text\", \"body\": \"foo\", \"m.new_content\": new_body},\n parent_id=threaded_event_id,\n )\n\n # Fetch the thread root, to get the bundled aggregation for the thread.\n channel = self.make_request(\n \"GET\",\n f\"/rooms/{self.room}/event/{self.parent_id}\",\n access_token=self.user_token,\n )\n self.assertEqual(200, channel.code, channel.json_body)\n\n # We expect that the edit message appears in the thread summary in the\n # unsigned relations section.\n relations_dict = channel.json_body[\"unsigned\"].get(\"m.r" }, { "id": 271662, 
"commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_distributed_v1.py", "file_name": "training_distributed_v1.py", "fun_name": "fit", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def fit(self, *args, **kwargs):\n return _train_with_multi_worker(self._single_worker_loop.fit)(\n *args, **kwargs\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 33, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 27, "n_ast_nodes": 42, "n_identifiers": 6, "random_cut": "def fit(self, *args, **kwargs):\n return _train_with_multi_worker(self._single_worker_loop.fit)(\n *args, **kwarg" }, { "id": 280055, "commit_id": "c269e3cd8fed713fb54d2971319df0bfe6e1bf10", "repo": "keras", "path": "keras/activations.py", "file_name": "activations.py", "fun_name": "deserialize", "commit_message": "Move serialization-related logic in utils/generic_utils.py to saving/legacy/serialization.py.\n\nPiperOrigin-RevId: 479688207", "code": "def deserialize(name, custom_objects=None):\n \n activation_functions = {}\n current_module = sys.modules[__name__]\n\n # we put 'current_module' after 'activation_layers' to prefer the local one\n # if there is a collision\n generic_utils.populate_dict_with_module_objects(\n activation_functions,\n (activation_layers, current_module),\n obj_filter=callable,\n )\n\n return serialization.deserialize_keras_object(\n name,\n module_objects=activation_functions,\n custom_objects=custom_objects,\n printable_module_name=\"activation function\",\n )\n\n\n@keras_export(\"keras.activations.get\")\n@tf.__internal__.dispatch.add_dispatch_support", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.activations.get\")\n@tf.__internal__.dispatch.add_dispatch_support", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 116, "n_words": 42, "vocab_size": 39, "complexity": 1, "nloc": 14, "token_counts": 59, "n_ast_nodes": 114, "n_identifiers": 22, "random_cut": "def deserialize(name, custom_objects=None):\n \n activation_functions = {}\n current_module = sys.modules[__name__]\n\n # we put 'current_module' after 'activation_layers' to prefer the local one\n # if there is a collision\n generic_utils.populate_dict_with_module_objects(\n activation_functions,\n (activation_layers, current_module),\n obj_filter=callable,\n )\n\n return serialization.deserialize_keras_object(\n name,\n module_objects=activation_functions,\n custom_objects=custom_objects,\n printable_module_name=\"activation function\",\n )\n\n\n@keras_export(\"keras.activations.get\")\n@tf.__internal__.dispatch.add_dispatch_support" }, { "id": 269556, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "cumprod", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def cumprod(x, axis=0):\n \n return tf.math.cumprod(x, axis=axis)\n\n\n@keras_export(\"keras.backend.var\")\n@doc_controls.do_not_generate_docs", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.backend.var\")\n@doc_controls.do_not_generate_docs", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 12, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 54, 
"n_identifiers": 8, "random_cut": "def cumprod(x, axis=0):\n \n return tf.math.cumprod(x, axis" }, { "id": 131961, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/train/examples/train_fashion_mnist_example.py", "file_name": "train_fashion_mnist_example.py", "fun_name": "validate_epoch", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def validate_epoch(dataloader, model, loss_fn):\n size = len(dataloader.dataset) // train.world_size()\n num_batches = len(dataloader)\n model.eval()\n test_loss, correct = 0, 0\n with torch.no_grad():\n for X, y in dataloader:\n pred = model(X)\n test_loss += loss_fn(pred, y).item()\n correct += (pred.argmax(1) == y).type(torch.float).sum().item()\n test_loss /= num_batches\n correct /= size\n print(\n f\"Test Error: \\n \"\n f\"Accuracy: {(100 * correct):>0.1f}%, \"\n f\"Avg loss: {test_loss:>8f} \\n\"\n )\n return test_loss\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 150, "n_words": 60, "vocab_size": 48, "complexity": 2, "nloc": 18, "token_counts": 114, "n_ast_nodes": 212, "n_identifiers": 24, "random_cut": "def validate_epoch(dataloader, model, loss_fn):\n size = len(dataloader.dataset) // train.world_size()\n num_batches = len(dataloader)\n model.eval()\n test_loss, correct = 0, 0\n with torch.no_grad():\n for X, y in dataloader:\n pred = model(X)\n test_loss += loss_fn(pred, y).item()\n correct += (pred.argmax(1) == y).type(torch.float).sum().item()\n test_loss /= num_batches\n correct /= size\n print(\n f\"Test Error: \\n \"\n f\"Accuracy: {(100 * correct):>0.1f}%, " }, { "id": 75868, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/search/tests/test_queries.py", "file_name": "test_queries.py", "fun_name": "test_only_query", "commit_message": "Reformat with black", "code": "def test_only_query(self):\n filters, query = separate_filters_from_query(\"hello world\")\n\n self.assertDictEqual(filters, {})\n self.assertEqual(query, \"hello world\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 32, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 52, "n_identifiers": 7, "random_cut": "def test_only_query(self):\n filters, query = separate_filters_from_query(\"hello world\")\n\n self.assertDictEqual(filters, {})\n sel" }, { "id": 62444, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/html5lib/_tokenizer.py", "file_name": "_tokenizer.py", "fun_name": "rcdataState", "commit_message": "upd; format", "code": "def rcdataState(self):\n data = self.stream.char()\n if data == \"&\":\n self.state = self.characterReferenceInRcdata\n elif data == \"<\":\n self.state = self.rcdataLessThanSignState\n elif data == EOF:\n # Tokenization ends.\n return False\n elif data == \"\\u0000\":\n self.tokenQueue.append({\"type\": tokenTypes[\"ParseError\"],\n \"data\": \"invalid-codepoint\"})\n self.tokenQueue.append({\"type\": tokenTypes[\"Characters\"],\n \"data\": \"\\uFFFD\"})\n elif data in spaceCharacters:\n # Directly after emitting a token you switch back to the \"data\n # state\". 
At that point spaceCharacters are important so they are\n # emitted separately.\n self.tokenQueue.append({\"type\": tokenTypes[\"SpaceCharacters\"], \"data\":\n data + self.stream.charsUntil(spaceCharacters, True)})\n # No need to update lastFourChars here, since the first space will\n # have already been appended to lastFourChars and will have broken\n # any sequences\n else:\n chars = self.stream.charsUntil((\"&\", \"<\", \"\\u0000\"))\n self.tokenQueue.append({\"type\": tokenTypes[\"Characters\"], \"data\":\n data + chars})\n return True\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 480, "n_words": 120, "vocab_size": 81, "complexity": 6, "nloc": 21, "token_counts": 164, "n_ast_nodes": 300, "n_identifiers": 15, "random_cut": "def rcdataState(self):\n data = self.stream.char()\n if data == \"&\":\n self.state = self.characterReferenceInRcdata\n elif data == \"<\":\n self.state = self.rcdataLessThanSignState\n elif data == EOF:\n " }, { "id": 75100, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/rect.py", "file_name": "rect.py", "fun_name": "transform", "commit_message": "Reformat with black", "code": "def transform(self, transform):\n # Transform each corner of the rect\n tl_transformed = transform.transform_vector(Vector(self.left, self.top))\n tr_transformed = transform.transform_vector(Vector(self.right, self.top))\n bl_transformed = transform.transform_vector(Vector(self.left, self.bottom))\n br_transformed = transform.transform_vector(Vector(self.right, self.bottom))\n\n # Find extents of the transformed corners\n left = min(\n [tl_transformed.x, tr_transformed.x, bl_transformed.x, br_transformed.x]\n )\n right = max(\n [tl_transformed.x, tr_transformed.x, bl_transformed.x, br_transformed.x]\n )\n top = min(\n [tl_transformed.y, tr_transformed.y, bl_transformed.y, br_transformed.y]\n )\n bottom = max(\n [tl_transformed.y, tr_transformed.y, bl_transformed.y, br_transformed.y]\n )\n\n return Rect(left, top, right, bottom)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 218, "n_words": 70, "vocab_size": 43, "complexity": 1, "nloc": 18, "token_counts": 174, "n_ast_nodes": 255, "n_identifiers": 17, "random_cut": "def transform(self, transform):\n # Transform each corner of the rect\n tl_transformed = transform.transform_vector(Vector(self.left, self.top))\n tr_transformed = transform.transform_vector(Vector(self.right, self.top))\n bl_transformed = transform.transform_vector(Vector(self.left, self.bottom))\n br_transformed = transform.transform_vector(Vector(self.right, self.bottom))\n\n # Find extents of the transformed corners\n left = min(\n [tl_transformed.x, tr_transformed.x, bl_transformed.x, br_transformed.x]\n )\n right = max(\n [tl_transformed.x, tr_transformed.x, bl_transformed.x, br_transformed.x]\n )\n top = min(\n [tl_transformed.y, tr_transformed.y, bl_transformed.y, br_transformed.y]\n )\n bottom = max(\n [tl_transformed.y, tr_transformed.y, bl_transformed.y, br_transformed.y]\n )\n\n re" }, { "id": 297834, "commit_id": "cb13418babd21a1e9584978b0c523f1b1e4e1cb0", "repo": "core", "path": "homeassistant/components/flux/switch.py", "file_name": "switch.py", "fun_name": "async_flux_update", "commit_message": "String formatting and max line length - Part 2 (#84393)", "code": "async def async_flux_update(self, 
utcnow=None):\n \n if utcnow is None:\n utcnow = dt_utcnow()\n\n now = as_local(utcnow)\n\n sunset = get_astral_event_date(self.hass, SUN_EVENT_SUNSET, now.date())\n start_time = self.find_start_time(now)\n stop_time = self.find_stop_time(now)\n\n if stop_time <= start_time:\n # stop_time does not happen in the same day as start_time\n if start_time < now:\n # stop time is tomorrow\n stop_time += datetime.timedelta(days=1)\n elif now < start_time:\n # stop_time was yesterday since the new start_time is not reached\n stop_time -= datetime.timedelta(days=1)\n\n if start_time < now < sunset:\n # Daytime\n time_state = \"day\"\n temp_range = abs(self._start_colortemp - self._sunset_colortemp)\n day_length = int(sunset.timestamp() - start_time.timestamp())\n seconds_from_start = int(now.timestamp() - start_time.timestamp())\n percentage_complete = seconds_from_start / day_length\n temp_offset = temp_range * percentage_complete\n if self._start_colortemp > self._sunset_colortemp:\n temp = self._start_colortemp - temp_offset\n else:\n temp = self._start_colortemp + temp_offset\n else:\n # Night time\n time_state = \"night\"\n\n if now < stop_time:\n if stop_time < start_time and stop_time.day == sunset.day:\n # we need to use yesterday's sunset time\n sunset_time = sunset - datetime.timedelta(days=1)\n else:\n sunset_time = sunset\n\n night_length = int(stop_time.timestamp() - sunset_time.timestamp())\n seconds_from_sunset = int(now.timestamp() - sunset_time.timestamp())\n percentage_complete = seconds_from_sunset / night_length\n else:\n percentage_complete = 1\n\n temp_range = abs(self._sunset_colortemp - self._stop_colortemp)\n temp_offset = temp_range * percentage_complete\n if self._sunset_colortemp > self._stop_colortemp:\n temp = self._sunset_colortemp - temp_offset\n else:\n temp = self._sunset_colortemp + temp_offset\n rgb = color_temperature_to_rgb(temp)\n x_val, y_val, b_val = color_RGB_to_xy_brightness(*rgb)\n brightness = self._brightness if self._brightness else b_val\n if self._disable_brightness_adjust:\n brightness = None\n if self._mode == MODE_XY:\n await async_set_lights_xy(\n self.hass, self._lights, x_val, y_val, brightness, self._transition\n )\n _LOGGER.debug(\n (\n \"Lights updated to x:%s y:%s brightness:%s, %s%% \"\n \"of %s cycle complete at %s\"\n ),\n x_val,\n y_val,\n brightness,\n round(percentage_complete * 100),\n time_state,\n now,\n )\n elif self._mode == MODE_RGB:\n await async_set_lights_rgb(self.hass, self._lights, rgb, self._transition)\n _LOGGER.debug(\n \"Lights updated to rgb:%s, %s%% of %s cycle complete at %s\",\n rgb,\n round(percentage_complete * 100),\n time_state,\n now,\n )\n else:\n # Convert to mired and clamp to allowed values\n mired = color_temperature_kelvin_to_mired(temp)\n await async_set_lights_temp(\n self.hass, self._lights, mired, brightness, self._transition\n )\n _LOGGER.debug(\n (\n \"Lights updated to mired:%s brightness:%s, %s%% \"\n \"of %s cycle complete at %s\"\n ),\n mired,\n brightness,\n round(percentage_complete * 100),\n time_state,\n now,\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1491, "n_words": 333, "vocab_size": 146, "complexity": 15, "nloc": 87, "token_counts": 493, "n_ast_nodes": 787, "n_identifiers": 57, "random_cut": "async def async_flux_update(self, utcnow=None):\n \n if utcnow is None:\n utcnow = dt_utcnow()\n\n now = as_local(utcnow)\n\n sunset = get_astral_event_date(self.hass, SUN_EVENT_SUNSET, 
now.date())\n start_time = self.find_start_time(now)\n stop_time = self.find_stop_time(now)\n\n if stop_time <= start_time:\n # stop_time does not happen in the same day as start_time\n if start_time < now:\n # stop time is tomorrow\n stop_time += datetime.timedelta(days=1)\n elif now < start_time:\n # stop_time was yesterday since the new start_time is not reached\n stop_time -= datetime.timedelta(days=1)\n\n if start_time < now < sunset:\n # Daytime\n time_state = \"day\"\n temp_range = abs(self._start_colortemp - self._sunset_colortemp)\n day_length = int(sunset.timestamp() - start_time.timestamp(" }, { "id": 44273, "commit_id": "0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e", "repo": "airflow", "path": "tests/providers/slack/hooks/test_slack.py", "file_name": "test_slack.py", "fun_name": "test_api_call", "commit_message": "Return slack api call response in slack_hook (#21107)", "code": "def test_api_call(self, slack_client_class_mock):\n slack_client_mock = mock.Mock()\n slack_client_class_mock.return_value = slack_client_mock\n slack_client_mock.api_call.return_value = {'ok': True}\n\n slack_hook = SlackHook(token='test_token')\n test_api_json = {'channel': 'test_channel'}\n\n slack_hook.call(\"chat.postMessage\", json=test_api_json)\n slack_client_mock.api_call.assert_called_with(\"chat.postMessage\", json=test_api_json)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 72, "n_words": 24, "vocab_size": 18, "complexity": 1, "nloc": 8, "token_counts": 67, "n_ast_nodes": 117, "n_identifiers": 15, "random_cut": "def test_api_call(self, slack_client_class_mock):\n slack_client_mock = mock.Mock()\n slack_client_class_mock.return_value = slack_client_mock\n slack_client_mock.api_call.return_value = {'ok': True}\n\n slack_hook = SlackHook(token='test_token')\n test_api_json = {'channel': 'test_channel'}\n\n slack_hook.call(" }, { "id": 108783, "commit_id": "f7f3bb6079048506613c513231e1bd2a87ebc7d3", "repo": "matplotlib", "path": "lib/matplotlib/figure.py", "file_name": "figure.py", "fun_name": "_check_layout_engines_compat", "commit_message": "ENH: add ability to remove layout engine\n\nThis also adds a \"place holder\" layout engine to ensure that users can not \"go\nthrough zero\" and change to an incompatible layout engine.\n\nCo-authored-by: Jody Klymak ", "code": "def _check_layout_engines_compat(self, old, new):\n \n if old is None or new is None:\n return True\n if old.colorbar_gridspec == new.colorbar_gridspec:\n return True\n # colorbar layout different, so check if any colorbars are on the\n # figure...\n for ax in self.axes:\n if hasattr(ax, '_colorbar'):\n # colorbars list themselves as a colorbar.\n return False\n return True\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 164, "n_words": 52, "vocab_size": 40, "complexity": 6, "nloc": 9, "token_counts": 51, "n_ast_nodes": 84, "n_identifiers": 8, "random_cut": "def _check_layout_engines_compat(self, old, new):\n \n if old is None or ne" }, { "id": 124504, "commit_id": "1243ed62bf4121c83881c3ddc095bc6a873a09f3", "repo": "ray", "path": "rllib/algorithms/marwil/marwil.py", "file_name": "marwil.py", "fun_name": "training_step", "commit_message": "[RLlib] Make Dataset reader default reader and enable CRR to use dataset (#26304)\n\nCo-authored-by: avnish ", "code": "def training_step(self) -> ResultDict:\n # Collect SampleBatches from sample 
workers.\n with self._timers[SAMPLE_TIMER]:\n batch = synchronous_parallel_sample(worker_set=self.workers)\n batch = batch.as_multi_agent()\n self._counters[NUM_AGENT_STEPS_SAMPLED] += batch.agent_steps()\n self._counters[NUM_ENV_STEPS_SAMPLED] += batch.env_steps()\n # Add batch to replay buffer.\n self.local_replay_buffer.add(batch)\n\n # Pull batch from replay buffer and train on it.\n train_batch = sample_min_n_steps_from_buffer(\n self.local_replay_buffer,\n self.config[\"train_batch_size\"],\n count_by_agent_steps=self._by_agent_steps,\n )\n # Train.\n if self.config[\"simple_optimizer\"]:\n train_results = train_one_step(self, train_batch)\n else:\n train_results = multi_gpu_train_one_step(self, train_batch)\n\n # TODO: Move training steps counter update outside of `train_one_step()` method.\n # # Update train step counters.\n # self._counters[NUM_ENV_STEPS_TRAINED] += train_batch.env_steps()\n # self._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps()\n\n global_vars = {\n \"timestep\": self._counters[NUM_AGENT_STEPS_SAMPLED],\n }\n\n # Update weights - after learning on the local worker - on all remote\n # workers.\n if self.workers.remote_workers():\n with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]:\n self.workers.sync_weights(global_vars=global_vars)\n\n # Update global vars on local worker as well.\n self.workers.local_worker().set_global_vars(global_vars)\n\n return train_results\n\n\n# Deprecated: Use ray.rllib.algorithms.marwil.MARWILConfig instead!", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 406, "n_words": 130, "vocab_size": 90, "complexity": 3, "nloc": 24, "token_counts": 166, "n_ast_nodes": 281, "n_identifiers": 31, "random_cut": "def training_step(self) -> ResultDict:\n # Collect SampleBatches from sample workers.\n with self._timers[SAMPLE_TIMER]:\n batch = synchronous_parallel_sample(worker_set=self.workers)\n batch = batch.as_multi_agent()\n self._counters[NUM_AGENT_STEPS_SAMPLED] += batch.agent_steps()\n self._counters[NUM_ENV_STEPS_SAMPLED] += batch.env_steps()\n # Add batch to replay buffer.\n self.local_replay_buffer.add(batch)\n\n # Pull batch from replay buffer and train on it.\n train_batch = sample_min_n_steps_from_buffer(\n self.local_replay_buffer,\n self.config[\"train_batch_size\"],\n count_by_agent_steps=self._by_agent_steps,\n )\n # Train.\n if self.config[\"simple_optimizer\"]:\n train_results = train_one_step(self, train_batch)\n else:\n train_results = multi_gpu_train_one_step(self, train_batch)\n\n # TODO: Move training steps counter update outside of `train_one_step()` method.\n # # Update train step counters.\n # self._counters[NUM_ENV_STEPS_TRAINED] += train_batch.env_steps()\n # self._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps()\n\n global_vars = {\n \"timestep\": self._counters[NUM_AGENT_STEPS_SAMPLED],\n }\n\n # Update weights - after learning on the local worker - on all remote\n # workers.\n if self.workers.remote_workers():\n with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]:\n self.workers.sync_weights(global_vars=global_vars)\n\n # Update global vars on local worker as well.\n self.w" }, { "id": 19067, "commit_id": "964f5ab75098c55f028f8acfeeae05df35ea68d5", "repo": "mlflow", "path": "tests/models/test_default_evaluator.py", "file_name": "test_default_evaluator.py", "fun_name": "test_gen_multiclass_roc_curve", "commit_message": "Evaluation Default evaluator (#5092)\n\n* init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* 
update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* rename module\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert black change\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* change module path\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* lazy load pyspark\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert export\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix curcit import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix conftest.py\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* Revert \"fix conftest.py\"\r\n\r\nThis reverts commit 2ea29c62bfffc5461bf77f3da15b5c00f51de19b.\r\n\r\n* fix tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* default evaluator\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update hash algo\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comment\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix lint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add more tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix lint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update shap explainer\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* remove scikitplot dep\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add pr curve\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap.summary_plot\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* log 
explainer\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* improve explainer code\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update shap init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update explainer creating\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update predict_proba\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add multi-class metrics artifacts\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add log_loss metric\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* lazy load pyspark\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address ben comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* prevent show shap logo, add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* support spark model\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap version check\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update docs, loose classifier label limit\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* multiclass classifier merge metrics/plots\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* zfill feature name\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add config max_num_classes_threshold_logging_roc_pr_curve_for_multiclass_classifier\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* improve label handling\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* black\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* increase plot dpi\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix test fixture\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* use matplot rc_context\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix shap import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor EvaluationDataset\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* limit user specify shap algos\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* clean\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update evaluation dataset\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* use svg fig\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert svg\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* curve dashline, legend display ap/roc, legend move out\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* linewidth 1\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* keyword arguments for evaluate, fix tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* mark abc.abstractmethod, kw args for ModelEvaluator methods\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu ", "code": "def test_gen_multiclass_roc_curve():\n y = [0, 1, 2, 1, 2]\n y_probs = [\n [0.7, 0.1, 0.2],\n [0.2, 0.3, 0.5],\n [0.25, 0.4, 0.35],\n [0.3, 0.4, 
0.3],\n [0.8, 0.1, 0.1],\n ]\n\n results = _gen_classifier_curve(\n is_binomial=False, y=y, y_probs=y_probs, labels=[0, 1, 2], curve_type=\"roc\"\n )\n print(results)\n\n expected_x_data_list = [\n [0.0, 0.25, 0.25, 1.0],\n [0.0, 0.33333333, 0.33333333, 1.0],\n [0.0, 0.33333333, 0.33333333, 1.0, 1.0],\n ]\n expected_y_data_list = [[0.0, 0.0, 1.0, 1.0], [0.0, 0.5, 1.0, 1.0], [0.0, 0.0, 0.5, 0.5, 1.0]]\n line_labels = [\"label=0,AUC=0.750\", \"label=1,AUC=0.750\", \"label=2,AUC=0.333\"]\n for index, (name, x_data, y_data) in enumerate(results.plot_fn_args[\"data_series\"]):\n assert name == line_labels[index]\n assert np.allclose(x_data, expected_x_data_list[index], rtol=1e-3)\n assert np.allclose(y_data, expected_y_data_list[index], rtol=1e-3)\n\n assert results.plot_fn_args[\"xlabel\"] == \"False Positive Rate\"\n assert results.plot_fn_args[\"ylabel\"] == \"True Positive Rate\"\n assert results.plot_fn_args[\"line_kwargs\"] == {\"drawstyle\": \"steps-post\", \"linewidth\": 1}\n\n expected_auc = [0.75, 0.75, 0.3333]\n assert np.allclose(results.auc, expected_auc, rtol=1e-3)\n", "url": "https://github.com/mlflow/mlflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 255, "n_words": 124, "vocab_size": 82, "complexity": 2, "nloc": 29, "token_counts": 388, "n_ast_nodes": 410, "n_identifiers": 23, "random_cut": "def test_gen_multiclass_roc_curve():\n y = [0, 1, 2, 1, 2]\n y_probs = [\n [0.7, 0.1, 0.2],\n [0.2, 0.3, 0.5],\n [0.25, 0.4, 0.35],\n [0.3, 0.4, 0.3],\n [0.8, 0.1, 0.1],\n ]\n\n results = _gen_classifier_curve(\n is_binomial=False, y=y, y_probs=y_probs, labels=[0, 1, 2], curve_type=\"roc\"\n )\n print(results)\n\n expected_x_data_list = [\n [0.0, 0.25, 0.25, 1.0],\n [0.0, 0.33333333, 0.33333333, 1.0],\n [0.0, 0.33333333, 0.33333333, 1.0, 1.0],\n ]\n expected_y_data_list = [[0.0, 0.0, 1.0, 1.0], [0.0, 0.5, 1.0, 1.0], [0.0, 0.0, 0.5, 0.5, 1.0]]\n line_labels = [\"label=0,AUC=0.750\", \"label=1,AUC=0.750\", \"label=2,AUC=0.333\"]\n for index, (name, x_data, y_data) in enumerate(results.plot_fn_args[\"data_series\"]):\n assert name == line_labels[index]\n assert np.allclose(x_data, expected_x_data_list[index], rtol=1e-3)\n assert np.allclose(y_data, expected_y_data_list[index], rtol=1e-3)\n\n assert results.plot_fn_args[\"xlabel\"] == \"False Positive Rate\"\n assert results.plot_fn_args[\"ylabel\"] == " }, { "id": 288863, "commit_id": "f23b1750e85f07091eb896a0b12b8f95e5646338", "repo": "core", "path": "tests/components/homekit_controller/specific_devices/test_eve_energy.py", "file_name": "test_eve_energy.py", "fun_name": "test_eve_energy_setup", "commit_message": "Migrate HomeKit Controller to use stable identifiers (#80064)", "code": "async def test_eve_energy_setup(hass):\n \n accessories = await setup_accessories_from_file(hass, \"eve_energy.json\")\n await setup_test_accessories(hass, accessories)\n\n await assert_devices_and_entities_created(\n hass,\n DeviceTestInfo(\n unique_id=HUB_TEST_ACCESSORY_ID,\n name=\"Eve Energy 50FF\",\n model=\"Eve Energy 20EAO8601\",\n manufacturer=\"Elgato\",\n sw_version=\"1.2.9\",\n hw_version=\"1.0.0\",\n serial_number=\"AA00A0A00000\",\n devices=[],\n entities=[\n EntityTestInfo(\n entity_id=\"switch.eve_energy_50ff\",\n unique_id=\"00:00:00:00:00:00_1_28\",\n friendly_name=\"Eve Energy 50FF\",\n state=\"off\",\n ),\n EntityTestInfo(\n entity_id=\"sensor.eve_energy_50ff_amps\",\n unique_id=\"00:00:00:00:00:00_1_28_33\",\n friendly_name=\"Eve Energy 50FF Amps\",\n 
unit_of_measurement=ELECTRIC_CURRENT_AMPERE,\n capabilities={\"state_class\": SensorStateClass.MEASUREMENT},\n state=\"0\",\n ),\n EntityTestInfo(\n entity_id=\"sensor.eve_energy_50ff_volts\",\n unique_id=\"00:00:00:00:00:00_1_28_32\",\n friendly_name=\"Eve Energy 50FF Volts\",\n unit_of_measurement=ELECTRIC_POTENTIAL_VOLT,\n capabilities={\"state_class\": SensorStateClass.MEASUREMENT},\n state=\"0.400000005960464\",\n ),\n EntityTestInfo(\n entity_id=\"sensor.eve_energy_50ff_power\",\n unique_id=\"00:00:00:00:00:00_1_28_34\",\n friendly_name=\"Eve Energy 50FF Power\",\n unit_of_measurement=POWER_WATT,\n capabilities={\"state_class\": SensorStateClass.MEASUREMENT},\n state=\"0\",\n ),\n EntityTestInfo(\n entity_id=\"sensor.eve_energy_50ff_energy_kwh\",\n unique_id=\"00:00:00:00:00:00_1_28_35\",\n friendly_name=\"Eve Energy 50FF Energy kWh\",\n capabilities={\"state_class\": SensorStateClass.MEASUREMENT},\n unit_of_measurement=ENERGY_KILO_WATT_HOUR,\n state=\"0.28999999165535\",\n ),\n EntityTestInfo(\n entity_id=\"switch.eve_energy_50ff_lock_physical_controls\",\n unique_id=\"00:00:00:00:00:00_1_28_36\",\n friendly_name=\"Eve Energy 50FF Lock Physical Controls\",\n entity_category=EntityCategory.CONFIG,\n state=\"off\",\n ),\n EntityTestInfo(\n entity_id=\"button.eve_energy_50ff_identify\",\n unique_id=\"00:00:00:00:00:00_1_1_3\",\n friendly_name=\"Eve Energy 50FF Identify\",\n entity_category=EntityCategory.DIAGNOSTIC,\n state=\"unknown\",\n ),\n ],\n ),\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1188, "n_words": 110, "vocab_size": 66, "complexity": 1, "nloc": 70, "token_counts": 278, "n_ast_nodes": 456, "n_identifiers": 33, "random_cut": "async def test_eve_energy_setup(hass):\n \n accessories = await setup_accessories_from_file(hass, \"eve_energy.json\")\n await setup_test_accessories(hass, accessories)\n\n await assert_devices_and_entities_created(\n hass,\n DeviceTestInfo(\n unique_id=HUB_TEST_ACCESSORY_ID,\n name=\"Eve Energy 50FF\",\n model=\"Eve Energy 20EAO8601\",\n manufacturer=\"Elgato\",\n sw_version=\"1.2.9\",\n hw_version=\"1.0.0\",\n serial_number=\"AA00A0A00000\",\n devices=[],\n entities=[\n EntityTestInfo(\n entity_id=\"switch.eve_energy_50ff\",\n unique_id=\"00:00:00:00:00:00_1_28\",\n friendly_name=\"Eve Energy 50FF\",\n state=\"off\",\n ),\n EntityTestInfo(\n entity_id=\"sensor.eve_energy_50ff_amps\",\n unique_id=\"00:00:00:00:00:00_1_28_33\",\n friendly_name=\"Eve Energy 50FF Amps\",\n unit_of_m" }, { "id": 29355, "commit_id": "f6edcd7b4f7ee334245733f118d3b10cd065d671", "repo": "saleor", "path": "saleor/checkout/tasks.py", "file_name": "tasks.py", "fun_name": "delete_expired_checkouts", "commit_message": "Optimize memory usage of delete_expired_checkouts task (#11175)", "code": "def delete_expired_checkouts():\n now = timezone.now()\n expired_anonymous_checkouts = (\n Q(email__isnull=True)\n & Q(user__isnull=True)\n & Q(last_change__lt=now - settings.ANONYMOUS_CHECKOUTS_TIMEDELTA)\n )\n expired_user_checkout = (Q(email__isnull=False) | Q(user__isnull=False)) & Q(\n last_change__lt=now - settings.USER_CHECKOUTS_TIMEDELTA\n )\n empty_checkouts = Q(lines__isnull=True) & Q(\n last_change__lt=now - settings.EMPTY_CHECKOUTS_TIMEDELTA\n )\n qs = Checkout.objects.filter(\n empty_checkouts | expired_anonymous_checkouts | expired_user_checkout\n )\n\n deleted_count = 0\n for tokens_batch in queryset_in_batches(qs):\n batch_count, _ = 
Checkout.objects.filter(token__in=tokens_batch).delete()\n deleted_count += batch_count\n\n if deleted_count:\n task_logger.debug(\"Removed %s checkouts.\", deleted_count)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 163, "n_words": 65, "vocab_size": 43, "complexity": 3, "nloc": 22, "token_counts": 143, "n_ast_nodes": 230, "n_identifiers": 28, "random_cut": "def delete_expired_checkouts():\n now = timezone.now()\n expired_anonymous_checkouts = (\n Q(email__isnull=True)\n & Q(user__isnull=True)\n & Q(last_change__lt=now - settings.ANONYMOUS_CHECKOUTS_TIMEDELTA)\n )\n expired_user_checkout = (Q(email__isnull=False) | Q(user__isnull=False))" }, { "id": 206968, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_changelist/tests.py", "file_name": "tests.py", "fun_name": "test_search_help_text", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_search_help_text(self):\n superuser = self._create_superuser(\"superuser\")\n m = BandAdmin(Band, custom_site)\n # search_fields without search_help_text.\n m.search_fields = [\"name\"]\n request = self._mocked_authenticated_request(\"/band/\", superuser)\n response = m.changelist_view(request)\n self.assertIsNone(response.context_data[\"cl\"].search_help_text)\n self.assertNotContains(response, '
<div class=\"help\">')\n # search_fields with search_help_text.\n m.search_help_text = \"Search help text\"\n request = self._mocked_authenticated_request(\"/band/\", superuser)\n response = m.changelist_view(request)\n self.assertEqual(\n response.context_data[\"cl\"].search_help_text, \"Search help text\"\n )\n self.assertContains(response, '<div class=\"help\">Search help text</div>')\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 169, "n_words": 54, "vocab_size": 34, "complexity": 1, "nloc": 15, "token_counts": 113, "n_ast_nodes": 197, "n_identifiers": 19, "random_cut": "def test_search_help_text(self):\n superuser = self._create_superuser(\"superuser\")\n m = BandAdmin(Band, custom_site)\n # search_fields without search_help_text.\n m.search_fields = [\"name\"]\n request = self._mocked_authenticated_request(\"/band/\", superuser)\n response = m.changelist_view(request)\n self.assertIsNone(response.context_data[\"cl\"].search_help_text)\n self.assertNotContains(response, '
    ')\n # " }, { "id": 155960, "commit_id": "e715a4d4b5b30374d6ea625a921b5557f0ce6efa", "repo": "dask", "path": "dask/dataframe/tests/test_dataframe.py", "file_name": "test_dataframe.py", "fun_name": "test_head_npartitions_warn", "commit_message": "Avoid pytest.warns(None) (#8718)\n\nCompatibility with new pytest 7.0\r\n\r\nCo-authored-by: Luka Sturtewagen \r\nCo-authored-by: Julia Signell ", "code": "def test_head_npartitions_warn():\n match = \"5 elements requested, only 3 elements\"\n with pytest.warns(UserWarning, match=match):\n d.head(5)\n\n match = \"Insufficient elements\"\n with pytest.warns(UserWarning, match=match):\n d.head(100)\n\n with pytest.warns(UserWarning, match=match):\n d.head(7)\n\n with pytest.warns(UserWarning, match=match):\n d.head(7, npartitions=2)\n\n # No warn if all partitions are inspected\n for n in [3, -1]:\n with warnings.catch_warnings(record=True) as record:\n d.head(10, npartitions=n)\n assert not record\n\n # With default args, this means that a 1 partition dataframe won't warn\n d2 = dd.from_pandas(pd.DataFrame({\"x\": [1, 2, 3]}), npartitions=1)\n with warnings.catch_warnings(record=True) as record:\n d2.head()\n assert not record\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 176, "n_words": 81, "vocab_size": 58, "complexity": 2, "nloc": 19, "token_counts": 168, "n_ast_nodes": 282, "n_identifiers": 17, "random_cut": "def test_head_npartitions_warn():\n match = \"5 elements requested, only 3 elements\"\n with pytest.warns" }, { "id": 251223, "commit_id": "fdde9ba3b3caaa2654048cec0af07bfcc3a6a3f8", "repo": "mitmproxy", "path": "mitmproxy/tools/main.py", "file_name": "main.py", "fun_name": "mitmweb", "commit_message": "use Python 3.9+ typing", "code": "def mitmweb(args=None) -> Optional[int]: # pragma: no cover\n from mitmproxy.tools import web\n run(web.master.WebMaster, cmdline.mitmweb, args)\n return None\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 26, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 34, "n_ast_nodes": 52, "n_identifiers": 11, "random_cut": "def mitmweb(args=None) -> Optional[int]: # pragma: no cover\n from mitmproxy.tools import web\n run(web.master.WebMaster, cmdline.mitmweb, args)\n return None\n" }, { "id": 63871, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/toml/decoder.py", "file_name": "decoder.py", "fun_name": "_getpath", "commit_message": "upd; format", "code": "def _getpath(p):\n if (3, 6) <= sys.version_info:\n import os\n return os.fspath(p)\n if _detect_pathlib_path(p):\n return str(p)\n return p\n\n\ntry:\n FNFError = FileNotFoundError\nexcept NameError:\n FNFError = IOError\n\n\nTIME_RE = re.compile(r\"([0-9]{2}):([0-9]{2}):([0-9]{2})(\\.([0-9]{3,6}))?\")\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 61, "n_words": 29, "vocab_size": 23, "complexity": 3, "nloc": 7, "token_counts": 38, "n_ast_nodes": 95, "n_identifiers": 15, "random_cut": "def _getpath(p):\n if (3, 6) <= sys.version_info:\n import os\n return os.fspath(p)\n if _detect_pathlib_path(p):\n return str(p)\n return p\n\n\ntry:\n FNFError = FileNotFoundError\nexcept NameError:\n " }, { "id": 74068, "commit_id": 
"d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/tests/test_blocks.py", "file_name": "test_blocks.py", "fun_name": "test_validate_non_required_choice_block", "commit_message": "Reformat with black", "code": "def test_validate_non_required_choice_block(self):\n block = blocks.ChoiceBlock(\n choices=[(\"tea\", \"Tea\"), (\"coffee\", \"Coffee\")], required=False\n )\n self.assertEqual(block.clean(\"coffee\"), \"coffee\")\n\n with self.assertRaises(ValidationError):\n block.clean(\"whisky\")\n\n self.assertEqual(block.clean(\"\"), \"\")\n self.assertEqual(block.clean(None), \"\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 83, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 9, "token_counts": 84, "n_ast_nodes": 150, "n_identifiers": 11, "random_cut": "def test_validate_non_required_choice_block(self):\n block = blocks.ChoiceBlock(\n choices=[(\"tea\", \"Tea\"), (\"coffee\", \"Coffee\")], required=False\n )\n self.assertEqual(block.clean(\"coffee\"), \"coffee\")\n\n with self.a" }, { "id": 40673, "commit_id": "c16180493bd44fd76092fdd9ea0060bac91e47fe", "repo": "seaborn", "path": "seaborn/tests/_core/test_subplots.py", "file_name": "test_subplots.py", "fun_name": "test_col_facet_wrapped", "commit_message": "Refactor figure setup and subplot metadata tracking into Subplots class\n\nSquashed commit of the following:\n\ncommit e6f99078d46947eab678b9dd0303657a3129f9fc\nAuthor: Michael Waskom \nDate: Sun Aug 1 17:56:49 2021 -0400\n\n Address a couple TODOs\n\ncommit c48ba3af8095973b7dca9554934a695751f58726\nAuthor: Michael Waskom \nDate: Mon Jul 26 06:42:29 2021 -0400\n\n Add docstrings in Subplots\n\ncommit 97e6465b0f998f541b445b189682fbf134869391\nAuthor: Michael Waskom \nDate: Sun Jul 25 17:53:22 2021 -0400\n\n Fix unshared label visibility test\n\ncommit e2d93a28313c2cb9170e56b2e4b373987993be7c\nAuthor: Michael Waskom \nDate: Sun Jul 25 17:16:41 2021 -0400\n\n Add more label visibility tests\n\ncommit 698ee72b5d5f9f3939c50cde9e2baacdf5487807\nAuthor: Michael Waskom \nDate: Sat Jul 24 11:08:32 2021 -0400\n\n Begin adding label visibility tests\n\ncommit 97167b4701532eeccadaa899520d57e38c26dd43\nAuthor: Michael Waskom \nDate: Mon Jul 19 06:55:48 2021 -0400\n\n Fix interior tick labels with unshared axes\n\ncommit 9331d5d91a7861aebfe03fa86ee122902c0d1d8a\nAuthor: Michael Waskom \nDate: Sat Jul 17 17:03:48 2021 -0400\n\n Fix interior labels for wrapped plots\n\ncommit 38f2efa7e732958430c006f24827c6ac69640ef3\nAuthor: Michael Waskom \nDate: Sat Jul 17 16:03:34 2021 -0400\n\n Fix non-cartesian interior labels\n\ncommit 3c07f981110890d38aee19b38c43080863132122\nAuthor: Michael Waskom \nDate: Sat Jul 17 15:44:48 2021 -0400\n\n Integrate Subplots into Plot\n\ncommit 841a3c998eae8f8cc85fd65af7ea8e6f32fc5510\nAuthor: Michael Waskom \nDate: Sat Jul 17 13:00:09 2021 -0400\n\n Complete subplots tests\n\ncommit 8ceb7e6c35ea0cbcd014067035d7ea219204f464\nAuthor: Michael Waskom \nDate: Fri Jul 16 19:45:29 2021 -0400\n\n Continue building out subplot tests\n\ncommit b0ce0e7a9e3534fdad04ef9e287e4c6bb19fe684\nAuthor: Michael Waskom \nDate: Thu Jul 15 21:35:21 2021 -0400\n\n Continue building out subplots tests\n\ncommit 5f4b67d4d90cde7d0d899527b1fd8607348a5f5b\nAuthor: Michael Waskom \nDate: Wed Jul 14 20:57:35 2021 -0400\n\n Add some tests for Subplots functionality\n\ncommit 58fbf8e3f349174f4d1d29f71fa867ad4b49d264\nAuthor: Michael Waskom \nDate: Sun Jul 11 20:49:29 
2021 -0400\n\n Begin refactoring figure setup into Subplots class\n\ncommit 6bb853e20ad3b42b2728d212a51ed8de2ff47bde\nAuthor: Michael Waskom \nDate: Sun Jul 11 16:02:26 2021 -0400\n\n Fix overlooked lint and test", "code": "def test_col_facet_wrapped(self, long_df):\n\n key = \"b\"\n wrap = 3\n data = PlotData(long_df, {\"col\": key})\n s = Subplots({}, {\"wrap\": wrap}, {}, data)\n\n n_levels = len(categorical_order(long_df[key]))\n assert s.n_subplots == n_levels\n assert s.subplot_spec[\"ncols\"] == wrap\n assert s.subplot_spec[\"nrows\"] == n_levels // wrap + 1\n assert s.subplot_spec[\"sharex\"] is True\n assert s.subplot_spec[\"sharey\"] is True\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 117, "n_words": 48, "vocab_size": 32, "complexity": 1, "nloc": 11, "token_counts": 98, "n_ast_nodes": 165, "n_identifiers": 14, "random_cut": "def test_col_facet_wrapped(self, long_df):\n\n key = \"b\"\n wrap = 3\n data = PlotData(long_df, {\"col\": key})\n s = Subplots({}, {\"wrap\": wrap}, {}, data)\n\n n_levels = len(categorical_order(long_df[key]))\n assert s.n_subplots == n_levels\n assert s.subplot_spec[\"ncols\"] == wrap\n assert s.subplot_spec[\"nrows\"] == n_levels // wrap + 1\n assert s.subplot_spec[\"sharex\"] is True\n as" }, { "id": 313249, "commit_id": "3771c154fa0ea8e0b49d41ece55a7a18c444ee6a", "repo": "core", "path": "homeassistant/components/command_line/switch.py", "file_name": "switch.py", "fun_name": "_query_state", "commit_message": "Improve code quality command_line (#65333)", "code": "def _query_state(self) -> str | int | None:\n \n if self._command_state:\n if self._value_template:\n return self._query_state_value(self._command_state)\n return self._query_state_code(self._command_state)\n if TYPE_CHECKING:\n return None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 89, "n_words": 20, "vocab_size": 15, "complexity": 4, "nloc": 8, "token_counts": 45, "n_ast_nodes": 74, "n_identifiers": 9, "random_cut": "def _query_state(self) -> str | int | None:\n " }, { "id": 224601, "commit_id": "2c986996d041f0059b4d3c2ff4bd647cadeb68de", "repo": "mkdocs", "path": "mkdocs/tests/config/config_options_tests.py", "file_name": "config_options_tests.py", "fun_name": "test_missing_default", "commit_message": "MarkdownExtensions' default is an empty list", "code": "def test_missing_default(self):\n option = config_options.MarkdownExtensions()\n config = {}\n config['markdown_extensions'] = option.validate(None)\n option.post_validation(config, 'markdown_extensions')\n self.assertEqual(\n {\n 'markdown_extensions': [],\n 'mdx_configs': {},\n },\n config,\n )\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 126, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 12, "token_counts": 55, "n_ast_nodes": 94, "n_identifiers": 9, "random_cut": "def test_missing_default(self):\n option = config_options.MarkdownExtensions()\n config = {}\n config['markdown_extensions'] = option.validate(None)\n option.post_validation(config, 'markdown_extensions')\n self.assertEqual(\n {\n " }, { "id": 290875, "commit_id": "b6586d5c34bf7ea5c30fbb1b62c438078ea14f39", "repo": "core", "path": "tests/components/number/test_init.py", "file_name": "test_init.py", "fun_name": "test_device_classes_aligned", "commit_message": 
"Align number and sensor device classes (#81909)\n\n* Align number and sensor device classes\r\n\r\n* Add tests\r\n\r\n* Tweak tests", "code": "def test_device_classes_aligned():\n \n\n non_numeric_device_classes = {\n SensorDeviceClass.DATE,\n SensorDeviceClass.DURATION,\n SensorDeviceClass.TIMESTAMP,\n }\n\n for device_class in SensorDeviceClass:\n if device_class in non_numeric_device_classes:\n continue\n\n assert hasattr(NumberDeviceClass, device_class.name)\n assert getattr(NumberDeviceClass, device_class.name).value == device_class.value\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 91, "n_words": 26, "vocab_size": 23, "complexity": 3, "nloc": 11, "token_counts": 56, "n_ast_nodes": 86, "n_identifiers": 12, "random_cut": "def test_device_classes_aligned():\n \n\n non_numeric_device_classes = {\n SensorDeviceClass.DATE,\n SensorDeviceClass.DURATION,\n SensorDeviceClass.TIMESTAMP,\n }\n\n for device_class in SensorDeviceClass:\n if device_class in non_numeric_device_classes:\n continue\n\n assert hasattr(NumberDeviceClass, device_class.name)\n assert getattr(NumberDeviceClass, device_class.name).va" }, { "id": 130772, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/internal/internal_api.py", "file_name": "internal_api.py", "fun_name": "store_stats_summary", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def store_stats_summary(reply):\n \n store_summary = \"--- Aggregate object store stats across all nodes ---\\n\"\n # TODO(ekl) it would be nice if we could provide a full memory usage\n # breakdown by type (e.g., pinned by worker, primary, etc.)\n store_summary += (\n \"Plasma memory usage {} MiB, {} objects, {}% full, {}% \"\n \"needed\\n\".format(\n int(reply.store_stats.object_store_bytes_used / (1024 * 1024)),\n reply.store_stats.num_local_objects,\n round(\n 100\n * reply.store_stats.object_store_bytes_used\n / reply.store_stats.object_store_bytes_avail,\n 2,\n ),\n round(\n 100\n * reply.store_stats.object_store_bytes_primary_copy\n / reply.store_stats.object_store_bytes_avail,\n 2,\n ),\n )\n )\n if reply.store_stats.object_store_bytes_fallback > 0:\n store_summary += \"Plasma filesystem mmap usage: {} MiB\\n\".format(\n int(reply.store_stats.object_store_bytes_fallback / (1024 * 1024))\n )\n if reply.store_stats.spill_time_total_s > 0:\n store_summary += (\n \"Spilled {} MiB, {} objects, avg write throughput {} MiB/s\\n\".format(\n int(reply.store_stats.spilled_bytes_total / (1024 * 1024)),\n reply.store_stats.spilled_objects_total,\n int(\n reply.store_stats.spilled_bytes_total\n / (1024 * 1024)\n / reply.store_stats.spill_time_total_s\n ),\n )\n )\n if reply.store_stats.restore_time_total_s > 0:\n store_summary += (\n \"Restored {} MiB, {} objects, avg read throughput {} MiB/s\\n\".format(\n int(reply.store_stats.restored_bytes_total / (1024 * 1024)),\n reply.store_stats.restored_objects_total,\n int(\n reply.store_stats.restored_bytes_total\n / (1024 * 1024)\n / reply.store_stats.restore_time_total_s\n ),\n )\n )\n if reply.store_stats.consumed_bytes > 0:\n store_summary += \"Objects consumed by Ray tasks: {} MiB.\\n\".format(\n int(reply.store_stats.consumed_bytes / (1024 * 1024))\n )\n if reply.store_stats.object_pulls_queued:\n store_summary += \"Object fetches queued, waiting for available memory.\"\n\n return store_summary\n\n", 
"url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 800, "n_words": 194, "vocab_size": 101, "complexity": 6, "nloc": 56, "token_counts": 272, "n_ast_nodes": 438, "n_identifiers": 20, "random_cut": "def store_stats_summary(reply):\n \n store_summary = \"--- Aggregate object store stats across all nodes ---\\n\"\n # TODO(ekl) it would be nice if we could provide a full memory usage\n # breakdown by type (e.g., pinned by worker, primary, etc.)\n store_summary += (\n \"Plasma memory usage {} MiB, {} objects, {}% full, {}% \"\n \"needed\\n\".format(\n int(reply.store_stats.object_store_bytes_used / (1024 * 1024)),\n reply.store_stats.num_local_objects,\n round(\n 100\n * reply.store_stats.object_store_bytes_used\n / reply.store_stats.object_store_bytes_avail,\n 2,\n ),\n round(\n 100\n * reply.store_stats.object_store_bytes_primary_copy\n / reply.store_stats.object_store_bytes_avail,\n 2,\n ),\n )\n )\n if reply.store_stats.object_store_bytes_fallback > 0:\n store_summary += \"Plasma filesystem mmap usage: {} MiB\\n\".format(\n int(reply.store_stats.object_store_bytes_fallback / (1024 * 1024))\n )\n if reply.store_stats.spill_time_total_s > 0:\n store_summary += (\n \"Spilled {} MiB, {} objects, avg write throughput {} MiB/s\\n\".format(\n int(reply.store_stats.spilled_bytes_total / (1024 * 1024)),\n reply.store_stats.spilled_objects_total,\n int(\n reply.store_stats.spilled_bytes_total\n / (1024 * 1024)\n / reply.store_stats.spill_time_total_s\n ),\n )\n " }, { "id": 252406, "commit_id": "002f919dda5f01d067c2e786426c68751551d15c", "repo": "mitmproxy", "path": "mitmproxy/contrib/kaitaistruct/png.py", "file_name": "png.py", "fun_name": "_read", "commit_message": "update kaitai definitions", "code": "def _read(self):\n self.magic = self._io.read_bytes(8)\n if not self.magic == b\"\\x89\\x50\\x4E\\x47\\x0D\\x0A\\x1A\\x0A\":\n raise kaitaistruct.ValidationNotEqualError(b\"\\x89\\x50\\x4E\\x47\\x0D\\x0A\\x1A\\x0A\", self.magic, self._io, u\"/seq/0\")\n self.ihdr_len = self._io.read_u4be()\n if not self.ihdr_len == 13:\n raise kaitaistruct.ValidationNotEqualError(13, self.ihdr_len, self._io, u\"/seq/1\")\n self.ihdr_type = self._io.read_bytes(4)\n if not self.ihdr_type == b\"\\x49\\x48\\x44\\x52\":\n raise kaitaistruct.ValidationNotEqualError(b\"\\x49\\x48\\x44\\x52\", self.ihdr_type, self._io, u\"/seq/2\")\n self.ihdr = Png.IhdrChunk(self._io, self, self._root)\n self.ihdr_crc = self._io.read_bytes(4)\n self.chunks = []\n i = 0\n while True:\n _ = Png.Chunk(self._io, self, self._root)\n self.chunks.append(_)\n if ((_.type == u\"IEND\") or (self._io.is_eof())) :\n break\n i += 1\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 243, "n_words": 74, "vocab_size": 48, "complexity": 7, "nloc": 20, "token_counts": 214, "n_ast_nodes": 354, "n_identifiers": 22, "random_cut": "def _read(self):\n self.magic = self._io.read_bytes(8)\n if not self.magic == b\"\\x89\\x50\\x4E\\x47\\x0D\\x0A\\x1A\\x0A\":\n raise kaitaistruct.ValidationNotEqualError(b\"\\x89\\x50\\x4E\\x47\\x0D\\x0A\\x1A\\x0A\", self.magic, self._io, u\"/seq/0\")\n self.ihdr_len = self._io.read_u4be()\n if not self.ihdr_len == 13:\n raise kaitaistruct.ValidationNotEqualError(13, self.ihdr_len, self._io, u\"/seq/1\")\n self.ihdr_type = self._io.read_bytes(4)\n if not self.ihdr_type == b\"\\x49\\x48\\x44\\x52\":\n raise 
kaitaistruct.ValidationNotEqualError(b\"\\x49\\x48\\x44\\x52\", self.ihdr_type, self._io, u\"/seq/2\")\n self.ihdr = Png.IhdrChunk(self._io, self, self._root)\n self.ihdr_crc = self._io.read_bytes(4)\n self.chunks = []\n i = 0\n while True:\n _ = Png.Chunk(self._io, self, self._root)\n self.chunks.append(_)\n if ((_.type == u\"IEND\") or (self._io.is_eof())) :\n break\n i += 1\n" }, { "id": 277931, "commit_id": "406774b60ac6b505ae9bf7e8728b00a1523ad4a3", "repo": "keras", "path": "keras/optimizers/optimizer_v2/adamax_test.py", "file_name": "adamax_test.py", "fun_name": "testSlotsUniqueEager", "commit_message": "resolve line-too-long in optimizer", "code": "def testSlotsUniqueEager(self):\n v1 = tf.Variable(1.0)\n v2 = tf.Variable(1.0)\n opt = adamax.Adamax(1.0)\n opt.minimize(lambda: v1 + v2, var_list=[v1, v2])\n # There should be iteration, and two unique slot variables for v1 and\n # v2.\n self.assertLen({id(v) for v in opt.variables()}, 5)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 86, "n_words": 38, "vocab_size": 30, "complexity": 2, "nloc": 6, "token_counts": 74, "n_ast_nodes": 108, "n_identifiers": 15, "random_cut": "def testSlotsUniqueEager(self):\n v1 = tf.Var" }, { "id": 213485, "commit_id": "d743336b1f3654cd0315f380f43eed4116997c1d", "repo": "ivy", "path": "ivy/backends/jax/core/general.py", "file_name": "general.py", "fun_name": "identity", "commit_message": "renamed dev_str arg to dev for all methods.", "code": "def identity(n, dtype='float32', batch_shape=None, dev=None):\n dtype = _jnp.__dict__[dtype]\n mat = _jnp.identity(n, dtype=dtype)\n if batch_shape is None:\n return_mat = mat\n else:\n reshape_dims = [1]*len(batch_shape) + [n, n]\n tile_dims = list(batch_shape) + [1, 1]\n return_mat = _jnp.tile(_jnp.reshape(mat, reshape_dims), tile_dims)\n return to_dev(return_mat, default_device(dev))\n\n\nmeshgrid = lambda *xs, indexing='ij': _jnp.meshgrid(*xs, indexing=indexing)\n\n", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 88, "n_words": 47, "vocab_size": 38, "complexity": 2, "nloc": 10, "token_counts": 102, "n_ast_nodes": 189, "n_identifiers": 20, "random_cut": "def identity(n, dtype='float32', batch_shape=None, dev=None):\n dtype = _jnp.__dict__[dtype]\n mat = _jnp.iden" }, { "id": 122207, "commit_id": "58a2abe1b5496acb177a5fd10394e001c381bff9", "repo": "jax", "path": "jax/experimental/sparse/bcoo.py", "file_name": "bcoo.py", "fun_name": "_bcoo_multiply_dense", "commit_message": "[sparse] Move broadcasting_vmap to sparse util.\n\nPiperOrigin-RevId: 478566197", "code": "def _bcoo_multiply_dense(data, indices, v, *, spinfo):\n \n # TODO(jakevdp): the logic here is similar to bcoo_extract... can we reuse that?\n shape = spinfo.shape\n if v.ndim == 0:\n return lax.mul(data, v)\n if shape == v.shape:\n # Note: due to distributive property, no deduplication necessary!\n return lax.mul(data, bcoo_extract(indices, v))\n\n if lax.broadcast_shapes(v.shape, shape) != shape:\n raise NotImplementedError(\n \"multiplication between sparse and dense is only implemented for cases \"\n \"where the output shape matches the sparse matrix shape. 
Got \"\n f\"shape={shape}, v.shape={v.shape}\")\n v = lax.expand_dims(v, range(len(shape) - v.ndim))\n\n props = _validate_bcoo(data, indices, shape)\n\n def _mul(data, indices, v):\n assert indices.shape[1] == v.ndim - props.n_dense\n ind = tuple(indices[:, i] for i in range(indices.shape[1]))\n ind = tuple(i if s != 1 else 0 for i, s in zip(ind, v.shape))\n return data * v[ind]\n for _ in range(props.n_batch):\n _mul = _broadcasting_vmap(_mul)\n return _mul(data, indices, v)\n\n@tree_util.register_pytree_node_class", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@tree_util.register_pytree_node_class", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 189, "n_words": 137, "vocab_size": 97, "complexity": 5, "nloc": 17, "token_counts": 135, "n_ast_nodes": 340, "n_identifiers": 29, "random_cut": "def _bcoo_multiply_dense(data, indices, v, *, spinfo):\n \n # TODO(jakevdp): the logic here is similar to bcoo_extract... can we reuse that?\n shape = spinfo.shape\n if v.ndim == 0:\n return lax.mul(data, v)\n if shape == v.shape:\n # Note: due to distributive property, no deduplication necessary!\n return lax.mul(data, bcoo_extract(indices, v))\n\n if lax.broadcast_shapes(v.shape, shape) != shape:\n raise NotImplementedError(\n \"multiplication between sparse and dense is only implemented for cases \"\n \"where the output shape matches the sparse matrix shape. Got \"\n f\"shape={shape}, v.shape={v.shape}\")\n v = lax.expand_dims(v, range(len(shape) - v.ndim))\n\n props = _validate_bcoo(data, indices, shape)\n\n def _mul(data, indices, v):\n assert indices.shape[1] == v.ndim - props.n_dense\n ind = tuple(indices[:, i] for i in range(indices.shape[1]))\n ind = tuple(i if s != 1 else 0 for i, s in zip(ind, v.shape))\n return data * v[ind]\n for _ in range(props.n_batch):\n _mul = _broadcasting_vmap(_mul)\n return _m" }, { "id": 125409, "commit_id": "0bc560bd541c320b0699464e8d23134c07899c18", "repo": "ray", "path": "rllib/evaluation/tests/test_env_runner_v2.py", "file_name": "test_env_runner_v2.py", "fun_name": "test_sample_batch_rollout_single_agent_env", "commit_message": "[RLlib] Make sure we step() after adding init_obs. 
(#26827)", "code": "def test_sample_batch_rollout_single_agent_env(self):\n config = (\n PPOConfig()\n .framework(\"torch\")\n .training(\n # Specifically ask for a batch of 200 samples.\n train_batch_size=200,\n )\n .rollouts(\n num_envs_per_worker=1,\n horizon=4,\n num_rollout_workers=0,\n # Enable EnvRunnerV2.\n enable_connectors=True,\n )\n )\n\n algo = PPO(config, env=DebugCounterEnv)\n\n rollout_worker = algo.workers.local_worker()\n sample_batch = rollout_worker.sample()\n\n self.assertEqual(sample_batch.env_steps(), 200)\n self.assertEqual(sample_batch.agent_steps(), 200)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 262, "n_words": 43, "vocab_size": 36, "complexity": 1, "nloc": 19, "token_counts": 95, "n_ast_nodes": 152, "n_identifiers": 24, "random_cut": "def test_sample_batch_rollout_single_agent_env(self):\n config = (\n PPOConfig()\n .framework(\"torch\")\n .training(\n # Specifically ask for a batch of 200 samples.\n train_batch_size=200,\n )\n .rollouts(\n num_envs_per_worker=1,\n horizon=4,\n " }, { "id": 148964, "commit_id": "13e74c5693e68ddb6b7afa4559ac23d2ec8ee26c", "repo": "freqtrade", "path": "freqtrade/wallets.py", "file_name": "wallets.py", "fun_name": "_update_dry", "commit_message": "Add dry-run position wallet calculation", "code": "def _update_dry(self) -> None:\n \n # Recreate _wallets to reset closed trade balances\n _wallets = {}\n _positions = {}\n open_trades = Trade.get_trades_proxy(is_open=True)\n # If not backtesting...\n # TODO: potentially remove the ._log workaround to determine backtest mode.\n if self._log:\n tot_profit = Trade.get_total_closed_profit()\n else:\n tot_profit = LocalTrade.total_profit\n tot_in_trades = sum(trade.stake_amount for trade in open_trades)\n used_stake = 0.0\n\n if self._config.get('trading_mode', 'spot') != TradingMode.FUTURES:\n current_stake = self.start_cap + tot_profit - tot_in_trades\n total_stake = current_stake\n for trade in open_trades:\n curr = self._exchange.get_pair_base_currency(trade.pair)\n _wallets[curr] = Wallet(\n curr,\n trade.amount,\n 0,\n trade.amount\n )\n else:\n tot_in_trades = 0\n for position in open_trades:\n # size = self._exchange._contracts_to_amount(position.pair, position['contracts'])\n size = position.amount\n # TODO-lev: stake_amount in real trades does not include the leverage ...\n collateral = position.stake_amount / position.leverage\n leverage = position.leverage\n tot_in_trades -= collateral\n _positions[position.pair] = PositionWallet(\n position.pair, position=size,\n leverage=leverage,\n collateral=collateral,\n side=position.trade_direction\n )\n current_stake = self.start_cap + tot_profit\n used_stake = tot_in_trades\n total_stake = current_stake - tot_in_trades\n\n _wallets[self._config['stake_currency']] = Wallet(\n currency=self._config['stake_currency'],\n free=current_stake,\n used=used_stake,\n total=total_stake\n )\n self._wallets = _wallets\n self._positions = _positions\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 750, "n_words": 160, "vocab_size": 94, "complexity": 6, "nloc": 51, "token_counts": 247, "n_ast_nodes": 389, "n_identifiers": 42, "random_cut": "def _update_dry(self) -> None:\n \n # Recreate _wallets to reset closed trade balances\n _wallets = {}\n _positions = {}\n open_trades = Trade.get_trades_proxy(is_open=True)\n # If not backtesting...\n # TODO: 
potentially remove the ._log workaround to determine backtest mode.\n if self._log:\n tot_profit = Trade.get_total_closed_profit()\n else:\n tot_profit = LocalTrade.total_profit\n tot_in_trades = sum(trade.stake_amount for trade in open_trades)\n used_stake = 0.0\n\n if self._config.get('trading_mode', 'spot') != TradingMode.FUTURES:\n current_stake = self.start_cap + tot_profit - tot_in_trades\n total_stake = current_stake\n for trade in open_trades:\n curr = self._exchange.get_pair_base_currency(trade.pair)\n _wallets[curr] = Wallet(\n curr,\n trade.amount,\n 0,\n trade.amount\n )\n else:\n tot_in_trades = 0\n for position in open_trades:\n # size = self._exchange._contracts_to_amount(position.pair, position['contracts'])\n size = position.amount\n # TODO-lev: stake_amount in real trades does not include the leverage ...\n collateral = position.stake_amount / position.leverage\n leverage = position.leverage\n tot_in_trades -= collateral\n _positions[position.pair] = PositionWallet(\n position.pair, position=size,\n leverage=leverage,\n collateral=collateral,\n side=position.trade_direction\n )\n current_stake = self.start_cap + tot_profit\n used_stak" }, { "id": 13205, "commit_id": "82960f105149c478e4fc88e8b4fef8bbe2454429", "repo": "jina", "path": "jina/orchestrate/deployments/__init__.py", "file_name": "__init__.py", "fun_name": "update_pod_args", "commit_message": "feat: distributed replicas across different hosts (#5217)", "code": "def update_pod_args(self):\n \n if self.args.runtime_cls == 'GatewayRuntime':\n _set_gateway_uses(self.args)\n if isinstance(self.args, Dict):\n # This is used when a Deployment is created in a remote context, where pods & their connections are already given.\n self.pod_args = self.args\n else:\n self.pod_args = self._parse_args(self.args)\n\n if self.external:\n for pod, port, host, scheme, tls in zip(\n self.pod_args['pods'][0],\n self.ext_repl_ports,\n self.ext_repl_hosts,\n self.ext_repl_schemes,\n self.ext_repl_tls,\n ):\n pod.port = port\n pod.host = host\n pod.scheme = scheme\n pod.tls = tls\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 302, "n_words": 66, "vocab_size": 54, "complexity": 5, "nloc": 19, "token_counts": 118, "n_ast_nodes": 184, "n_identifiers": 20, "random_cut": "def update_pod_args(self):\n \n if self.args.runtime_cls == 'GatewayRuntime':\n _set_gateway_uses(self.args)\n if isinstance(self.args, Dict):\n # This is used when a Deployment is created in a remote context, where pods & their connections are already given.\n self.pod_args = self.args\n else:\n self.pod_args = self._parse" }, { "id": 132833, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/trial.py", "file_name": "trial.py", "fun_name": "get_json_state", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_json_state(self) -> str:\n if not self._state_json or not self._state_valid:\n json_state = json.dumps(\n self.__getstate__(), indent=2, cls=TuneFunctionEncoder\n )\n self._state_json = json_state\n self._state_valid = True\n return self._state_json\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 97, "n_words": 25, "vocab_size": 19, "complexity": 3, "nloc": 8, "token_counts": 52, "n_ast_nodes": 82, "n_identifiers": 12, "random_cut": "def 
get_json_state(self) -> str:" }, { "id": 224347, "commit_id": "dca7cbb43fcd6ea7c677c98ba585395b070d387b", "repo": "mkdocs", "path": "mkdocs/tests/plugin_tests.py", "file_name": "plugin_tests.py", "fun_name": "test_plugin_config_multivalue_dict", "commit_message": "Format code with `black -l100 --skip-string-normalization`", "code": "def test_plugin_config_multivalue_dict(self, mock_class):\n\n cfg = {\n 'plugins': [\n {\n 'sample': {\n 'foo': 'foo value',\n 'bar': 42,\n },\n 'extra_key': 'baz',\n }\n ],\n }\n option = config.config_options.Plugins()\n with self.assertRaises(config.base.ValidationError):\n option.validate(cfg['plugins'])\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 221, "n_words": 28, "vocab_size": 24, "complexity": 1, "nloc": 15, "token_counts": 65, "n_ast_nodes": 116, "n_identifiers": 12, "random_cut": "def test_plugin_config_multivalue_dict(self, mock_class):\n\n cfg = {\n 'plugins': [\n {\n 'sample': {\n 'foo': 'foo value',\n 'bar': 42,\n },\n 'extra_key': " }, { "id": 297451, "commit_id": "cc5d3193698c107d6b56f6001ffb7707fb77bdef", "repo": "core", "path": "homeassistant/components/justnimbus/entity.py", "file_name": "entity.py", "fun_name": "available", "commit_message": "Fix Just Nimbus error codes (#83856)", "code": "def available(self) -> bool:\n \n return super().available and self.coordinator.data is not None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 25, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 3, "token_counts": 23, "n_ast_nodes": 39, "n_identifiers": 6, "random_cut": "def available(self) -> bool:\n \n retu" }, { "id": 170704, "commit_id": "490c5d049890d8ea71ec5e2dc4ffa6196c10cc63", "repo": "pandas", "path": "pandas/tests/util/test_assert_almost_equal.py", "file_name": "test_assert_almost_equal.py", "fun_name": "test_assert_almost_equal_numbers_atol", "commit_message": "DEPR: Remove check_less_precise in asserters (#49461)", "code": "def test_assert_almost_equal_numbers_atol(a, b):\n # Equivalent to the deprecated check_less_precise=True, enforced in 2.0\n _assert_almost_equal_both(a, b, rtol=0.5e-3, atol=0.5e-3)\n\n\n@pytest.mark.parametrize(\"a,b\", [(1.1, 1.11), (0.1, 0.101), (0.000011, 0.001012)])", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"a,b\", [(1.1, 1.11), (0.1, 0.101), (0.000011, 0.001012)])", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 27, "n_words": 23, "vocab_size": 23, "complexity": 1, "nloc": 2, "token_counts": 29, "n_ast_nodes": 72, "n_identifiers": 9, "random_cut": "def test_assert_almost_equal_numbers_atol(a, b):\n # Equivalent to the deprecated check_less_precise=True, enfor" }, { "id": 165141, "commit_id": "7ee8ab07e538de55bd02f1ed5c2d211c7e342ddc", "repo": "pandas", "path": "pandas/tests/io/parser/test_converters.py", "file_name": "test_converters.py", "fun_name": "test_converter_identity_object", "commit_message": "BUG: read_csv not respecting converter in all cases for index col (#46053)", "code": "def test_converter_identity_object(all_parsers):\n # GH#40589\n parser = all_parsers\n data = \"A,B\\n1,2\\n3,4\"\n\n rs = parser.read_csv(StringIO(data), converters={\"A\": lambda x: x})\n\n xp = DataFrame({\"A\": [\"1\", \"3\"], \"B\": [2, 4]})\n tm.assert_frame_equal(rs, xp)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 44, "n_words": 27, "vocab_size": 24, "complexity": 1, "nloc": 6, "token_counts": 63, "n_ast_nodes": 111, "n_identifiers": 13, "random_cut": "def test_converter_identity_object(all_parsers):\n # GH#40589\n parser = all_parsers\n data = \"A,B\\n1,2\\n3,4\"\n\n rs = parser.re" }, { "id": 272929, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/preprocessing/discretization_test.py", "file_name": "discretization_test.py", "fun_name": "test_one_hot_output", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_one_hot_output(self):\n input_data = np.array([-1.5, 1.0, 3.4, 3.5])\n\n expected_output = [\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n [0.0, 0.0, 0.0, 1.0],\n ]\n expected_output_shape = [None, 4]\n\n inputs = keras.Input(shape=(1,))\n layer = discretization.Discretization(\n bin_boundaries=[0.0, 1.0, 2.0], output_mode=\"one_hot\"\n )\n outputs = layer(inputs)\n self.assertAllEqual(expected_output_shape, outputs.shape.as_list())\n\n model = keras.Model(inputs, outputs)\n output_data = model(input_data)\n self.assertAllEqual(expected_output, output_data)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 195, "n_words": 57, "vocab_size": 38, "complexity": 1, "nloc": 18, "token_counts": 196, "n_ast_nodes": 213, "n_identifiers": 22, "random_cut": "def test_one_hot_output(self):\n input_data = np.array([-1.5, 1.0, 3.4, 3.5])\n\n expected_output = [\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n [0.0, 0.0, 0.0, 1.0],\n ]\n expected_output_shape = [None, 4]\n\n inputs = keras.Input(shape=(1,))\n layer = discretization.Discretization(\n bin_boundaries=[0.0, 1.0, 2.0], output_mode=\"one_hot\"\n )\n outputs = layer(inputs)\n self.assertAllEqual(expected_output_shape, outputs.shape.as_list())\n\n model = keras.Model(inputs, outputs)\n output_data = model(input_data)\n self.assertAllEqual(expect" }, { "id": 132716, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/tests/test_trial_scheduler.py", "file_name": "test_trial_scheduler.py", "fun_name": "testMedianStoppingSoftStop", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def testMedianStoppingSoftStop(self):\n rule = MedianStoppingRule(\n metric=\"episode_reward_mean\",\n mode=\"max\",\n grace_period=0,\n min_samples_required=1,\n hard_stop=False,\n )\n t1, t2 = self.basicSetup(rule)\n runner = mock_trial_runner()\n rule.on_trial_complete(runner, t1, result(10, 1000))\n rule.on_trial_complete(runner, t2, result(10, 1000))\n t3 = Trial(\"PPO\")\n self.assertEqual(\n rule.on_trial_result(runner, t3, result(1, 260)), TrialScheduler.CONTINUE\n )\n self.assertEqual(\n rule.on_trial_result(runner, t3, result(2, 260)), TrialScheduler.PAUSE\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 196, "n_words": 43, "vocab_size": 30, "complexity": 1, "nloc": 19, "token_counts": 129, "n_ast_nodes": 194, "n_identifiers": 23, "random_cut": "def testMedianStoppingSoftStop(self):\n rule = MedianStoppingRule(\n metric=\"episode_reward_mean\",\n mode=\"max\",\n grace_period=0,\n 
min_samples_required=1,\n hard_stop=False,\n )\n t1, t2 = self.basicSetup(rule)\n runner = mock_trial_runner()\n rule.on_trial_complete(runner, t1, result(10, 1000))\n rule.on_trial_complete(runner, t2, result(10, 1000))\n t3 = Trial(\"PPO\")\n self.assertEqual(\n rule.on_trial_result(runner, t3, result(1, 260)), TrialScheduler.CONTINUE\n )\n self.assertEqual(\n rule.on_trial_result(runner, t3, result(2, 260)), TrialScheduler." }, { "id": 63458, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "copy", "commit_message": "upd; format", "code": "def copy(self):\n \n ret = ParseResults(self.__toklist)\n ret.__tokdict = dict(self.__tokdict.items())\n ret.__parent = self.__parent\n ret.__accumNames.update(self.__accumNames)\n ret.__name = self.__name\n return ret\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 66, "n_words": 17, "vocab_size": 13, "complexity": 1, "nloc": 7, "token_counts": 54, "n_ast_nodes": 90, "n_identifiers": 12, "random_cut": "def copy(self):\n \n ret = ParseResults(self.__toklist)\n ret.__tokdict = dict(self.__tokdict.items())\n ret.__parent = self.__parent\n ret." }, { "id": 157706, "commit_id": "1c2e25a557db446b5691c18e595e5664cc254730", "repo": "d2l-zh", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "read_csv_labels", "commit_message": "sync lib", "code": "def read_csv_labels(fname):\n \n with open(fname, 'r') as f:\n # 跳过文件头行(列名)\n lines = f.readlines()[1:]\n tokens = [l.rstrip().split(',') for l in lines]\n return dict(((name, label) for name, label in tokens))\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 53, "n_words": 27, "vocab_size": 24, "complexity": 3, "nloc": 5, "token_counts": 62, "n_ast_nodes": 106, "n_identifiers": 13, "random_cut": "def read_csv_labels(fname):\n \n with open(fname, 'r') as f:\n " }, { "id": 320824, "commit_id": "c9380605a1240748769c012403520323b4d2c3be", "repo": "qutebrowser", "path": "tests/unit/config/test_configexc.py", "file_name": "test_configexc.py", "fun_name": "test_no_option_error", "commit_message": "Display close matches for invalid settings", "code": "def test_no_option_error(deleted, renamed, all_names, expected):\n e = configexc.NoOptionError(\n 'opt',\n deleted=deleted,\n renamed=renamed,\n all_names=all_names,\n )\n assert e.option == 'opt'\n assert str(e) == expected\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 60, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 9, "token_counts": 45, "n_ast_nodes": 68, "n_identifiers": 10, "random_cut": "def test_no_option_error(deleted, renamed, all_names, expected):\n e = configexc.NoOptionError(\n 'opt',\n deleted=deleted,\n renamed=renamed,\n all_names=all_names,\n )\n assert e.option == 'opt'\n assert str(e) == expected\n\n" }, { "id": 216443, "commit_id": "5550d1823e9cb571740ae9e57b25424cfe6a919e", "repo": "salt", "path": "tests/pytests/functional/utils/win_dacl/test_file.py", "file_name": "test_file.py", "fun_name": "test_has_permission_missing", "commit_message": "Add changelong", "code": "def test_has_permission_missing(test_file):\n result = win_dacl.set_permissions(\n obj_name=str(test_file),\n 
principal=\"Backup Operators\",\n permissions=\"read_execute\",\n access_mode=\"grant\",\n obj_type=\"file\",\n reset_perms=False,\n protected=None,\n )\n assert result is True\n\n # Test has_permission not exact\n result = win_dacl.has_permission(\n obj_name=str(test_file),\n principal=\"Backup Operators\",\n permission=\"write\",\n access_mode=\"grant\",\n obj_type=\"file\",\n exact=False,\n )\n assert result is False\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 149, "n_words": 38, "vocab_size": 26, "complexity": 1, "nloc": 20, "token_counts": 85, "n_ast_nodes": 137, "n_identifiers": 16, "random_cut": "def test_has_permission_missing(test_file):\n" }, { "id": 153924, "commit_id": "af7f4ed8ff0033a9a4e7d35a948f2057033bd826", "repo": "modin", "path": "modin/pandas/test/test_series.py", "file_name": "test_series.py", "fun_name": "test_add_series_to_timedeltaindex", "commit_message": "FIX-#4411: Fix binary_op between datetime64 Series and pandas timedelta (#4592)\n\nSigned-off-by: Karthik Velayutham ", "code": "def test_add_series_to_timedeltaindex():\n # Make a pandas.core.indexes.timedeltas.TimedeltaIndex\n deltas = pd.to_timedelta([1], unit=\"h\")\n test_series = create_test_series(np.datetime64(\"2000-12-12\"))\n eval_general(*test_series, lambda s: s + deltas)\n eval_general(*test_series, lambda s: s - deltas)\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 41, "n_words": 28, "vocab_size": 22, "complexity": 1, "nloc": 5, "token_counts": 53, "n_ast_nodes": 114, "n_identifiers": 17, "random_cut": "def test_add_series_to_timedeltaindex():\n # Make a pandas.core.indexes.timedeltas.TimedeltaIndex\n deltas = pd.to_timedelta([1], unit=\"h\")\n test_series = create_test_series(np.datetim" }, { "id": 163268, "commit_id": "d603d43df2057ecdf74010d9dadc735e37f8f7b5", "repo": "pandas", "path": "pandas/core/indexing.py", "file_name": "indexing.py", "fun_name": "_ensure_iterable_column_indexer", "commit_message": "TYP: Ignore numpy related issues (#45244)", "code": "def _ensure_iterable_column_indexer(self, column_indexer):\n \n ilocs: Sequence[int]\n if is_integer(column_indexer):\n ilocs = [column_indexer]\n elif isinstance(column_indexer, slice):\n ilocs = np.arange(len(self.obj.columns))[ # type: ignore[assignment]\n column_indexer\n ]\n elif isinstance(column_indexer, np.ndarray) and is_bool_dtype(\n column_indexer.dtype\n ):\n ilocs = np.arange(len(column_indexer))[column_indexer]\n else:\n ilocs = column_indexer\n return ilocs\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 175, "n_words": 37, "vocab_size": 27, "complexity": 5, "nloc": 15, "token_counts": 89, "n_ast_nodes": 144, "n_identifiers": 17, "random_cut": "def _ensure_iterable_column_indexer(self, column_indexer):\n \n ilocs: Sequence[int]\n if is_integer(column_indexer):\n ilocs = [column_indexer]\n elif isinstance(column_indexer, slice):\n ilocs = np.arange(len(self.obj.columns))[ # type: ignore[assignment]\n column_" }, { "id": 258373, "commit_id": "9ebf164cfdfb320503b7161493420c1b0ec577a3", "repo": "haystack", "path": "test/nodes/test_prompt_node.py", "file_name": "test_prompt_node.py", 
"fun_name": "test_run_invalid_template", "commit_message": "feat: Expand LLM support with PromptModel, PromptNode, and PromptTemplate (#3667)\n\nCo-authored-by: ZanSara ", "code": "def test_run_invalid_template(prompt_node):\n with pytest.raises(ValueError, match=\"invalid-task not supported\"):\n prompt_node.prompt(\"invalid-task\", {})\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 18, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 47, "n_identifiers": 7, "random_cut": "def test_run_invalid_template(prompt_node):\n with pytest.raises(" }, { "id": 212106, "commit_id": "fca16442ae90afcd2ac61f4e554e538776730831", "repo": "bokeh", "path": "tests/unit/bokeh/test_objects.py", "file_name": "test_objects.py", "fun_name": "test_get_class", "commit_message": "Redesign serialization protocol (#11960)\n\n* Redesign serialization in bokeh\r\n\r\n* Redesign deserialization in bokehjs\r\n\r\n* Resolve type issues and test failures\r\n\r\n* Make 'bytes' serialization work in bokeh\r\n\r\n* Partially update bokeh's serialization tests\r\n\r\n* Resolve issues with cyclic references\r\n\r\n* Don't limit StaticGraphProvider to tuples\r\n\r\n* Make FixedTicker.ticks' type more flexible\r\n\r\n* Use np.array instead of np.ndarray\r\n\r\n* Remove references to BokehJSONEncoder\r\n\r\n* Resolve sphinx warnings related to JSON\r\n\r\n* Implement hybrid serialization for map/dict\r\n\r\n* Use === or !== with unset symbol\r\n\r\n* Finalize deserialization of refs\r\n\r\n* Remove 'old' attribute from ModelChangedEvent\r\n\r\n* Make ButtonClick.__init__ less restrictive\r\n\r\n* Use Map in StaticLayoutProvider.graph_layout\r\n\r\n* Start using Map for non-string keys\r\n\r\n* Fix plotting/file/line_on_off example\r\n\r\n* Don't denormalize specs in bokehjs\r\n\r\n* Hack around issues with resolving figure model\r\n\r\n* Remove special cases from defaults' tests\r\n\r\n* Temporarily update unit/bokeh/test_objects\r\n\r\n* Promote streaming/patching events and remove hints\r\n\r\n* Allow to stream/patch any property in bokehjs\r\n\r\n* Drop unneeded Property.serializable_value()\r\n\r\n* Set callback_invoker on hinted events\r\n\r\n* Simplify unit/bokeh/test_objects.py\r\n\r\n* Always preserve ndarrays even for dtype=\"object\"\r\n\r\n* Refine and normalize naming conventions\r\n\r\n* Remove unused functions\r\n\r\n* Move Model.to_json() to sphinxext.bokeh_model\r\n\r\n* Include references in serialized values\r\n\r\n* Actually encode data when streaming/patching\r\n\r\n* Robustify differential serialization\r\n\r\n* Allow bokehjs to send binary buffers\r\n\r\n* Add dtype=object code path to ColorSpec\r\n\r\n* Simplify definitions of data specs\r\n\r\n* Remove meaningless code comments\r\n\r\n* Introduce Bytes and replace Base64String\r\n\r\n* Add support for serialization of slices\r\n\r\n* Remove obsolete comment from property/dataspec.py\r\n\r\n* Add a comment regarding ndarray.tobytes()\r\n\r\n* Try serializing pandas' types last\r\n\r\n* Standardize error reporting\r\n\r\n* Resturucture bokehjs serialization code\r\n\r\n* Redesign default model resolution\r\n\r\n* Refactor 'kind' in document events\r\n\r\n* Don't depend on Document in Deserializer\r\n\r\n* Make Deserializer.encode() re-entrant\r\n\r\n* Move *Buffer to serialization/buffer\r\n\r\n* Finalize differential serialization\r\n\r\n* Serialize vectorized values as structures\r\n\r\n* Rename 
Event.{decode_json->from_serializable}\r\n\r\n* Don't use has_ref() in Model.to_serializable()\r\n\r\n* Handle circular object references in bokehjs\r\n\r\n* Reorganize serialization unit tests\r\n\r\n* Redesign model registry and qualified names\r\n\r\n* Remove the need for StaticSerializer\r\n\r\n* Make 'attributes' optional in type reps\r\n\r\n* Allow to serialize typed arrays as binary\r\n\r\n* Finalize handling of binary buffers\r\n\r\n* Use memoryview to further defer encoding\r\n\r\n* Test dict serialization and ordering\r\n\r\n* Downcast ndarrays {u}int{64->32} if possible\r\n\r\n* Add preliminary release/migration notes\r\n\r\n* Robustify encoding of objects and object refs\r\n\r\n* Remove support for serialization of relativedelta\r\n\r\n* Import pandas only if really necessary\r\n\r\n* Statically type bokeh.core.serialization\r\n\r\n* Add preliminary serialization's documentation\r\n\r\n* Add Deserializer.deserialize() for symmetric APIs\r\n\r\n* Handle streaming/patching/data events in io.notebook\r\n\r\n* Update handling of buffers in io.notebook\r\n\r\n* Properly serialize MessageSent event\r\n\r\n* Add a regression test for issue #11694\r\n\r\n* Preserve order of inherited properties\r\n\r\n* Add support for serialization of symbols\r\n\r\n* Update defaults' tests to use type=\"object\"\r\n\r\n* Move DocJson.version to the first entry\r\n\r\n* Add a preliminary regression test for #11930\r\n\r\n* Fix integration/glyphs/rect_log_axis.py\r\n\r\n* Fix value detection in dataspecs involving String\r\n\r\n* Remove an unnecessary type assertion", "code": "def test_get_class(self) -> None:\n from bokeh.model import get_class\n self.mkclass()\n tclass = get_class('test_objects.TestModelCls.mkclass.Test_Class')\n assert hasattr(tclass, 'foo')\n with pytest.raises(KeyError):\n get_class('Imaginary_Class')\n", "url": "https://github.com/bokeh/bokeh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 63, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 7, "token_counts": 43, "n_ast_nodes": 78, "n_identifiers": 11, "random_cut": "def test_get_class(self) -> None:\n from bokeh.model import get_class\n self.mkclass()\n tclass = get_class('test_objects.TestModelCls.mkclass.Test_Class')\n assert hasattr(tclass, 'foo')\n with pytest.raises" }, { "id": 33104, "commit_id": "c72d7d91bf4899760725793421eff9da640c8527", "repo": "transformers", "path": "src/transformers/models/xglm/modeling_tf_xglm.py", "file_name": "modeling_tf_xglm.py", "fun_name": "_reorder_cache", "commit_message": "Add TF implementation of `XGLMModel` (#16543)\n\n* Add TFXGLM models \r\n\r\n* Add todo: self.supports_xla_generation = False\r\n\r\nCo-authored-by: Daniel Stancl \r\nCo-authored-by: Daniel Stancl \r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Daniel \r\nCo-authored-by: Patrick von Platen ", "code": "def _reorder_cache(past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (tuple(tf.gather(past_state, beam_idx, axis=0) for past_state in layer_past),)\n return reordered_past\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 52, "n_words": 21, "vocab_size": 17, "complexity": 3, "nloc": 5, "token_counts": 42, "n_ast_nodes": 62, "n_identifiers": 10, "random_cut": "def _reorder_cache(past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (tuple(tf.gather(past_sta" }, { "id": 113641, "commit_id": 
"d68c786ff81bad19c04619d6a999ff34aaa724e7", "repo": "nni", "path": "nni/compression/pytorch/quantization/observer_quantizer.py", "file_name": "observer_quantizer.py", "fun_name": "quantize_input", "commit_message": "[Compression] remove pruning v1 & refactor directory (#5228)", "code": "def quantize_input(self, inputs, wrapper, **kwargs):\n if self.compressed:\n module = wrapper.module\n inputs = self._quantize(inputs,\n module.input_scale,\n module.input_zero_point,\n module.input_qmin,\n module.input_qmax)\n else:\n self.record(wrapper, 'input', inputs)\n return inputs\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 224, "n_words": 23, "vocab_size": 21, "complexity": 2, "nloc": 11, "token_counts": 60, "n_ast_nodes": 89, "n_identifiers": 13, "random_cut": "def quantize_input(self, inputs, wrapper, **kwargs):\n if self.compressed:\n module = wrapper.module\n " }, { "id": 205126, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/postgresql/base.py", "file_name": "base.py", "fun_name": "psycopg2_version", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def psycopg2_version():\n version = psycopg2.__version__.split(\" \", 1)[0]\n return get_version_tuple(version)\n\n\nPSYCOPG2_VERSION = psycopg2_version()\n\nif PSYCOPG2_VERSION < (2, 8, 4):\n raise ImproperlyConfigured(\n \"psycopg2 version 2.8.4 or newer is required; you have %s\"\n % psycopg2.__version__\n )\n\n\n# Some of these import psycopg2, so import them after checking if it's installed.\nfrom .client import DatabaseClient # NOQA\nfrom .creation import DatabaseCreation # NOQA\nfrom .features import DatabaseFeatures # NOQA\nfrom .introspection import DatabaseIntrospection # NOQA\nfrom .operations import DatabaseOperations # NOQA\nfrom .schema import DatabaseSchemaEditor # NOQA\n\npsycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString)\npsycopg2.extras.register_uuid()\n\n# Register support for inet[] manually so we don't have to handle the Inet()\n# object on load all the time.\nINETARRAY_OID = 1041\nINETARRAY = psycopg2.extensions.new_array_type(\n (INETARRAY_OID,),\n \"INETARRAY\",\n psycopg2.extensions.UNICODE,\n)\npsycopg2.extensions.register_type(INETARRAY)\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 141, "n_words": 118, "vocab_size": 83, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 233, "n_identifiers": 31, "random_cut": "def psycopg2_version():\n version = psycopg2.__version__.split(\" \", 1)[0]\n return get_version_tuple(version)\n\n\nPSYCOPG2_VERSION = psycopg2_version()\n\nif PSYCOPG2_VERSION < (2, 8, 4):\n raise ImproperlyConfigured(\n \"psycopg2 version 2.8.4 or newer is required; you have %s\"\n % psycopg2.__version__\n )\n\n\n# Some of these import psycopg2, so import them after checking if it's installed.\nfrom .client import DatabaseClient # NOQA\nfrom .creation import DatabaseCreation # NOQA\nfrom .features import DatabaseFeatures # NOQA\nfrom .introspection import DatabaseIntrospection # NOQA\nfrom .operations import DatabaseOperations # NOQA\nfrom .schema import DatabaseSchemaEditor # NOQA\n\npsycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString)\npsycopg2.extras.register_uuid()\n\n# Register sup" }, { "id": 264225, "commit_id": "7421e5f7d7e579ed1a0acf840c39ae61fd851504", 
"repo": "netbox", "path": "netbox/utilities/forms/fields.py", "file_name": "fields.py", "fun_name": "to_python", "commit_message": "Fixes #8317: Fix CSV import of multi-select custom field values", "code": "def to_python(self, value):\n if not value:\n return []\n if not isinstance(value, str):\n raise forms.ValidationError(f\"Invalid value for a multiple choice field: {value}\")\n return value.split(',')\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 65, "n_words": 23, "vocab_size": 20, "complexity": 3, "nloc": 6, "token_counts": 38, "n_ast_nodes": 67, "n_identifiers": 8, "random_cut": "def to_python(self, value):\n if not value:\n return []\n if not isinstance(value, str):\n raise forms.Valid" }, { "id": 278003, "commit_id": "b1105dca17670dcac229271e63d5073fe445b84c", "repo": "keras", "path": "keras/distribute/dataset_creator_model_fit_test.py", "file_name": "dataset_creator_model_fit_test.py", "fun_name": "testModelPredict", "commit_message": "resolve line-too-long in distribute", "code": "def testModelPredict(self, strategy):\n _, predictions = self._model_predict(strategy, steps=3)\n # Check the first (0th index), fourth (3rd index) and the last\n # predictions because the first, fourth and the last input are the same\n # in `model.predict` so there predictions should match.\n self.assertTrue(\n all(predictions[0] == predictions[i] for i in [0, 3, 5])\n )\n\n self.assertFalse(\n all(predictions[0] == predictions[i] for i in [0, 1, 2, 4])\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 141, "n_words": 64, "vocab_size": 44, "complexity": 3, "nloc": 8, "token_counts": 77, "n_ast_nodes": 113, "n_identifiers": 11, "random_cut": "def testModelPredict(self, strategy):\n _, predictions = self._model_predict(strategy, steps=3)\n # Check the first (0th index), fourth (3rd index) and the l" }, { "id": 106522, "commit_id": "c91cbf60729af93c4677864aa6c8b74b576146ca", "repo": "youtube-dl", "path": "youtube_dl/extractor/neteasemusic.py", "file_name": "neteasemusic.py", "fun_name": "extract_formats", "commit_message": "[netease] Get netease music download url through player api (#31235)\n\n* remove unplayable song from test\r\n* compatible with python 2\r\n* using standard User_Agent, fix imports\r\n* use hash instead of long description\r\n* fix lint\r\n* fix hash", "code": "def extract_formats(self, info):\n formats = []\n song_id = info['id']\n for song_format in self._FORMATS:\n details = info.get(song_format)\n if not details:\n continue\n\n bitrate = int_or_none(details.get('bitrate')) or 999000\n data = self._call_player_api(song_id, bitrate)\n for song in try_get(data, lambda x: x['data'], list) or []:\n song_url = try_get(song, lambda x: x['url'])\n if self._is_valid_url(song_url, info['id'], 'song'):\n formats.append({\n 'url': song_url,\n 'ext': details.get('extension'),\n 'abr': float_or_none(song.get('br'), scale=1000),\n 'format_id': song_format,\n 'filesize': int_or_none(song.get('size')),\n 'asr': int_or_none(details.get('sr')),\n })\n return formats\n", "url": "https://github.com/ytdl-org/youtube-dl.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 369, "n_words": 66, "vocab_size": 54, "complexity": 7, "nloc": 21, "token_counts": 176, "n_ast_nodes": 296, "n_identifiers": 22, "random_cut": "def extract_formats(self, info):\n 
formats = []\n song_id = info['id']\n for song_format in self._FORMATS:\n details = info.get(song_format)\n if not details:\n continue\n\n bitrate = int_or_none(details.get('bitrate')) or 999000\n data = self._call_player_api(song_id, bitrate)\n for song in try_get(data, lambda x: x['data'], list) or []:\n song_url = try_get(song, lambda x: x['url'])\n if self._is_valid_url(song_url, info['id'], 'song'):\n formats.append({\n 'url': song_url,\n 'ext': details.get('extension'),\n 'abr': float_or_none(song.get('br'), scale=1000),\n " }, { "id": 266845, "commit_id": "0990c4ca7cb1b239a76e8cdb78af01ca9601731e", "repo": "ansible", "path": "test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py", "file_name": "main.py", "fun_name": "_check_for_new_args", "commit_message": "Extend validate-modules to also validate plugins (#71734)\n\n* Let validate-modules also validate plugins.\r\n\r\n* Support 'option' in 'cli'.\r\n\r\n* Use DOCUMENTABLE_PLUGINS instead of UNDOCUMENTED_PLUGIN_TYPES.\r\n\r\n* Support 'keyword', clean up error codes.\r\n\r\n* Call settings.process_errors only once; remove __version__.\r\n\r\n* Add changelog fragment.", "code": "def _check_for_new_args(self, doc):\n if not self.base_branch or self._is_new_module():\n return\n\n with CaptureStd():\n try:\n existing_doc, dummy_examples, dummy_return, existing_metadata = get_docstring(\n self.base_module, fragment_loader, verbose=True, collection_name=self.collection_name,\n is_module=self.plugin_type == 'module')\n existing_options = existing_doc.get('options', {}) or {}\n except AssertionError:\n fragment = doc['extends_documentation_fragment']\n self.reporter.warning(\n path=self.object_path,\n code='missing-existing-doc-fragment',\n msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment\n )\n return\n except Exception as e:\n self.reporter.warning_trace(\n path=self.object_path,\n tracebk=e\n )\n self.reporter.warning(\n path=self.object_path,\n code='unknown-doc-fragment',\n msg=('Unknown pre-existing DOCUMENTATION error, see TRACE. Submodule refs may need updated')\n )\n return\n\n try:\n mod_collection_name = existing_doc.get('version_added_collection')\n mod_version_added = self._create_strict_version(\n str(existing_doc.get('version_added', '0.0')),\n collection_name=mod_collection_name)\n except ValueError:\n mod_collection_name = self.collection_name\n mod_version_added = self._create_strict_version('0.0')\n\n options = doc.get('options', {}) or {}\n\n should_be = '.'.join(ansible_version.split('.')[:2])\n strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')\n\n for option, details in options.items():\n try:\n names = [option] + details.get('aliases', [])\n except (TypeError, AttributeError):\n # Reporting of this syntax error will be handled by schema validation.\n continue\n\n if any(name in existing_options for name in names):\n # The option already existed. 
Make sure version_added didn't change.\n for name in names:\n existing_collection_name = existing_options.get(name, {}).get('version_added_collection')\n existing_version = existing_options.get(name, {}).get('version_added')\n if existing_version:\n break\n current_collection_name = details.get('version_added_collection')\n current_version = details.get('version_added')\n if current_collection_name != existing_collection_name:\n self.reporter.error(\n path=self.object_path,\n code='option-incorrect-version-added-collection',\n msg=('version_added for existing option (%s) should '\n 'belong to collection %r. Currently belongs to %r' %\n (option, current_collection_name, existing_collection_name))\n )\n elif str(current_version) != str(existing_version):\n self.reporter.error(\n path=self.object_path,\n code='option-incorrect-version-added',\n msg=('version_added for existing option (%s) should '\n 'be %r. Currently %r' %\n (option, existing_version, current_version))\n )\n continue\n\n try:\n collection_name = details.get('version_added_collection')\n version_added = self._create_strict_version(\n str(details.get('version_added', '0.0')),\n collection_name=collection_name)\n except ValueError as e:\n # already reported during schema validation\n continue\n\n if collection_name != self.collection_name:\n continue\n if (strict_ansible_version != mod_version_added and\n (version_added < strict_ansible_version or\n strict_ansible_version < version_added)):\n self.reporter.error(\n path=self.object_path,\n code='option-incorrect-version-added',\n msg=('version_added for new option (%s) should '\n 'be %r. Currently %r' %\n (option, should_be, version_added))\n )\n\n return existing_doc\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1735, "n_words": 280, "vocab_size": 167, "complexity": 21, "nloc": 89, "token_counts": 522, "n_ast_nodes": 868, "n_identifiers": 56, "random_cut": "def _check_for_new_args(self, doc):\n if not self.base_branch or self._is_new_module():\n return\n\n with CaptureStd():\n try:\n existing_doc, dummy_examples, dummy_return, existing_metadata = get_docstring(\n self.base_module, fragment_loader, verbose=True, collection_name=self.collection_name,\n is_module=self.plugin_type == 'module')\n existing_options = existing_doc.get('options', {}) or {}\n except AssertionError:\n fragment = doc['extends_documentation_fragment']\n self.reporter.warning(\n path=self.object_path,\n code='missing-existing-doc-fragment',\n msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment\n )\n return\n except Exception as e:\n self.reporter.warning_trace(\n path=self.object_path,\n tracebk=e\n )\n self.reporter.warning(\n path=self.object_path,\n code='unknown-doc-fragment',\n msg=('Unknown pre-existing DOCUMENTATION error, see TRACE. 
Submodule refs may need updated')\n )\n return\n\n try:\n mod_collection_name = existing_doc.get('version_added_collection')\n mod_version_added = self._create_strict_version(\n str(existing_doc.get('version_added', '0.0')),\n collection_name=mod_collection_name)\n except ValueError:\n mod_collection_name = self.collection_name\n mod_version_added = self._create_strict_version('0.0')\n\n options = doc.get('options', {}) or {}\n\n should_be = '.'.join(ansible_version.split('.')[:2])\n strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')\n\n for option, details in options.items():\n try:\n names = [option] + details.get('aliases', [])\n except (TypeError, AttributeError):\n # Reporting of this syntax error will be handled by schema validation.\n continue\n\n if any(name in existing_options for name in names):\n # The option already existed. Make sure versio" }, { "id": 145008, "commit_id": "331b71ea8dfee20bd71f6529fa372fd9d91c9ff4", "repo": "ray", "path": "release/ray_release/tests/test_cluster_manager.py", "file_name": "test_cluster_manager.py", "fun_name": "testFindCreateClusterEnvExisting", "commit_message": "[ci/release] Refactor release test e2e into package (#22351)\n\nAdds a unit-tested and restructured ray_release package for running release tests.\r\n\r\nRelevant changes in behavior:\r\n\r\nPer default, Buildkite will wait for the wheels of the current commit to be available. Alternatively, users can a) specify a different commit hash, b) a wheels URL (which we will also wait for to be available) or c) specify a branch (or user/branch combination), in which case the latest available wheels will be used (e.g. if master is passed, behavior matches old default behavior).\r\n\r\nThe main subpackages are:\r\n\r\n Cluster manager: Creates cluster envs/computes, starts cluster, terminates cluster\r\n Command runner: Runs commands, e.g. as client command or sdk command\r\n File manager: Uploads/downloads files to/from session\r\n Reporter: Reports results (e.g. 
to database)\r\n\r\nMuch of the code base is unit tested, but there are probably some pieces missing.\r\n\r\nExample build (waited for wheels to be built): https://buildkite.com/ray-project/kf-dev/builds/51#_\r\nWheel build: https://buildkite.com/ray-project/ray-builders-branch/builds/6023", "code": "def testFindCreateClusterEnvExisting(self):\n # Find existing env and succeed\n self.cluster_manager.set_cluster_env(self.cluster_env)\n self.assertTrue(self.cluster_manager.cluster_env_name)\n self.assertFalse(self.cluster_manager.cluster_env_id)\n\n self.sdk.returns[\"search_cluster_environments\"] = APIDict(\n metadata=APIDict(\n next_paging_token=None,\n ),\n results=[\n APIDict(\n name=\"no_match\",\n id=\"wrong\",\n ),\n APIDict(name=self.cluster_manager.cluster_env_name, id=\"correct\"),\n ],\n )\n self.cluster_manager.create_cluster_env()\n self.assertEqual(self.cluster_manager.cluster_env_id, \"correct\")\n self.assertEqual(self.sdk.call_counter[\"search_cluster_environments\"], 1)\n self.assertEqual(len(self.sdk.call_counter), 1)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 244, "n_words": 33, "vocab_size": 30, "complexity": 1, "nloc": 20, "token_counts": 138, "n_ast_nodes": 222, "n_identifiers": 21, "random_cut": "def testFindCreateClusterEnvExisting(self):\n # Find existing env and succeed\n self.cluster_manager.set_cluster_env(self.cluster_env)\n self.assertTrue(self.cluster_manager.cluster_env_name)\n self.assertFalse(self.cluster_manager.cluster_env_id)\n\n self.sdk.returns[\"search_cluster_environments\"] = APIDict(\n metadata=APIDict(\n next_paging_token=None,\n ),\n results=[\n APIDict(\n name=\"no_match\",\n id=\"wrong\",\n ),\n APIDict(name=self.cluster_manager.cluster_env_name, id=\"correct\"),\n " }, { "id": 200602, "commit_id": "6fe28f68866ac6fb1aea564dbde99190cec9c1ff", "repo": "sympy", "path": "sympy/algebras/tests/test_quaternion.py", "file_name": "test_quaternion.py", "fun_name": "test_to_euler", "commit_message": "minor edit", "code": "def test_to_euler():\n q = Quaternion(w, x, y, z)\n norm_of_q = Quaternion(q.norm())\n\n # Extrinsic rotations\n for seq_tuple in permutations('xyz'):\n # asymmetric sequences\n seq = ''.join(seq_tuple)\n euler_from_q = q.to_euler(seq)\n q_back = Quaternion.from_euler(euler_from_q, seq)\n q_diff = simplify(q * q_back.conjugate())\n assert q_diff == norm_of_q\n\n # symmetric sequences\n seq = ''.join([seq_tuple[0], seq_tuple[1], seq_tuple[0]])\n euler_from_q = q.to_euler(seq)\n q_back = Quaternion.from_euler(euler_from_q, seq)\n q_diff = simplify(q * q_back.conjugate())\n assert q_diff == norm_of_q\n\n # Intrinsic rotations\n for seq_tuple in permutations('XYZ'):\n # asymmetric sequences\n seq = ''.join(seq_tuple)\n euler_from_q = q.to_euler(seq)\n q_back = Quaternion.from_euler(euler_from_q, seq)\n q_diff = simplify(q * q_back.conjugate())\n assert q_diff == norm_of_q\n\n # symmetric sequences\n seq = ''.join([seq_tuple[0], seq_tuple[1], seq_tuple[0]])\n euler_from_q = q.to_euler(seq)\n q_back = Quaternion.from_euler(euler_from_q, seq)\n q_diff = simplify(q * q_back.conjugate())\n assert q_diff == norm_of_q\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 302, "n_words": 117, "vocab_size": 38, "complexity": 3, "nloc": 25, "token_counts": 240, "n_ast_nodes": 389, "n_identifiers": 20, "random_cut": "def test_to_euler():\n q = Quaternion(w, x, 
y, z)\n norm_of_q = Quaternion(q.norm())\n\n # Extrinsic rotations\n for seq_tuple in permutations('xyz'):\n # asymmetric sequences\n seq = ''.join(seq_tuple)\n euler_from_q = q.to_euler(seq)\n q_back = Quaternion.from_euler(euler_from_q, seq)\n q_diff = simplify(q * q_back.conjugate())\n assert q_diff == norm_of_q\n\n # symmetric sequences\n seq = ''.join([seq_tup" }, { "id": 247489, "commit_id": "e10a2fe0c28ec9206c0e2275df492f61ff5025f2", "repo": "synapse", "path": "tests/handlers/test_auth.py", "file_name": "test_auth.py", "fun_name": "test_short_term_login_token_gives_user_id", "commit_message": "Add some type hints to the tests.handlers module. (#12207)", "code": "def test_short_term_login_token_gives_user_id(self) -> None:\n token = self.macaroon_generator.generate_short_term_login_token(\n self.user1, \"\", duration_in_ms=5000\n )\n res = self.get_success(self.auth_handler.validate_short_term_login_token(token))\n self.assertEqual(self.user1, res.user_id)\n self.assertEqual(\"\", res.auth_provider_id)\n\n # when we advance the clock, the token should be rejected\n self.reactor.advance(6)\n self.get_failure(\n self.auth_handler.validate_short_term_login_token(token),\n AuthError,\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 129, "n_words": 34, "vocab_size": 30, "complexity": 1, "nloc": 12, "token_counts": 86, "n_ast_nodes": 137, "n_identifiers": 18, "random_cut": "def test_short_term_login_token_gives_user_id(self) -> None:\n token = self.macaroon_generator.generate_short_term_login_token(\n self.user1, \"\", duration_in_ms=5000\n )\n res = self.get_success(self.auth_handler.validate_short_term_login_token(token))\n self.assertEqual(self.user1, res.user_id)\n self.assertEqual(\"\", res.auth_provider_id)\n\n # when we advance the clock, the token should be rejected\n self.reactor.advance(6)\n self.get_failure(\n self.auth_handler.validate_short_term_login_token(token),\n AuthError,\n )\n" }, { "id": 13972, "commit_id": "87912a37ce7ab3c3b63c12b48d6cdfe31f81742c", "repo": "jina", "path": "tests/unit/serve/runtimes/worker/test_worker_runtime.py", "file_name": "test_worker_runtime.py", "fun_name": "test_worker_runtime_reflection", "commit_message": "fix: list-like args passed as string (#5464)\n\nCo-authored-by: Alaeddine Abdessalem ", "code": "async def test_worker_runtime_reflection():\n args = _generate_pod_args()\n\n cancel_event = multiprocessing.Event()\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 14, "n_words": 9, "vocab_size": 8, "complexity": 2, "nloc": 27, "token_counts": 125, "n_ast_nodes": 30, "n_identifiers": 6, "random_cut": "async def test_worker_runtime_reflection():\n args = _generate_pod_args()\n\n cancel_event = multiprocessing.E" }, { "id": 179000, "commit_id": "c6b19fa56bbd6d14728f152e92b9001dc76dd550", "repo": "Nuitka", "path": "nuitka/utils/Download.py", "file_name": "Download.py", "fun_name": "getCachedDownloadedMinGW64", "commit_message": "Windows: Updated MinGW64 compiler to be used", "code": "def getCachedDownloadedMinGW64(target_arch, assume_yes_for_downloads):\n # Large URLs, pylint: disable=line-too-long\n\n if target_arch == \"x86_64\":\n url = \"https://github.com/brechtsanders/winlibs_mingw/releases/download/11.2.0-14.0.0-9.0.0-msvcrt-r7/winlibs-x86_64-posix-seh-gcc-11.2.0-llvm-14.0.0-mingw-w64msvcrt-9.0.0-r7.zip\"\n binary = 
r\"mingw64\\bin\\gcc.exe\"\n else:\n url = \"https://github.com/brechtsanders/winlibs_mingw/releases/download/11.2.0-14.0.0-9.0.0-msvcrt-r7/winlibs-i686-posix-dwarf-gcc-11.2.0-llvm-14.0.0-mingw-w64msvcrt-9.0.0-r7.zip\"\n binary = r\"mingw32\\bin\\gcc.exe\"\n\n gcc_binary = getCachedDownload(\n url=url,\n is_arch_specific=target_arch,\n specificity=url.rsplit(\"/\", 2)[1],\n binary=binary,\n flatten=False,\n message=\"Nuitka will use gcc from MinGW64 of winlibs to compile on Windows.\",\n reject=\"Only this specific gcc is supported with Nuitka.\",\n assume_yes_for_downloads=assume_yes_for_downloads,\n )\n\n return gcc_binary\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 159, "n_words": 58, "vocab_size": 50, "complexity": 2, "nloc": 18, "token_counts": 77, "n_ast_nodes": 126, "n_identifiers": 13, "random_cut": "def getCachedDownloadedMinGW64(target_arch, assume_yes_for_downloads):\n # Large URLs, pylint: disable=line-too-long\n\n if target_arch == \"x86_64\":\n url = \"https://github.com/brechtsanders/winlibs_mingw/releases/download/11.2.0-14.0.0-9.0.0-msvcrt-r7/winlibs-x86_64-posix-seh-gcc-11.2.0-llvm-14.0.0-mingw-w64msvcrt-9.0.0-r7.zip\"\n binary = r\"mingw64\\bin\\gcc.exe\"\n else:\n url = \"https://github.com/brechtsanders/winlibs_mingw/releases/download/11.2.0-14.0.0-9.0.0-msvcrt-r7/winlibs-i686-posix-dwarf-gcc-11.2.0-llvm-14.0.0-mingw-w64msvcrt-9.0.0-r7.zip\"\n binary = r\"mingw32\\bin\\gcc.exe\"\n\n gcc_binary = getCachedDownload(\n url=url,\n is_arch_specific=target_arch,\n " }, { "id": 96421, "commit_id": "146fba432a32568be7d0b884dae0c39a6c33a11f", "repo": "sentry", "path": "tests/sentry/incidents/test_subscription_processor.py", "file_name": "test_subscription_processor.py", "fun_name": "test_multiple_triggers", "commit_message": "fix(metric_alerts): Make sure critical triggers resolve properly when no action is set on a warning trigger (#31883)\n\n### Problem\r\nIf we have an alert set up like:\r\n- Warning: 50. Action: None\r\n- Critical: 100. Action: Slack\r\n\r\nThen if we go from critical -> warning state the slack resolve action will fail to fire.\r\n\r\n### Cause\r\nThe reason this happens is related to a previous fix. For an alert like\r\n- Warning: 50. Action: Slack\r\n- Critical: 100. Action: Slack\r\n\r\nWhen going from critical -> warning the critical action would be marked as resolved. This would\r\ncause a slack notification with `Resolved` to be sent to the channel. This is misleading, because\r\nthe alert is still active, just in the warning state. What we want here is to fire a warning\r\nnotification instead.\r\n\r\nThe initial fix for this was that when we resolved a critical trigger, we’d check and see whether\r\nthere was an active warning trigger. If so, we’d send a warning trigger fire to our actions, rather\r\nthan a critical trigger resolve. This works ok for many cases, but fails when the actions on the\r\nwarning trigger are different to those on the critical trigger.\r\n\r\n### Fix\r\nSubstituting the warning trigger for the critical trigger causes us subtle bugs. So, instead of\r\nthis, when triggering fires/resolves on our action handlers we will also pass along the incident\r\nstate change that the trigger/resolve caused the incident to go into.\r\n\r\nSo if a critical trigger resolves, we check what state it would have put the incident in. If\r\nthere’s a warning trigger, then the state is warning. 
If no warning trigger, the state is closed.\r\nThis state is then used to appropriately generate the messages that we send to users via our\r\nvarious actions.\r\n\r\nSo now, If we have an alert set up like:\r\n- Warning: 50. Action: None\r\n- Critical: 100. Action: Slack\r\n\r\nIf this goes from\r\n- critical -> warning OR critical -> resolved we will send `IncidentStatus.WARNING` to any actions\r\nrelated to the critical trigger. \r\n- warning -> resolved We do nothing since there are no actions on the warning trigger\r\n\r\nIf we have an alert set up like:\r\n- Warning: 50. Action: Slack\r\n- Critical: 100. Action: Slack\r\n\r\nIf this goes from:\r\n- critical -> warning: critical trigger, `IncidentStatus.Warning`\r\n- warning -> resolved: warning trigger, `IncidentStatus.Closed`\r\n- critical -> resolved: Since we de-dupe triggers to avoid spamming the user, we will select the\r\nwarning trigger here, and send `IncidentStatus.closed`\r\n\r\nIf we have an alert set up like:\r\n- Warning: 50. Action: Slack\r\n- Critical: 100. Action: Pagerduty\r\n\r\nIf this goes from:\r\n- critical -> warning: critical trigger, `IncidentStatus.Warning` sent to Pagerduty. Nothing sent\r\nto Slack\r\n- warning -> resolved: warning trigger, `IncidentStatus.Closed` sent to Slack. Nothing sent to\r\nPagerduty\r\n- critical -> resolved: Critical trigger, `IncidentStatus.Warning` sent to Pagerduty. Warning\r\ntrigger, `IncidentStatus.Closed` sent to Slack. We don’t de-dupe here since the actions are\r\ndifferent.", "code": "def test_multiple_triggers(self):\n rule = self.rule\n rule.update(threshold_period=1)\n trigger = self.trigger\n warning_trigger = create_alert_rule_trigger(\n self.rule, WARNING_TRIGGER_LABEL, trigger.alert_threshold - 20\n )\n warning_action = create_alert_rule_trigger_action(\n warning_trigger,\n AlertRuleTriggerAction.Type.EMAIL,\n AlertRuleTriggerAction.TargetType.USER,\n str(self.user.id),\n )\n processor = self.send_update(\n rule, warning_trigger.alert_threshold + 1, timedelta(minutes=-10), subscription=self.sub\n )\n self.assert_trigger_counts(processor, warning_trigger, 0, 0)\n self.assert_trigger_counts(processor, trigger, 0, 0)\n incident = self.assert_active_incident(rule, self.sub)\n self.assert_trigger_exists_with_status(incident, warning_trigger, TriggerStatus.ACTIVE)\n self.assert_trigger_does_not_exist(trigger)\n self.assert_actions_fired_for_incident(\n incident,\n [warning_action],\n [(warning_trigger.alert_threshold + 1, IncidentStatus.WARNING)],\n )\n\n processor = self.send_update(\n rule, trigger.alert_threshold + 1, timedelta(minutes=-9), subscription=self.sub\n )\n self.assert_trigger_counts(processor, trigger, 0, 0)\n self.assert_trigger_counts(processor, warning_trigger, 0, 0)\n incident = self.assert_active_incident(rule, self.sub)\n self.assert_trigger_exists_with_status(incident, warning_trigger, TriggerStatus.ACTIVE)\n self.assert_trigger_exists_with_status(incident, trigger, TriggerStatus.ACTIVE)\n self.assert_actions_fired_for_incident(\n incident, [self.action], [(trigger.alert_threshold + 1, IncidentStatus.CRITICAL)]\n )\n\n processor = self.send_update(\n rule, trigger.alert_threshold - 1, timedelta(minutes=-7), subscription=self.sub\n )\n self.assert_trigger_counts(processor, trigger, 0, 0)\n self.assert_trigger_counts(processor, warning_trigger, 0, 0)\n incident = self.assert_active_incident(rule, self.sub)\n self.assert_trigger_exists_with_status(incident, trigger, TriggerStatus.RESOLVED)\n self.assert_trigger_exists_with_status(incident, 
warning_trigger, TriggerStatus.ACTIVE)\n self.assert_actions_resolved_for_incident(\n incident, [self.action], [(trigger.alert_threshold - 1, IncidentStatus.WARNING)]\n )\n\n processor = self.send_update(\n rule, rule.resolve_threshold - 1, timedelta(minutes=-6), subscription=self.sub\n )\n self.assert_trigger_counts(processor, trigger, 0, 0)\n self.assert_trigger_counts(processor, warning_trigger, 0, 0)\n self.assert_no_active_incident(rule, self.sub)\n self.assert_trigger_exists_with_status(incident, trigger, TriggerStatus.RESOLVED)\n self.assert_trigger_exists_with_status(incident, warning_trigger, TriggerStatus.RESOLVED)\n self.assert_actions_resolved_for_incident(\n incident, [warning_action], [(rule.resolve_threshold - 1, IncidentStatus.CLOSED)]\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 631, "n_words": 166, "vocab_size": 58, "complexity": 1, "nloc": 59, "token_counts": 512, "n_ast_nodes": 734, "n_identifiers": 43, "random_cut": "def test_multiple_triggers(self):\n rule = self.rule\n rule.update(threshold_period=1)\n trigger = self.trigger\n warning_trigger = create_alert_rule_trigger(\n self.rule, WARNING_TRIGGER_LABEL, trigger.alert_threshold - 20\n )\n warning_action = create_alert_rule_trigger_action(\n warning_trigger,\n AlertRuleTriggerAction.Type.EMAIL,\n AlertRuleTriggerAction.TargetType.USER,\n str(self.user.id),\n )\n processor = self.send_update(\n rule, warning_trigger.alert_threshold + 1, timedelta(minutes=-10), subscription=self.sub\n )\n self.assert_trigger_counts(processor, warning_trigger, 0, 0)\n self.assert_trigger_counts(processor, trigger, 0, 0)\n incident = self.assert_active_incident(rule, self.sub)\n self.assert_trigger_exists_with_status(incident, warning_trigger, TriggerStatus.ACTIVE)\n self.assert_trigger_does_not_exist(trigger)\n self.assert_actions_fired_for_incident(\n incident,\n [warning_action],\n [(warning_trigger.alert_threshold + 1, IncidentStatus.WARNING)],\n )\n\n processor = self.send_update(\n rule, trigger.alert_threshold + 1, timedelta(minutes=-9), subscription=self.sub\n )\n self.assert_trigger_counts(processor, trigger, 0, 0)\n self.assert_trigger_counts(processor, warning_trigger, 0, 0)\n incident = self.assert_active_incident(rule, self.sub)\n self.assert_trigger_exists_with_status(incident," }, { "id": 105930, "commit_id": "2945690ea731f85a356220a71cdc630281c676f4", "repo": "datasets", "path": "tests/test_builder.py", "file_name": "test_builder.py", "fun_name": "test_generator_based_builder_download_and_prepare_as_parquet", "commit_message": "Multiprocessed dataset builder [WIP] (#5107)\n\n* multiprocessing-compatible naming scheme and refactor\r\n\r\n* multiprocessed shard writing for GeneratorBasedBuilder\r\n\r\n* multiprocessed shard writing for ArrowBasedBuilder\r\n\r\n* style\r\n\r\n* multiprocessed dataset loading\r\n\r\n* compatibility with non-sharded datasets\r\n\r\n* bugfix\r\n\r\n* bugfix\r\n\r\n* removed unused import\r\n\r\n* fixed bad ordering\r\n\r\n* less misleading tqdm\r\n\r\n* fix gen_kwargs distribution + read shards\r\n\r\n* minor\r\n\r\n* minor2\r\n\r\n* support beam datasets\r\n\r\n* docstrings + minor\r\n\r\n* add iflatmap_unordered for parallel write & progress updates\r\n\r\n* use 1 tqdm bar receiving updates from subprocesses\r\n\r\n* docs\r\n\r\n* add test_iflatmap_unordered\r\n\r\n* style\r\n\r\n* test arrow_reader.py\r\n\r\n* fix test_iflatmap_unordered\r\n\r\n* add Beam 
test_download_and_prepare_sharded\r\n\r\n* test gen_kwargs distribution\r\n\r\n* test download_and_prepare with num_proc\r\n\r\n* style\r\n\r\n* improve test\r\n\r\n* don't close the pool\r\n\r\n* fix multiprocessing on windows\r\n\r\n* keep multiprocessing disabled by default\r\n\r\n* again + docs\r\n\r\n* more docs\r\n\r\n* more docs\r\n\r\n* some var renaming\r\n\r\n* style\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Mario Šaško \r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Mario Šaško \r\n\r\n* added utils/sharding.py\r\n\r\n* style\r\n\r\n* style\r\n\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: Mario Šaško ", "code": "def test_generator_based_builder_download_and_prepare_as_parquet(tmp_path):\n builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path)\n builder.download_and_prepare(file_format=\"parquet\")\n assert builder.info.splits[\"train\"].num_examples == 100\n parquet_path = os.path.join(tmp_path, builder.name, \"default\", \"0.0.0\", f\"{builder.name}-train.parquet\")\n assert os.path.exists(parquet_path)\n assert pq.ParquetFile(parquet_path) is not None\n\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 41, "n_words": 24, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 74, "n_ast_nodes": 127, "n_identifiers": 18, "random_cut": "def test_generator_based_builder_download_and_prepare_as_parquet(tmp_path):\n builder = DummyGeneratorBasedBuilder(cache_dir=tmp_path)\n builder.download_and_prepare(file_format=\"parquet\")\n assert builder.info.splits[\"train\"].num_examples == 100\n parquet_path = os.path.join(tmp_path, builder.name, \"default\", \"0.0.0\", f\"{builder.name}-train.parquet\")\n " }, { "id": 274898, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/mixed_precision/autocast_variable.py", "file_name": "autocast_variable.py", "fun_name": "scatter_sub", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n return self._apply_update(\n self._variable.scatter_sub, sparse_delta, use_locking, name\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 36, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 32, "n_ast_nodes": 44, "n_identifiers": 7, "random_cut": "def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n return self._apply_update(\n self" }, { "id": 102289, "commit_id": "baeca11a21e285d66ec3e4103c29dfd0b0245b85", "repo": "pytorch", "path": "test/test_linalg.py", "file_name": "test_linalg.py", "fun_name": "test_inverse", "commit_message": "Remove random_fullrank_matrix_distinc_singular_value (#68183)\n\nSummary:\nPull Request resolved: https://github.com/pytorch/pytorch/pull/68183\n\nWe do so in favour of\n`make_fullrank_matrices_with_distinct_singular_values` as this latter\none not only has an even longer name, but also generates inputs\ncorrectly for them to work with the PR that tests noncontig inputs\nlatter in this stack.\n\nWe also heavily simplified the generation of samples for the SVD, as it was\nfairly convoluted and it was not generating the inputs correclty for\nthe noncontiguous test.\n\nTo do the transition, we also needed 
to fix the following issue, as it was popping\nup in the tests:\n\nFixes https://github.com/pytorch/pytorch/issues/66856\n\ncc jianyuh nikitaved pearu mruberry walterddr IvanYashchuk xwang233 Lezcano\n\nTest Plan: Imported from OSS\n\nReviewed By: ngimel\n\nDifferential Revision: D32684853\n\nPulled By: mruberry\n\nfbshipit-source-id: e88189c8b67dbf592eccdabaf2aa6d2e2f7b95a4", "code": "def test_inverse(self, device, dtype):\n make_fullrank = make_fullrank_matrices_with_distinct_singular_values\n make_arg = partial(make_fullrank, device=device, dtype=dtype)\n", "url": "https://github.com/pytorch/pytorch.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 25, "n_words": 12, "vocab_size": 11, "complexity": 4, "nloc": 20, "token_counts": 175, "n_ast_nodes": 38, "n_identifiers": 8, "random_cut": "def test_inverse(self, device, dtype):\n make_fullrank = make_fullrank_matrices_with_distinct_singular_value" }, { "id": 264783, "commit_id": "82706eb3a68e963d7ac089478788b87892d4ee79", "repo": "netbox", "path": "netbox/dcim/models/cables.py", "file_name": "cables.py", "fun_name": "save", "commit_message": "Migrate CablePath to use two-dimensional array", "code": "def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n # Save the flattened nodes list\n self._nodes = flatten_path(self.path)\n\n # TODO\n # Record a direct reference to this CablePath on its originating object\n # model = self.origin._meta.model\n # model.objects.filter(pk=self.origin.pk).update(_path=self.pk)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 83, "n_words": 35, "vocab_size": 30, "complexity": 1, "nloc": 3, "token_counts": 33, "n_ast_nodes": 58, "n_identifiers": 8, "random_cut": "def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n # Save the flattened nodes list\n self._nodes = flatten_path(self.path)\n\n " }, { "id": 313748, "commit_id": "576de9ac4052c90b8737e41110d05f06f41d000e", "repo": "core", "path": "homeassistant/components/wallbox/number.py", "file_name": "number.py", "fun_name": "max_value", "commit_message": "Migrate NumberEntity u-z to native_value (#73488)", "code": "def native_max_value(self) -> float:\n \n return cast(float, self._coordinator.data[CHARGER_MAX_AVAILABLE_POWER_KEY])\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 36, "n_identifiers": 7, "random_cut": "def native_max_value(self) -> float:\n \n return cast(float," }, { "id": 133014, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/collective/collective_group/nccl_util.py", "file_name": "nccl_util.py", "fun_name": "get_tensor_n_elements", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_tensor_n_elements(tensor):\n \n if isinstance(tensor, cupy.ndarray) or isinstance(tensor, numpy.ndarray):\n return tensor.size\n if torch_available():\n if isinstance(tensor, torch.Tensor):\n return torch.numel(tensor)\n raise ValueError(\n \"Unsupported tensor type. Got: {}. 
Supported \"\n \"GPU tensor types are: torch.Tensor, \"\n \"cupy.ndarray.\".format(type(tensor))\n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 95, "n_words": 34, "vocab_size": 27, "complexity": 5, "nloc": 11, "token_counts": 66, "n_ast_nodes": 112, "n_identifiers": 14, "random_cut": "def get_tensor_n_elements(tensor):\n \n if isinstance(tensor, cupy.ndarray) or isinstance(tensor, numpy.ndarray):\n return tensor.size\n if torch_available():\n if isinstance(tensor, torch.Tensor):\n return torch.numel(tensor)\n raise ValueError(\n \"Unsupported tensor type. Got: {}. Supported \"\n \"GPU tensor types are: torch.Tensor, \"\n " }, { "id": 261655, "commit_id": "40d7d880eddaf3a9a5e37ba2a8206caf22744926", "repo": "scikit-learn", "path": "examples/compose/plot_transformed_target.py", "file_name": "plot_transformed_target.py", "fun_name": "compute_score", "commit_message": "FEA add PredictionErrorDisplay (#18020)\n\nCo-authored-by: jeremie du boisberranger \r\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Christian Lorentzen ", "code": "def compute_score(y_true, y_pred):\n return {\n \"R2\": f\"{r2_score(y_true, y_pred):.3f}\",\n \"MedAE\": f\"{median_absolute_error(y_true, y_pred):.3f}\",\n }\n\n\n# %%\nfrom sklearn.compose import TransformedTargetRegressor\nfrom sklearn.linear_model import RidgeCV\nfrom sklearn.metrics import PredictionErrorDisplay\n\nf, (ax0, ax1) = plt.subplots(1, 2, sharey=True)\n\nridge_cv = RidgeCV().fit(X_train, y_train)\ny_pred_ridge = ridge_cv.predict(X_test)\n\nridge_cv_with_trans_target = TransformedTargetRegressor(\n regressor=RidgeCV(), func=np.log1p, inverse_func=np.expm1\n).fit(X_train, y_train)\ny_pred_ridge_with_trans_target = ridge_cv_with_trans_target.predict(X_test)\n\nPredictionErrorDisplay.from_predictions(\n y_test,\n y_pred_ridge,\n kind=\"actual_vs_predicted\",\n ax=ax0,\n scatter_kwargs={\"alpha\": 0.5},\n)\nPredictionErrorDisplay.from_predictions(\n y_test,\n y_pred_ridge_with_trans_target,\n kind=\"actual_vs_predicted\",\n ax=ax1,\n scatter_kwargs={\"alpha\": 0.5},\n)\n\n# Add the score in the legend of each axis\nfor ax, y_pred in zip([ax0, ax1], [y_pred_ridge, y_pred_ridge_with_trans_target]):\n for name, score in compute_score(y_test, y_pred).items():\n ax.plot([], [], \" \", label=f\"{name}={score}\")\n ax.legend(loc=\"upper left\")\n\nax0.set_title(\"Ridge regression \\n without target transformation\")\nax1.set_title(\"Ridge regression \\n with target transformation\")\nf.suptitle(\"Synthetic data\", y=1.05)\nplt.tight_layout()\n\n# %%\n# Real-world data set\n#####################\n#\n# In a similar manner, the Ames housing data set is used to show the impact\n# of transforming the targets before learning a model. 
In this example, the\n# target to be predicted is the selling price of each house.\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.preprocessing import quantile_transform\n\names = fetch_openml(name=\"house_prices\", as_frame=True, parser=\"pandas\")\n# Keep only numeric columns\nX = ames.data.select_dtypes(np.number)\n# Remove columns with NaN or Inf values\nX = X.drop(columns=[\"LotFrontage\", \"GarageYrBlt\", \"MasVnrArea\"])\n# Let the price be in k$\ny = ames.target / 1000\ny_trans = quantile_transform(\n y.to_frame(), n_quantiles=900, output_distribution=\"normal\", copy=True\n).squeeze()\n\n# %%\n# A :class:`~sklearn.preprocessing.QuantileTransformer` is used to normalize\n# the target distribution before applying a\n# :class:`~sklearn.linear_model.RidgeCV` model.\nf, (ax0, ax1) = plt.subplots(1, 2)\n\nax0.hist(y, bins=100, density=True)\nax0.set_ylabel(\"Probability\")\nax0.set_xlabel(\"Target\")\nax0.set_title(\"Target distribution\")\n\nax1.hist(y_trans, bins=100, density=True)\nax1.set_ylabel(\"Probability\")\nax1.set_xlabel(\"Target\")\nax1.set_title(\"Transformed target distribution\")\n\nf.suptitle(\"Ames housing data: selling price\", y=1.05)\nplt.tight_layout()\n\n# %%\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)\n\n# %%\n# The effect of the transformer is weaker than on the synthetic data. However,\n# the transformation results in an increase in :math:`R^2` and large decrease\n# of the MedAE. The residual plot (predicted target - true target vs predicted\n# target) without target transformation takes on a curved, 'reverse smile'\n# shape due to residual values that vary depending on the value of predicted\n# target. With target transformation, the shape is more linear indicating\n# better model fit.\nfrom sklearn.preprocessing import QuantileTransformer\n\nf, (ax0, ax1) = plt.subplots(2, 2, sharey=\"row\", figsize=(6.5, 8))\n\nridge_cv = RidgeCV().fit(X_train, y_train)\ny_pred_ridge = ridge_cv.predict(X_test)\n\nridge_cv_with_trans_target = TransformedTargetRegressor(\n regressor=RidgeCV(),\n transformer=QuantileTransformer(n_quantiles=900, output_distribution=\"normal\"),\n).fit(X_train, y_train)\ny_pred_ridge_with_trans_target = ridge_cv_with_trans_target.predict(X_test)\n\n# plot the actual vs predicted values\nPredictionErrorDisplay.from_predictions(\n y_test,\n y_pred_ridge,\n kind=\"actual_vs_predicted\",\n ax=ax0[0],\n scatter_kwargs={\"alpha\": 0.5},\n)\nPredictionErrorDisplay.from_predictions(\n y_test,\n y_pred_ridge_with_trans_target,\n kind=\"actual_vs_predicted\",\n ax=ax0[1],\n scatter_kwargs={\"alpha\": 0.5},\n)\n\n# Add the score in the legend of each axis\nfor ax, y_pred in zip([ax0[0], ax0[1]], [y_pred_ridge, y_pred_ridge_with_trans_target]):\n for name, score in compute_score(y_test, y_pred).items():\n ax.plot([], [], \" \", label=f\"{name}={score}\")\n ax.legend(loc=\"upper left\")\n\nax0[0].set_title(\"Ridge regression \\n without target transformation\")\nax0[1].set_title(\"Ridge regression \\n with target transformation\")\n\n# plot the residuals vs the predicted values\nPredictionErrorDisplay.from_predictions(\n y_test,\n y_pred_ridge,\n kind=\"residual_vs_predicted\",\n ax=ax1[0],\n scatter_kwargs={\"alpha\": 0.5},\n)\nPredictionErrorDisplay.from_predictions(\n y_test,\n y_pred_ridge_with_trans_target,\n kind=\"residual_vs_predicted\",\n ax=ax1[1],\n scatter_kwargs={\"alpha\": 0.5},\n)\nax1[0].set_title(\"Ridge regression \\n without target transformation\")\nax1[1].set_title(\"Ridge regression \\n with target 
transformation\")\n\nf.suptitle(\"Ames housing data: selling price\", y=1.05)\nplt.tight_layout()\nplt.show()\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 555, "n_words": 497, "vocab_size": 243, "complexity": 1, "nloc": 5, "token_counts": 20, "n_ast_nodes": 1312, "n_identifiers": 81, "random_cut": "def compute_score(y_true, y_pred):\n return {\n \"R2\": f\"{r2_score(y_true, y_pred):.3f}\",\n \"MedAE\": f\"{median_absolute_error(y_true, y_pred):.3f}\",\n }\n\n\n# %%\nfrom sklearn.compose import TransformedTargetRegressor\nfrom sklearn.linear_model import RidgeCV\nfrom sklearn.metrics import PredictionErrorDisplay\n\nf, (ax0, ax1) = plt.subplots(1, 2, sharey=True)\n\nridge_cv = RidgeCV().fit(X_train, y_train)\ny_pred_ridge = ridge_cv.predict(X_test)\n\nridge_cv_with_trans_target = TransformedTargetRegressor(\n regressor=RidgeCV(), func=np.log1p, inverse_func=np.expm1\n).fit(X_train, y_train)\ny_pred_ridge_with_trans_target = ridge_cv_with_trans_target.predict(X_test)\n\nPredictionErrorDisplay.from_predictions(\n y_test,\n y_pred_ridge,\n kind=\"actual_vs_predicted\",\n ax=ax0,\n scatter_kwargs={\"alpha\": 0.5},\n)\nPredictionErrorDisplay.from_predictions(\n y_test,\n y_pred_ridge_with_trans_target,\n kind=\"actual_vs_predicted\",\n ax=ax1,\n scatter_kwargs={\"alpha\": 0.5},\n)\n\n# Add the score in the legend of each axis\nfor ax, y_pred in zip([ax0, ax1], [y_pred_ridge, y_pred_ridge_with_trans_target]):\n for name, score in compute_score(y_test, y_pred).items():\n ax.plot([], [], \" \", label=f\"{name}={score}\")\n ax.legend(loc=\"upper left\")\n\nax0.set_title(\"Ridge regression \\n without target transformation\")\nax1.set_title(\"Ridge regression \\n with target transformation\")\nf.suptitle(\"Synthetic data\", y=1.05)\nplt.tight_layout()\n\n# %%\n# Real-world data set\n#####################\n#\n# In a similar manner, the Ames housing data set is used to show the impact\n# of transforming the targets before learning a model. 
In this example, the\n# target to be predicted is the selling price of each house.\nfrom sklearn.datasets import fetch_openml\nfrom sklearn.preprocessing import quantile_transform\n\names = fetch_openml(name=\"house_prices\", as_frame=True, parser=\"pandas\")\n# Keep only numeric columns\nX = ames.data.select_dtypes(np.number)\n# Remove columns with NaN or Inf values\nX = X.drop(columns=[\"LotFrontage\", \"GarageYrBlt\", \"MasVnrArea\"])\n# Let the price be in k$\ny = ames.target / 1000\ny_trans = quantile_transform(\n y.to_frame(), n_quantiles=900, output_distribution=\"normal\", copy=True\n).squeeze()\n\n# %%\n# A :class:`~sklearn.preprocessing.QuantileTransformer` is used to normalize\n# the target distribution before applying a\n# :class:`~sklearn.linear_model.RidgeCV` model.\nf, (ax0, ax1) = plt.subplots(1, 2)\n\nax0.hist(y, bins=100, density=True)\nax0.set_ylabel(\"Probability\")\nax0.set_xlabel(\"Target\")\nax0.set_title(\"Target distribution\")\n\nax1.hist(y_trans, bins=100, density=True)\nax1.set_ylabel(\"Probability\")\nax1.set_xlabel(\"Targ" }, { "id": 104703, "commit_id": "8caed0c1e7b9658f08c10c8b90eb203b2cedc8e4", "repo": "datasets", "path": "datasets/xtreme/xtreme.py", "file_name": "xtreme.py", "fun_name": "generate_examples", "commit_message": "Support streaming xtreme dataset for PAWS-X config (#4132)\n\n* Support streaming xtreme dataset for PAWS-X config\r\n\r\n* Align tasks in dataset card", "code": "def generate_examples(config=None, filepath=None, filename=None):\n lang = config.name.split(\".\")[1]\n for path, file in filepath:\n if f\"/{lang}/\" in path and path.endswith(filename):\n lines = (line.decode(\"utf-8\") for line in file)\n data = csv.reader(lines, delimiter=\"\\t\")\n next(data) # skip header\n for id_, row in enumerate(data):\n if len(row) == 4:\n yield id_, {\n \"sentence1\": row[1],\n \"sentence2\": row[2],\n \"label\": row[3],\n }\n\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 283, "n_words": 52, "vocab_size": 43, "complexity": 7, "nloc": 14, "token_counts": 122, "n_ast_nodes": 201, "n_identifiers": 22, "random_cut": "def generate_examples(config=None, filepath=None, filename=None):\n lang = config.name.split(\".\")[1]\n for path, file in filepath:\n if f\"/{lang}/\" in path and path.endswith(filename):\n lines = (line.decode(\"utf-8\") for line in file)\n data = csv.reader(lines, delimiter=\"\\t\")\n next(data) # skip header\n for id_, row in enumerate(data):\n if len(row) == 4:\n yield id_, {\n \"sentence1\": row[1],\n \"sentence2\": row[2],\n \"label\": row[3],\n }\n\n" }, { "id": 32968, "commit_id": "2ab790e82d0759b667cd848a4d49e6ad65e15d59", "repo": "transformers", "path": "src/transformers/models/donut/feature_extraction_donut.py", "file_name": "feature_extraction_donut.py", "fun_name": "rotate_image", "commit_message": "Add Donut (#18488)\n\n* First draft\r\n\r\n* Improve script\r\n\r\n* Update script\r\n\r\n* Make conversion work\r\n\r\n* Add final_layer_norm attribute to Swin's config\r\n\r\n* Add DonutProcessor\r\n\r\n* Convert more models\r\n\r\n* Improve feature extractor and convert base models\r\n\r\n* Fix bug\r\n\r\n* Improve integration tests\r\n\r\n* Improve integration tests and add model to README\r\n\r\n* Add doc test\r\n\r\n* Add feature extractor to docs\r\n\r\n* Fix integration tests\r\n\r\n* Remove register_buffer\r\n\r\n* Fix toctree and add missing attribute\r\n\r\n* Add DonutSwin\r\n\r\n* Make conversion script 
work\r\n\r\n* Improve conversion script\r\n\r\n* Address comment\r\n\r\n* Fix bug\r\n\r\n* Fix another bug\r\n\r\n* Remove deprecated method from docs\r\n\r\n* Make Swin and Swinv2 untouched\r\n\r\n* Fix code examples\r\n\r\n* Fix processor\r\n\r\n* Update model_type to donut-swin\r\n\r\n* Add feature extractor tests, add token2json method, improve feature extractor\r\n\r\n* Fix failing tests, remove integration test\r\n\r\n* Add do_thumbnail for consistency\r\n\r\n* Improve code examples\r\n\r\n* Add code example for document parsing\r\n\r\n* Add DonutSwin to MODEL_NAMES_MAPPING\r\n\r\n* Add model to appropriate place in toctree\r\n\r\n* Update namespace to appropriate organization\r\n\r\nCo-authored-by: Niels Rogge ", "code": "def rotate_image(self, image, size):\n if not isinstance(image, Image.Image):\n image = self.to_pil_image(image)\n\n if (size[1] > size[0] and image.width > image.height) or (size[1] < size[0] and image.width < image.height):\n image = self.rotate(image, angle=-90, expand=True)\n\n return image\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 76, "n_words": 34, "vocab_size": 24, "complexity": 6, "nloc": 6, "token_counts": 88, "n_ast_nodes": 131, "n_identifiers": 12, "random_cut": "def rotate_image(self, image, size):\n if not isin" }, { "id": 274593, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/losses_test.py", "file_name": "losses_test.py", "fun_name": "test_ragged_tensors_3d", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_ragged_tensors_3d(self):\n # shape [2, 1, None]\n y_true = tf.ragged.constant([[[1, 1]], [[0]]])\n # shape [2, 1, None, 2]\n y_pred = tf.ragged.constant(\n [[[[0.1, 0.9], [0.1, 0.9]]], [[[0.9, 0.1]]]]\n )\n cce_obj = losses.SparseCategoricalCrossentropy()\n loss = cce_obj(y_true, y_pred)\n self.assertAlmostEqual(self.evaluate(loss), 0.1054, 3)\n\n\n@test_combinations.generate(test_combinations.combine(mode=[\"graph\", \"eager\"]))", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@test_combinations.generate(test_combinations.combine(mode=[\"graph\", \"eager\"]))", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 105, "n_words": 40, "vocab_size": 33, "complexity": 1, "nloc": 8, "token_counts": 109, "n_ast_nodes": 174, "n_identifiers": 17, "random_cut": "def test_ragged_tensors_3d(self):\n # shape [2, 1, None]\n y_true = tf.ragged.constant([[[1, 1]], [[0]]])\n # shape [2, 1, None, 2]\n y_pred = tf.ragged.constant(\n [[[[0.1, 0.9], [0.1, 0.9]]]," }, { "id": 15082, "commit_id": "09b439be4c7b8d1ef31ad1cbb3688f9ac48dcdcd", "repo": "ccxt", "path": "python/ccxt/deribit.py", "file_name": "deribit.py", "fun_name": "fetch_markets", "commit_message": "1.66.37\n\n[ci skip]", "code": "def fetch_markets(self, params={}):\n currenciesResponse = self.publicGetGetCurrencies(params)\n #\n # {\n # jsonrpc: '2.0',\n # result: [\n # {\n # withdrawal_priorities: [\n # {value: 0.15, name: 'very_low'},\n # {value: 1.5, name: 'very_high'},\n # ],\n # withdrawal_fee: 0.0005,\n # min_withdrawal_fee: 0.0005,\n # min_confirmations: 1,\n # fee_precision: 4,\n # currency_long: 'Bitcoin',\n # currency: 'BTC',\n # coin_type: 'BITCOIN'\n # }\n # ],\n # usIn: 1583761588590479,\n # usOut: 1583761588590544,\n # usDiff: 65,\n # testnet: False\n # }\n #\n currenciesResult = self.safe_value(currenciesResponse, 'result', [])\n result 
= []\n for i in range(0, len(currenciesResult)):\n currencyId = self.safe_string(currenciesResult[i], 'currency')\n request = {\n 'currency': currencyId,\n }\n instrumentsResponse = self.publicGetGetInstruments(self.extend(request, params))\n #\n # {\n # jsonrpc: '2.0',\n # result: [\n # {\n # tick_size: 0.0005,\n # taker_commission: 0.0004,\n # strike: 300,\n # settlement_period: 'week',\n # quote_currency: 'USD',\n # option_type: 'call',\n # min_trade_amount: 1,\n # maker_commission: 0.0004,\n # kind: 'option',\n # is_active: True,\n # instrument_name: 'ETH-13MAR20-300-C',\n # expiration_timestamp: 1584086400000,\n # creation_timestamp: 1582790403000,\n # contract_size: 1,\n # base_currency: 'ETH'\n # },\n # ],\n # usIn: 1583761889500586,\n # usOut: 1583761889505066,\n # usDiff: 4480,\n # testnet: False\n # }\n #\n instrumentsResult = self.safe_value(instrumentsResponse, 'result', [])\n for k in range(0, len(instrumentsResult)):\n market = instrumentsResult[k]\n id = self.safe_string(market, 'instrument_name')\n baseId = self.safe_string(market, 'base_currency')\n quoteId = self.safe_string(market, 'quote_currency')\n settleId = quoteId\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n settle = self.safe_currency_code(settleId)\n kind = self.safe_string(market, 'kind')\n settlementPeriod = self.safe_value(market, 'settlement_period')\n swap = (settlementPeriod == 'perpetual')\n future = not swap and (kind == 'future')\n option = (kind == 'option')\n symbol = quote + '/' + base + ':' + settle\n expiry = self.safe_integer(market, 'expiration_timestamp')\n strike = None\n optionType = None\n type = 'swap'\n if option or future:\n symbol = symbol + '-' + self.yymmdd(expiry, '')\n if option:\n type = 'option'\n strike = self.safe_number(market, 'strike')\n optionType = self.safe_string(market, 'option_type')\n symbol = symbol + ':' + self.number_to_string(strike) + ':' + optionType\n else:\n type = 'future'\n minTradeAmount = self.safe_number(market, 'min_trade_amount')\n tickSize = self.safe_number(market, 'tick_size')\n result.append({\n 'id': id,\n 'symbol': symbol,\n 'base': base,\n 'quote': quote,\n 'settle': settle,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': settleId,\n 'type': type,\n 'spot': False,\n 'margin': False,\n 'swap': swap,\n 'future': future,\n 'option': option,\n 'contract': True,\n 'linear': False,\n 'inverse': True,\n 'taker': self.safe_number(market, 'taker_commission'),\n 'maker': self.safe_number(market, 'maker_commission'),\n 'contractSize': self.safe_number(market, 'contract_size'),\n 'active': self.safe_value(market, 'is_active'),\n 'expiry': expiry,\n 'expiryDatetime': self.iso8601(expiry),\n 'strike': strike,\n 'optionType': optionType,\n 'precision': {\n 'amount': minTradeAmount,\n 'price': tickSize,\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': minTradeAmount,\n 'max': None,\n },\n 'price': {\n 'min': tickSize,\n 'max': None,\n },\n 'cost': {\n 'min': None,\n 'max': None,\n },\n },\n 'info': market,\n })\n return result\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 3131, "n_words": 407, "vocab_size": 220, "complexity": 7, "nloc": 92, "token_counts": 549, "n_ast_nodes": 993, "n_identifiers": 46, "random_cut": "def fetch_markets(self, params={}):\n currenciesResponse = self.publicGetGetCurrencies(params)\n #\n # {\n # jsonrpc: '2.0',\n # result: [\n # {\n # 
withdrawal_priorities: [\n # {value: 0.15, name: 'very_low'},\n # {value: 1.5, name: 'very_high'},\n # ],\n # withdrawal_fee: 0.0005,\n # min_withdrawal_fee: 0.0005,\n # min_confirmations: 1,\n # fee_precision: 4,\n # currency_long: 'Bitcoin',\n # currency: 'BTC',\n # coin_type: 'BITCOIN'\n # }\n # ],\n # usIn: 1583761588590479,\n # usOut: 1583761588590544,\n # usDiff: 65,\n # testnet: False\n # }\n #\n currenciesResult = self.safe_value(currenciesResponse, 'result', [])\n result = []\n for i in range(0, len(currenciesResult)):\n currencyId = self.safe_string(currenciesResult[i], 'currency')\n request = {\n 'currency': currencyId,\n }\n instrumentsResponse = self.publicGetGetInstruments(self.extend(request, params))\n #\n # {\n # jsonrpc: '2.0',\n # result: [\n # {\n # tick_size: 0.0005,\n # taker_commission: 0.0004,\n # strike: 300,\n # settlement_period: 'week',\n # quote_currency: 'USD',\n # " }, { "id": 97723, "commit_id": "f2e775086eb653cf8c4680a2bdd90ee707e30ae0", "repo": "sentry", "path": "src/sentry/search/events/builder.py", "file_name": "builder.py", "fun_name": "validate_orderby_clause", "commit_message": "feat(mep): Validate orderby for mep (#32943)\n\n- This validates the orderby for mep queries to check that we aren't\r\n ordering by something that cannot be ordered", "code": "def validate_orderby_clause(self) -> None:\n \n for orderby in self.orderby:\n if isinstance(orderby.exp, Column) and orderby.exp.subscriptable == \"tags\":\n raise IncompatibleMetricsQuery(\"Can't orderby tags\")\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 59, "n_words": 19, "vocab_size": 18, "complexity": 4, "nloc": 6, "token_counts": 38, "n_ast_nodes": 65, "n_identifiers": 8, "random_cut": "def validate_orderby_clause(self) -> None:\n \n for orderby in self.orderby:\n if isinstance(orderby.exp, Column) and orderby.exp.subscriptable == \"tags\":\n raise IncompatibleMetricsQuery(\"Can't orderby tags\")\n" }, { "id": 6764, "commit_id": "5c3b4475a02aaa340a6e11d4302d29d4b7eccedf", "repo": "ludwig", "path": "ludwig/utils/data_utils.py", "file_name": "data_utils.py", "fun_name": "read_spss", "commit_message": "Use pandas instead of dask to read excel (#2005)\n\nhttps://github.com/ludwig-ai/ludwig/pull/2005", "code": "def read_spss(data_fp, df_lib):\n # https://github.com/dask/dask/issues/9055\n if df_lib.__name__ == DASK_MODULE_NAME:\n logger.warning(\"Falling back to pd.read_spss() since dask backend does not support it\")\n return pd.read_spss(data_fp)\n\n\n@spread", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "@spread", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 37, "n_words": 23, "vocab_size": 23, "complexity": 2, "nloc": 4, "token_counts": 27, "n_ast_nodes": 50, "n_identifiers": 9, "random_cut": "def read_spss(data_fp, df_lib):\n # https://github.com/dask/dask/issues/9055\n if df_lib.__name__ == DASK_MODULE_NAME:\n logger.warning(\"Falling back to pd.read_spss() since dask backend does not support it\")\n return pd.read_spss(data_fp)\n\n\n@spread" }, { "id": 222055, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ctypes/test/test_loading.py", "file_name": "test_loading.py", "fun_name": "test_find", "commit_message": "add python 3.10.4 for windows", "code": "def test_find(self):\n for name in (\"c\", \"m\"):\n lib = find_library(name)\n if lib:\n cdll.LoadLibrary(lib)\n CDLL(lib)\n", "url": 
"https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 72, "n_words": 14, "vocab_size": 14, "complexity": 3, "nloc": 6, "token_counts": 33, "n_ast_nodes": 57, "n_identifiers": 8, "random_cut": "def test_find(self):\n for name" }, { "id": 100247, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/sentry/incidents/endpoints/test_project_alert_rule_index.py", "file_name": "test_project_alert_rule_index.py", "fun_name": "test_simple_crash_rate_alerts_for_users", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_simple_crash_rate_alerts_for_users(self):\n self.valid_alert_rule.update(\n {\n \"aggregate\": \"percentage(users_crashed, users) AS _crash_rate_alert_aggregate\",\n }\n )\n with self.feature([\"organizations:incidents\", \"organizations:performance-view\"]):\n resp = self.get_success_response(\n self.organization.slug, self.project.slug, status_code=201, **self.valid_alert_rule\n )\n assert \"id\" in resp.data\n alert_rule = AlertRule.objects.get(id=resp.data[\"id\"])\n assert resp.data == serialize(alert_rule, self.user)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 149, "n_words": 34, "vocab_size": 30, "complexity": 1, "nloc": 13, "token_counts": 93, "n_ast_nodes": 154, "n_identifiers": 19, "random_cut": "def test_simple_crash_rate_alerts_for_users(self):\n self.valid_alert_rule.update(\n {\n \"aggregate\": \"percentage(users_crashed, users) AS _crash_rate_alert_aggregate\",\n }\n )\n with self.feature([\"organizations:incidents\", \"organizations:performance-view\"]):\n resp = self.get_success_response(\n self.organization.slug, self.project.slug, status_code=201, **self.valid_alert_rule\n )\n assert \"id\" in resp.data\n " }, { "id": 91499, "commit_id": "284e980df0018f8baee659999268bdd4c7d08255", "repo": "sentry", "path": "tests/sentry/utils/test_committers.py", "file_name": "test_committers.py", "fun_name": "test_no_commits", "commit_message": "ref: replace self.assertRaises with pytest.raises (#35685)\n\n* add flake8 plugin to detect assertRaises\r\n\r\n* ref: replace self.assertRaises with pytest.raises\r\n\r\n* non-sed fixes", "code": "def test_no_commits(self):\n event = self.store_event(\n data={\n \"timestamp\": iso_format(before_now(seconds=1)),\n \"message\": \"Kaboom!\",\n \"stacktrace\": {\n \"frames\": [\n {\n \"function\": \"handle_set_commits\",\n \"abs_path\": \"/usr/src/sentry/src/sentry/tasks.py\",\n \"module\": \"sentry.tasks\",\n \"in_app\": True,\n \"lineno\": 30,\n \"filename\": \"sentry/tasks.py\",\n },\n {\n \"function\": \"set_commits\",\n \"abs_path\": \"/usr/src/sentry/src/sentry/models/release.py\",\n \"module\": \"sentry.models.release\",\n \"in_app\": True,\n \"lineno\": 39,\n \"filename\": \"sentry/models/release.py\",\n },\n ]\n },\n \"tags\": {\"sentry:release\": self.release.version},\n },\n project_id=self.project.id,\n )\n GroupRelease.objects.create(\n group_id=event.group.id, project_id=self.project.id, release_id=self.release.id\n )\n\n with pytest.raises(Commit.DoesNotExist):\n get_serialized_event_file_committers(self.project, event)\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 677, "n_words": 59, "vocab_size": 45, "complexity": 1, "nloc": 34, "token_counts": 164, "n_ast_nodes": 288, 
"n_identifiers": 24, "random_cut": "def test_no_commits(self):\n event = self.store_event(\n data={\n \"timestamp\": iso_format(before_now(seconds=1)),\n \"message\": \"Kaboom!\",\n \"stacktrace\": {\n \"frames\": [\n {\n \"function\": \"handle_set_commits\",\n \"abs_path\": \"/usr/src/sentry/src/sentry/tasks.py\",\n \"module\": \"sentry.tasks\",\n \"in_app\": True,\n \"lineno\": 30,\n \"filename\": \"sentry/tasks.py\",\n },\n {\n \"function\": \"set_commits\",\n \"abs_path\": \"/usr/src/sentry/src/sentry/models/release.py\",\n \"module\": \"sentry.models.release\",\n \"in_a" }, { "id": 213412, "commit_id": "562846b6dce660054181cae7b05bbadd75489795", "repo": "ivy", "path": "ivy_tests/test_core/test_random.py", "file_name": "test_random.py", "fun_name": "test_seed", "commit_message": "renamed dtype_str arg to dtype for all methods.", "code": "def test_seed(seed_val, dtype, tensor_fn, dev_str, call):\n # smoke test\n ivy.seed(seed_val)\n # compilation test\n if call in [helpers.torch_call]:\n # pytorch scripting does not support functions with None return\n return\n if not ivy.wrapped_mode():\n helpers.assert_compilable(ivy.seed)\n\n\n# shuffle\n@pytest.mark.parametrize(\n \"x\", [[1, 2, 3], [[1., 4.], [2., 5.], [3., 6.]]])\n@pytest.mark.parametrize(\n \"dtype\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"x\", [[1, 2, 3], [[1., 4.], [2., 5.], [3., 6.]]])\n@pytest.mark.parametrize(\n \"dtype\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 92, "n_words": 52, "vocab_size": 43, "complexity": 3, "nloc": 6, "token_counts": 45, "n_ast_nodes": 179, "n_identifiers": 17, "random_cut": "def test_seed(seed_val, dtype, tensor_fn, dev_str, call):\n # smoke test\n ivy.seed(seed_val)\n # compilation test\n if call in [helpers.torch_call]:\n # pytorch scripting does not support functions with None return\n return\n if not ivy.wrapped_mode():\n helpers.assert_compilable(ivy.seed)\n\n\n# shuffl" }, { "id": 37498, "commit_id": "57e6464ac9a31156f1c93e59107323e6ec01309e", "repo": "transformers", "path": "src/transformers/testing_utils.py", "file_name": "testing_utils.py", "fun_name": "require_tf", "commit_message": "Update all require decorators to use skipUnless when possible (#16999)", "code": "def require_tf(test_case):\n \n return unittest.skipUnless(is_tf_available(), \"test requires TensorFlow\")(test_case)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 37, "n_identifiers": 5, "random_cut": "def require_tf(test_case):\n \n return unittest.skipUnless(is_tf_available()" }, { "id": 17261, "commit_id": "ff158ebe7e1ed14772139737d13bb5edfd6d9430", "repo": "ccxt", "path": "python/ccxt/async_support/vcc.py", "file_name": "vcc.py", "fun_name": "describe", "commit_message": "1.71.83\n\n[ci skip]", "code": "def describe(self):\n return self.deep_extend(super(vcc, self).describe(), {\n 'id': 'vcc',\n 'name': 'VCC Exchange',\n 'countries': ['VN'], # Vietnam\n 'rateLimit': 1000,\n 'version': 'v3',\n 'has': {\n 'CORS': None,\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'addMargin': False,\n 'cancelAllOrders': True,\n 
'cancelOrder': True,\n 'createOrder': True,\n 'createReduceOnlyOrder': False,\n 'editOrder': None,\n 'fetchBalance': True,\n 'fetchBorrowRate': False,\n 'fetchBorrowRateHistories': False,\n 'fetchBorrowRateHistory': False,\n 'fetchBorrowRates': False,\n 'fetchBorrowRatesPerSymbol': False,\n 'fetchClosedOrders': True,\n 'fetchCurrencies': True,\n 'fetchDepositAddress': True,\n 'fetchDeposits': True,\n 'fetchFundingHistory': False,\n 'fetchFundingRate': False,\n 'fetchFundingRateHistory': False,\n 'fetchFundingRates': False,\n 'fetchIndexOHLCV': False,\n 'fetchIsolatedPositions': False,\n 'fetchLeverage': False,\n 'fetchMarkets': True,\n 'fetchMarkOHLCV': False,\n 'fetchMyTrades': True,\n 'fetchOHLCV': True,\n 'fetchOpenOrders': True,\n 'fetchOrder': True,\n 'fetchOrderBook': True,\n 'fetchOrders': None,\n 'fetchPosition': False,\n 'fetchPositions': False,\n 'fetchPositionsRisk': False,\n 'fetchPremiumIndexOHLCV': False,\n 'fetchTicker': 'emulated',\n 'fetchTickers': True,\n 'fetchTrades': True,\n 'fetchTradingFee': True,\n 'fetchTradingFees': None,\n 'fetchTransactions': True,\n 'fetchWithdrawals': True,\n 'reduceMargin': False,\n 'setLeverage': False,\n 'setMarginMode': False,\n 'setPositionMode': False,\n },\n 'timeframes': {\n '1m': '60000',\n '5m': '300000',\n '15m': '900000',\n '30m': '1800000',\n '1h': '3600000',\n '2h': '7200000',\n '4h': '14400000',\n '6h': '21600000',\n '12h': '43200000',\n '1d': '86400000',\n '1w': '604800000',\n },\n 'urls': {\n 'logo': 'https://user-images.githubusercontent.com/1294454/100545356-8427f500-326c-11eb-9539-7d338242d61b.jpg',\n 'api': {\n 'public': 'https://api.vcc.exchange',\n 'private': 'https://api.vcc.exchange',\n },\n 'www': 'https://vcc.exchange',\n 'doc': [\n 'https://vcc.exchange/api',\n ],\n 'fees': 'https://support.vcc.exchange/hc/en-us/articles/360016401754',\n 'referral': 'https://vcc.exchange?ref=l4xhrH',\n },\n 'api': {\n 'public': {\n 'get': [\n 'summary',\n 'exchange_info',\n 'assets', # Available Currencies\n 'ticker', # Ticker list for all symbols\n 'trades/{market_pair}', # Recent trades\n 'orderbook/{market_pair}', # Orderbook\n 'chart/bars', # Candles\n 'tick_sizes',\n ],\n },\n 'private': {\n 'get': [\n 'user',\n 'balance', # Get trading balance\n 'orders/{order_id}', # Get a single order by order_id\n 'orders/open', # Get open orders\n 'orders', # Get closed orders\n 'orders/trades', # Get trades history\n 'deposit-address', # Generate or get deposit address\n 'transactions', # Get deposit/withdrawal history\n ],\n 'post': [\n 'orders', # Create new order\n ],\n 'put': [\n 'orders/{order_id}/cancel', # Cancel order\n 'orders/cancel-by-type',\n 'orders/cancel-all',\n ],\n },\n },\n 'fees': {\n 'trading': {\n 'tierBased': False,\n 'percentage': True,\n 'maker': self.parse_number('0.002'),\n 'taker': self.parse_number('0.002'),\n },\n },\n 'exceptions': {\n 'exact': {},\n 'broad': {\n 'limit may not be greater than': BadRequest, # {\"message\":\"The given data was invalid.\",\"errors\":{\"limit\":[\"The limit may not be greater than 1000.\"]}}\n 'Insufficient balance': InsufficientFunds, # {\"message\":\"Insufficient balance.\"}\n 'Unauthenticated': AuthenticationError, # {\"message\":\"Unauthenticated.\"} # wrong api key\n 'signature is invalid': AuthenticationError, # {\"message\":\"The given data was invalid.\",\"errors\":{\"signature\":[\"HMAC signature is invalid\"]}}\n 'Timeout': RequestTimeout, # {\"code\":504,\"message\":\"Gateway Timeout\",\"description\":\"\"}\n 'Too many requests': RateLimitExceeded, # 
{\"code\":429,\"message\":\"Too many requests\",\"description\":\"Too many requests\"}\n 'quantity field is required': InvalidOrder, # {\"message\":\"The given data was invalid.\",\"errors\":{\"quantity\":[\"The quantity field is required when type is market.\"]}}\n 'price field is required': InvalidOrder, # {\"message\":\"The given data was invalid.\",\"errors\":{\"price\":[\"The price field is required when type is limit.\"]}}\n 'error_security_level': PermissionDenied, # {\"message\":\"error_security_level\"}\n 'pair is invalid': BadSymbol, # {\"message\":\"The given data was invalid.\",\"errors\":{\"coin\":[\"Trading pair is invalid\",\"Trading pair is offline\"]}}\n # {\"message\":\"The given data was invalid.\",\"errors\":{\"type\":[\"The selected type is invalid.\"]}}\n # {\"message\":\"The given data was invalid.\",\"errors\":{\"trade_type\":[\"The selected trade type is invalid.\"]}}\n 'type is invalid': InvalidOrder,\n 'Data not found': OrderNotFound, # {\"message\":\"Data not found\"}\n },\n },\n })\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 2884, "n_words": 446, "vocab_size": 259, "complexity": 1, "nloc": 147, "token_counts": 523, "n_ast_nodes": 1000, "n_identifiers": 15, "random_cut": "def describe(self):\n return self.deep_extend(super(vcc, self).describe(), {\n 'id': 'vcc',\n 'name': 'VCC Exchange',\n 'countries': ['VN'], # Vietnam\n 'rateLimit': 1000,\n 'version': 'v3',\n 'has': {\n 'CORS': None,\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'addMargin': False,\n 'cancelAllOrders': True,\n 'cancelOrder': True,\n 'createOrder': True,\n 'createReduceOnlyOrder': False,\n 'editOrder': None,\n 'fetchBalance': True,\n 'fetchBorrowRate': False,\n 'fetchBorrowRateHistories': False,\n 'fetchBorrowRateHistory': False,\n 'fetchBorrowRates': False,\n 'fetchBorrowRatesPerSymbol': False,\n 'fetchClosedOrders': True,\n 'fetchCurrencies': True,\n 'fetchDepositAddress': True,\n 'fetchDeposits': True,\n 'fetchFundingHistory': False,\n 'fetchFundingRate': False,\n 'fetchFundingRateHistory': False,\n 'fetchFundingRates': False,\n 'fetchIndexOHLCV': False,\n 'fetchIsolatedPositions': False,\n 'fetchLeverage': False,\n 'fetchMarkets': True,\n 'fetchMarkOHLCV': False,\n 'fetchMyTrades': True,\n 'fetchOHLCV': True,\n 'fetchOpenOrders': True,\n 'fetchOrder': True,\n 'fetchOrderBook': True,\n 'fetchOrders': None,\n 'fetchPosition': False,\n 'fetchPositions': False,\n 'fetchPositionsRisk': False,\n 'fetchPremiumIndexOHLCV': False,\n 'fetchTicker': 'emulated',\n 'fetchTickers': True,\n 'fetchTrades': True,\n 'fetchTradingFee': True,\n 'fetchTradingFees': None,\n 'fetchTransactions': True,\n 'fetchWithdrawals': True,\n 'reduceMargin': False,\n 'setLeverage': False,\n 'setMarginMode': False,\n 'setPositionMode': False,\n },\n 'timeframes': {\n '1m': '60000',\n '5m': '300000',\n '15m': '900000',\n '30m': '1800000',\n '1h': '3600000',\n '2h': '7200000',\n '4h': '14400000',\n '6h': '21600000',\n '12h': '43200000',\n '1d': '86400000',\n '1w': '604800000',\n },\n 'urls': {\n 'logo': 'https://user-images.githubusercontent.com/1294454/100545356-8427f500-326c-11eb-9539-7d338242d61b.jpg',\n 'api': {\n 'public': 'https://api.vcc.exchange',\n 'private': 'https://api.vcc.exchange',\n },\n 'www': 'https://vcc.exchange',\n 'doc': [\n 'https://vcc.exchange/api',\n ],\n 'fees': 'https://support.vcc.exchange/hc/en-us/articles/360016401754',\n 'referral': 
'https://vcc.exchange?ref=l4xhrH',\n },\n 'api': {\n 'public': {\n 'get': [\n 'summary',\n 'exchange_info',\n 'assets', # Available Currencies\n 'ticker', # Ticker list for all symbols\n 'trades/{market_pair}', # Recent trades\n 'orderbook/{market_pair}', # Orderbook\n 'chart/bars', # Candles\n 'tick_sizes',\n ],\n },\n 'private': {\n 'get': [\n 'user',\n 'balance', # Get trading balance\n 'orders/{order_id}', # Get a single order by order_id\n 'orders/open', # Get open orders\n 'orders', # Get closed orders\n 'orders/trades', # Get trades history\n 'deposit-address', # Generate or get deposit address\n 'transactions', # Get deposit/withdrawal history\n ],\n 'post': [\n 'orders', # Create new order\n ],\n 'put': [\n 'orders/{order_id}/cancel', # Cancel order\n 'orders/cancel-by-type',\n 'orders/cancel-all',\n ],\n },\n },\n 'fees': {\n 'trading': {\n 'tierBased': False,\n 'percentage': True,\n 'maker': self.parse_number('0.002'),\n 'taker': self.parse_number('0.002'),\n },\n },\n 'exceptions': {\n 'exact': {},\n 'broad': {\n 'limit may not be greater than': BadRequest, # {\"message\":\"The given data was invalid.\",\"errors\":{\"limit\":[\"The limit may not be greater than 1000.\"]}}\n 'Insufficient balance': InsufficientFunds, # {\"message\":\"Insufficient balance.\"}\n 'Unauthenticated': AuthenticationError, # {\"message\":\"Unauthenticated.\"} # wrong api key\n 'signature is invalid': AuthenticationError, # {\"message\":\"The given data was invalid.\",\"errors\":{\"signature\":[\"HMAC signature is invalid\"]}}\n 'Timeout': RequestTimeout, # {\"code\":504,\"message\":\"Gateway Timeout\",\"description\":\"\"}\n 'Too many requests': RateLimitExceeded, # {\"code\":429,\"message\":\"Too many requests\",\"description\":\"Too many requests\"}\n 'quantity field is required': InvalidOrder, # {\"message\":\"The given data was invalid.\",\"errors\":{\"quantity\":[\"The quantity field is required when type is market.\"]}}\n 'price field is required': InvalidOrder, # {\"message\":\"The given data was invalid.\",\"errors\":{\"price\":[\"The price field is required when type is limit.\"]}}\n 'error_security_level': PermissionDenied, # {\"message\":\"error_security_level\"}\n 'pair is invalid': BadSymbol, # {\"message\":\"The given data was invalid.\",\"errors\":{\"coin\":[\"Trading pair is invalid\",\"Trading pa" }, { "id": 318888, "commit_id": "85b210ebf61d4525cae3311eaae91012c8986cf7", "repo": "paperless-ngx", "path": "src/documents/tests/test_matchables.py", "file_name": "test_matchables.py", "fun_name": "test_tach_invalid_regex", "commit_message": "Reduces number of warnings from testing from 165 to 128. 
In doing so, fixes a few minor things in the decrypt and export commands", "code": "def test_tach_invalid_regex(self):\n self._test_matching(\"[\", \"MATCH_REGEX\", [], [\"Don't match this\"])\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 36, "n_identifiers": 3, "random_cut": "def test_tach_invalid_regex(self):\n self._test_matching(\"[\", \"MATCH_REGEX\", [], [\"Don't match this\"])\n" }, { "id": 63231, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py", "file_name": "__init__.py", "fun_name": "ensure_directory", "commit_message": "upd; format", "code": "def ensure_directory(path):\n \n dirname = os.path.dirname(path)\n py31compat.makedirs(dirname, exist_ok=True)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 16, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 44, "n_identifiers": 7, "random_cut": "def ensure_directory(path):\n \n dirname = os.path.dirname(path)\n py31compat.makedirs(dirname, exist_ok=True)\n\n" }, { "id": 31764, "commit_id": "6c8f4c9a938a09749ea1b19a5fa2a8dd27e99a29", "repo": "transformers", "path": "tests/models/groupvit/test_modeling_groupvit.py", "file_name": "test_modeling_groupvit.py", "fun_name": "setUp", "commit_message": "Adding GroupViT Models (#17313)\n\n* add group vit and fixed test (except slow)\r\n\r\n* passing slow test\r\n\r\n* addressed some comments\r\n\r\n* fixed test\r\n\r\n* fixed style\r\n\r\n* fixed copy\r\n\r\n* fixed segmentation output\r\n\r\n* fixed test\r\n\r\n* fixed relative path\r\n\r\n* fixed copy\r\n\r\n* add ignore non auto configured\r\n\r\n* fixed docstring, add doc\r\n\r\n* fixed copies\r\n\r\n* Apply suggestions from code review\r\n\r\nmerge suggestions\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* resolve comment, renaming model\r\n\r\n* delete unused attr\r\n\r\n* use fix copies\r\n\r\n* resolve comments\r\n\r\n* fixed attn\r\n\r\n* remove unused vars\r\n\r\n* refactor tests\r\n\r\n* resolve final comments\r\n\r\n* add demo notebook\r\n\r\n* fixed inconsitent default\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* rename stage->stages\r\n\r\n* Create single GroupViTEncoderLayer class\r\n\r\n* Update conversion script\r\n\r\n* Simplify conversion script\r\n\r\n* Remove cross-attention class in favor of GroupViTAttention\r\n\r\n* Convert other model as well, add processor to conversion script\r\n\r\n* addressing final comment\r\n\r\n* fixed args\r\n\r\n* Update src/transformers/models/groupvit/modeling_groupvit.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Niels Rogge ", "code": "def setUp(self):\n self.model_tester = 
GroupViTVisionModelTester(self)\n self.config_tester = ConfigTester(\n self, config_class=GroupViTVisionConfig, has_text_modality=False, hidden_size=37\n )\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 44, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 33, "n_ast_nodes": 50, "n_identifiers": 10, "random_cut": "def setUp(self):\n self.model_tester = GroupViTVisionModelTester(self)\n self.config_tester = ConfigTester(\n self, config_class=GroupViTVisionConfig," }, { "id": 251087, "commit_id": "dd61b21ce37c112c3b1e35774396da9ad0d51b76", "repo": "mitmproxy", "path": "test/mitmproxy/addons/test_dns_resolver.py", "file_name": "test_dns_resolver.py", "fun_name": "test_simple", "commit_message": "[dns] offline dns_resolve tests at 100% coverage", "code": "async def test_simple(monkeypatch):\n monkeypatch.setattr(dns_resolver, \"resolve_message\", lambda _, __: asyncio.sleep(0, \"resp\"))\n\n dr = dns_resolver.DnsResolver()\n with taddons.context(dr, proxyserver.Proxyserver()) as tctx:\n f = tflow.tdnsflow()\n await dr.dns_request(f)\n assert f.response\n\n tctx.options.dns_mode = \"reverse:8.8.8.8\"\n f = tflow.tdnsflow()\n await dr.dns_request(f)\n assert not f.response\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 93, "n_words": 36, "vocab_size": 27, "complexity": 1, "nloc": 11, "token_counts": 94, "n_ast_nodes": 160, "n_identifiers": 22, "random_cut": "async def test_simple(monkeypatch):\n monkeypatch.setattr(dns_resolver, \"resolve_message\", lambda _, __: asyncio.sleep(0, \"resp\"))\n\n dr = dns_resolver.DnsResolver()\n with taddons.context(dr, proxyserver.Proxyserver()) as tctx:\n f = tflow.tdnsflow()\n await dr.dns_request(f)\n assert f.response\n\n tctx.options.dns_mode = \"reverse:8.8.8.8\"\n f " }, { "id": 60924, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/operations/freeze.py", "file_name": "freeze.py", "fun_name": "__str__", "commit_message": "upd; format", "code": "def __str__(self):\n # type: () -> str\n req = self.req\n if self.editable:\n req = f'-e {req}'\n return '\\n'.join(list(self.comments) + [str(req)]) + '\\n'\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 60, "n_words": 22, "vocab_size": 19, "complexity": 2, "nloc": 5, "token_counts": 40, "n_ast_nodes": 76, "n_identifiers": 8, "random_cut": "def __str__(self):\n # type: () -> str\n req = self.req\n if self.editable:\n req = f'-e {req}'\n return '\\n'.join(list(self.comments) " }, { "id": 294185, "commit_id": "dc8e87a6f70439f9830d93d03c53d6ff098a4861", "repo": "core", "path": "tests/components/alexa/test_capabilities.py", "file_name": "test_capabilities.py", "fun_name": "test_api_set_color_rgb", "commit_message": "Exclude hidden entities from alexa (#68555)", "code": "async def test_api_set_color_rgb(hass):\n \n request = get_new_request(\"Alexa.ColorController\", \"SetColor\", \"light#test\")\n\n # add payload\n request[\"directive\"][\"payload\"][\"color\"] = {\n \"hue\": \"120\",\n \"saturation\": \"0.612\",\n \"brightness\": \"0.342\",\n }\n\n # setup test devices\n hass.states.async_set(\n \"light.test\", \"off\", {\"friendly_name\": \"Test light\", 
\"supported_features\": 16}\n )\n\n call_light = async_mock_service(hass, \"light\", \"turn_on\")\n\n msg = await smart_home.async_handle_message(hass, get_default_config(hass), request)\n await hass.async_block_till_done()\n\n assert \"event\" in msg\n msg = msg[\"event\"]\n\n assert len(call_light) == 1\n assert call_light[0].data[\"entity_id\"] == \"light.test\"\n assert call_light[0].data[\"rgb_color\"] == (33, 87, 33)\n assert msg[\"header\"][\"name\"] == \"Response\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 151, "n_words": 72, "vocab_size": 57, "complexity": 1, "nloc": 19, "token_counts": 150, "n_ast_nodes": 276, "n_identifiers": 15, "random_cut": "async def test_api_set_color_rgb(hass):\n \n request = get_new_request(\"Alexa.ColorController\", \"SetColor\", \"light#test\")\n\n # add payload\n request[\"directive\"][\"payload\"][\"color\"] = {\n " }, { "id": 1093, "commit_id": "859b728f41b728447b88b54479e1600a4996dc09", "repo": "PySyft", "path": "packages/syft/tests/syft/core/tensor/adp/private_method_test.py", "file_name": "private_method_test.py", "fun_name": "test_string_entity", "commit_message": "Remove autograd, old Mechanism, continued renaming entities to datasubject", "code": "def test_string_entity() -> None:\n x = sy.Tensor(np.array([1, 2, 3, 4], dtype=DEFAULT_INT_NUMPY_TYPE))\n out = x.private(min_val=0, max_val=5, data_subjects=\"bob\")\n assert out.child.entity.name == \"bob\"\n\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 28, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 4, "token_counts": 59, "n_ast_nodes": 91, "n_identifiers": 16, "random_cut": "def test_string_entity() -> None:\n x = sy.Tensor(np.array([1, 2, 3, 4], dtype=DEFAULT_INT_NUMPY_TYPE))\n out = x.private(min_val=0, max_val=5, data_subjects=\"bob\")\n " }, { "id": 38977, "commit_id": "e46d808a1b6cb7e04cb2806e38547b1e3e50c25a", "repo": "DeepSpeed", "path": "deepspeed/runtime/utils.py", "file_name": "utils.py", "fun_name": "has_overflow", "commit_message": "MoE inference + PR-MoE model support (#1705)\n\nCo-authored-by: Reza Yazdani \r\nCo-authored-by: Zhewei Yao \r\nCo-authored-by: Ammar Ahmad Awan \r\nCo-authored-by: Jeff Rasley \r\nCo-authored-by: Samyam Rajbhandari ", "code": "def has_overflow(self, params, has_moe_params=None):\n if has_moe_params is None:\n has_moe_params = self.has_moe_params\n overflow = self.has_overflow_serial(params)\n # Since each model parallel GPU carries only part of the model,\n # make sure overflow flag is synced across all the model parallel GPUs\n overflow_gpu = torch.cuda.ByteTensor([overflow])\n # torch.distributed.all_reduce(overflow_gpu,\n # op=torch.distributed.ReduceOp.MAX,\n # group=mpu.get_model_parallel_group())\n if has_moe_params:\n # All reduce this across expert_parallel_group, so that if an expert\n # overflows, we detect it here\n dist.all_reduce(overflow_gpu,\n op=dist.ReduceOp.MAX,\n group=groups.get_max_expert_parallel_group())\n if self.zero_reduce_scatter:\n torch.distributed.all_reduce(overflow_gpu,\n op=torch.distributed.ReduceOp.MAX,\n group=torch.distributed.group.WORLD)\n elif self.mpu is not None:\n if self.deepspeed is not None:\n using_pipeline = hasattr(self.deepspeed,\n 'pipeline_enable_backward_allreduce')\n if (using_pipeline\n and self.deepspeed.pipeline_enable_backward_allreduce is False\n ) or (not using_pipeline\n 
and self.deepspeed.enable_backward_allreduce is False):\n torch.distributed.all_reduce(\n overflow_gpu,\n op=torch.distributed.ReduceOp.MAX,\n group=self.mpu.get_data_parallel_group())\n torch.distributed.all_reduce(overflow_gpu,\n op=torch.distributed.ReduceOp.MAX,\n group=self.mpu.get_model_parallel_group())\n elif self.deepspeed is not None and self.deepspeed.enable_backward_allreduce is False:\n torch.distributed.all_reduce(overflow_gpu,\n op=torch.distributed.ReduceOp.MAX,\n group=torch.distributed.group.WORLD)\n\n overflow = overflow_gpu[0].item()\n return bool(overflow)\n\n # `x` is a torch.Tensor", "url": "https://github.com/microsoft/DeepSpeed.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 895, "n_words": 132, "vocab_size": 83, "complexity": 12, "nloc": 34, "token_counts": 266, "n_ast_nodes": 413, "n_identifiers": 31, "random_cut": "def has_overflow(self, params, has_moe_params=None):\n if has_moe_params is None:\n has_moe_params = self.has_moe_params\n overflow = self.has_overflow_serial(params)\n # Since each model parallel GPU carries only part of the model,\n # make sure overflow flag is synced across all the model parallel GPUs\n overflow_gpu = torch.cuda.ByteTensor([overflow])\n # torch.distributed.all_reduce(overflow_gpu,\n # op=torch.distributed.ReduceOp.MAX,\n # group=mpu.get_model_parallel_group())\n if has_moe_params:\n # All reduce this across expert_parallel_group, so that if an expert\n # overflows, we detect it here\n dist.all_reduce(overflow_gpu,\n op=dist.ReduceOp.MAX,\n group=groups.get_max_expert_parallel_group())\n if self.zero" }, { "id": 11237, "commit_id": "07e2ef0a5cd2baf90a0e30c32e5898d1fdfc4d48", "repo": "jina", "path": "jina/parsers/create.py", "file_name": "create.py", "fun_name": "set_new_project_parser", "commit_message": "docs: adapt to 3.0 (#4254)\n\n* docs: comparing alternatives (#4249)\r\n\r\n* docs: fix conflict\r\n\r\n* docs: remove line\r\n\r\n* docs: add docarray logos\r\n\r\n* docs: proper link to docarray\r\n\r\n* docs: change index\r\n\r\n* docs: change reference types ecosystem\r\n\r\n* docs: change comparing\r\n\r\n* docs: update docs/get-started/comparing-to-alternatives.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: update docs/get-started/comparing-to-alternatives.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: update docs/get-started/comparing-to-alternatives.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: update docs/get-started/comparing-to-alternatives.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: update docs/get-started/comparing-to-alternatives.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: update docs/get-started/comparing-to-alternatives.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: update docs/get-started/comparing-to-alternatives.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: update docs/index.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: fix kubernetes docs (#4259)\r\n\r\n* docs: fix kubernetes docs\r\n\r\n* docs: add caution\r\n\r\n* docs: executor documentation refactoring (#4256)\r\n\r\n* fix: fix link to share executors\r\n\r\n* docs: adjust install section for 3.0 (#4265)\r\n\r\n* docs: adjust readme (#4270)\r\n\r\n* docs: async in executors tuto (#4264)\r\n\r\n* docs: move things to how-to (#4271)\r\n\r\n* docs: updating docker-compose docs (#4252)\r\n\r\n* docs: move docker compose\r\n\r\n* docs: caution in kubernetes and 
docker compose (#4272)\r\n\r\n* docs: update gpu guide for jina 3 (#4255)\r\n\r\n* docs: move gpu to how-to (#4273)\r\n\r\n* docs: migration guide to jina 3 (#4263)\r\n\r\n* docs: change index link to how-ot\r\n\r\n* docs: move migrate to get-started (#4274)\r\n\r\n* docs: adapt some kubernetes content (#4275)\r\n\r\n* docs: add architecture overview (#4280)\r\n\r\n* docs: add proto back to API reference (#4281)\r\n\r\n* docs: external executors tutorial (#4267)\r\n\r\n* docs: move external executor how-to (#4283)\r\n\r\n* docs: rephrase comparing to alternatives (#4282)\r\n\r\n* docs: update docs/fundamentals/concepts.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/concepts.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: fix architeceture map legend\r\n\r\n* docs: update docs/fundamentals/architecture-overview.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/architecture-overview.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/architecture-overview.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/architecture-overview.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/architecture-overview.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/architecture-overview.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/architecture-overview.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/architecture-overview.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: index with readme content (#4285)\r\n\r\n* docs(executor): fix grammatical errors (#4284)\r\n\r\n* docs: update docs/fundamentals/executor/index.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/index.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/index.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/index.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/index.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs. 
update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: Update docs/fundamentals/executor/index.md\r\n\r\n* docs: update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/architecture-overview.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: update docs/fundamentals/architecture-overview.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: add containerize executor section (#4288)\r\n\r\n* docs: update docs/fundamentals/architecture-overview.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: update docs/how-to/kubernetes.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: update docs/how-to/sandbox.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: update docs/how-to/sandbox.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: update docs/how-to/sandbox.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: update docs/fundamentals/executor/hub/index.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: apply suggestions from code review\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: update docs/how-to/kubernetes.md\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/how-to/sandbox.md\r\n\r\n* docs: apply suggestions from code review\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: add scale tutorial (#4287)\r\n\r\n* docs: refactor scale how-to (#4289)\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update 
docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-in-flow.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: Update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: update docs/fundamentals/executor/executor-api.md\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: rewrite flow section (#4266)\r\n\r\n* docs: 
refactor flow docs\r\n\r\n* docs: update flow index\r\n\r\n* docs: refactor create a flow section\r\n\r\n* docs: add flow api section\r\n\r\n* docs: some minor polishing\r\n\r\n* docs: add more flow info\r\n\r\n* docs: address comments\r\n\r\n* docs: small refactor flow docs (#4293)\r\n\r\n* docs: fix examples (#4294)\r\n\r\n* docs: small changes to flow (#4297)\r\n\r\n* chore: remove the eah announcement (#4295)\r\n\r\n* docs: polish sandbox tutorial (#4286)\r\n\r\n* docs: add Hub to ecosys (#4300)\r\n\r\n* docs: minor clean up on 3.0 branch (#4301)\r\n\r\n* docs: apply suggestions from code review\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: migration attributes (#4299)\r\n\r\n* docs: use post and not search (#4302)\r\n\r\n* docs: very small change (#4304)\r\n\r\n* docs: add section for extending the http api (#4303)\r\n\r\n* docs: update docs/how-to/sandbox.md\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: restructure content and layout (#4305)\r\n\r\n* docs: why to use flow (#4308)\r\n\r\n* docs: unify docarray import (#4310)\r\n\r\n* docs(readme): polish (#4307)\r\n\r\n* docs: fix snippets (#4311)\r\n\r\n* docs: apply suggestions from code review\r\n\r\nCo-authored-by: CatStark \r\n\r\n* docs: add jina new (#4313)\r\n\r\n* docs: add jina new\r\n\r\n* docs: add jina new\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: rephrase dockerize section (#4298)\r\n\r\n* docs: fix create flow images (#4314)\r\n\r\n* docs: restructure 2 (#4315)\r\n\r\n* docs: keep clean code (#4316)\r\n\r\n* docs: restructure 2\r\n\r\n* docs: restructure 2\r\n\r\n* docs: restructure 2\r\n\r\n* docs: update docs/fundamentals/flow/index.md\r\n\r\n* docs: restructure 2\r\n\r\n* docs: restructure 2\r\n\r\n* docs: restructure 2\r\n\r\n* docs: restructure 2\r\n\r\n* docs: restructure 2\r\n\r\n* docs: add minimum working example (#4321)\r\n\r\n* docs: create landing page for how-to's (#4312)\r\n\r\n* docs(how-to): create landing page\r\n\r\n* docs: add links to executor how-tos\r\n\r\n* docs: add links to deployment how-tos\r\n\r\n* docs: shorten scaling-out description\r\n\r\n* docs: add info box\r\n\r\n* docs(sandbox): optimize pic (#4324)\r\n\r\n* docs: fix inconsistent definition for executor and flow (#4322)\r\n\r\n* docs: fix inconsistent definitions\r\n\r\n* docs: fix inconsistent definitions\r\n\r\n* docs: restructure 2\r\n\r\n* fix(docs): yaml formating (#4327)\r\n\r\n* docs: restructure 2\r\n\r\n* docs: fix formatting (#4329)\r\n\r\n* chore: update banner for docs (#4330)\r\n\r\n* docs: review readme 2 (#4323)\r\n\r\n* docs(sandbox): optimize sandbox pic (#4331)\r\n\r\n* docs: remove jinad from install section (#4333)\r\n\r\n* docs: apply suggestions from code review\r\n\r\nCo-authored-by: CatStark \r\n\r\n* docs: restructure 2\r\n\r\n* docs: restructure 2\r\n\r\n* docs: add what is jina (#4332)\r\n\r\n* docs: add what is jina\r\n\r\n* docs: remove comparing to alternatives document\r\n\r\n* docs: update docs/get-started/what-is-jina.md\r\n\r\nCo-authored-by: cristian \r\n\r\n* docs: update docs/get-started/what-is-jina.md\r\n\r\nCo-authored-by: cristian \r\n\r\n* docs: apply suggestions from code review\r\n\r\nCo-authored-by: cristian \r\n\r\n* docs: add link to docarray\r\n\r\n* docs: apply suggestions from code review\r\n\r\nCo-authored-by: Nan Wang \r\nCo-authored-by: Han Xiao \r\n\r\nCo-authored-by: cristian \r\nCo-authored-by: Nan Wang \r\nCo-authored-by: Han Xiao 
\r\n\r\n* fix(docs): apply black automatically (#4337)\r\n\r\n* docs: fix executor api snippet (#4339)\r\n\r\nCo-authored-by: Sami Jaghouar \r\n\r\n* docs: fix quote\r\n\r\n* fix: blackifiy readme + single quote (#4340)\r\n\r\n* docs: fix quote\r\n\r\n* docs: fix quote\r\n\r\n* docs: fix quote\r\n\r\n* docs: fix quote\r\n\r\n* docs: replace png with svg (#4334)\r\n\r\n* docs: apply suggestions from code review\r\n\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\n\r\n* docs: fix quote\r\n\r\n* docs: add highlighting and more positive phrasing (#4338)\r\n\r\n* docs: fix quote\r\n\r\n* chore: fix typo\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* chore: fix typo\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* chore: fix typo\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* chore: fix typo\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* chore: fix typo\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* chore: fix typo\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* chore: fix typo\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* chore: fix typo\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* chore: fix typo\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* chore: fix typo\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* chore: apply suggestions from code review\r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\n\r\n* docs: apply suggestions from code review\r\n\r\nCo-authored-by: CatStark \r\n\r\n* docs: fix quote\r\n\r\n* chore: fix typo\r\n\r\nCo-authored-by: CatStark \r\n\r\nCo-authored-by: Alex Cureton-Griffiths \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Tobias Jacobowitz \r\nCo-authored-by: samsja <55492238+samsja@users.noreply.github.com>\r\nCo-authored-by: Johannes Messner <44071807+JohannesMessner@users.noreply.github.com>\r\nCo-authored-by: Johannes Messner \r\nCo-authored-by: Roshan Jossy \r\nCo-authored-by: Wang Bo \r\nCo-authored-by: Nan Wang \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>\r\nCo-authored-by: Han Xiao \r\nCo-authored-by: CatStark \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: cristian \r\nCo-authored-by: Han Xiao \r\nCo-authored-by: Sami Jaghouar ", "code": "def set_new_project_parser(parser=None):\n \n if not parser:\n parser = set_base_parser()\n\n parser.add_argument(\n 'name', type=str, help='The name of the project', default='hello-jina'\n )\n return parser\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 49, "n_words": 20, "vocab_size": 19, "complexity": 2, "nloc": 7, "token_counts": 37, "n_ast_nodes": 66, "n_identifiers": 8, "random_cut": "def set_new_project_parser(parser=None):\n \n if not parser:\n parser = set_base_parser()\n\n parser.add_argument(\n 'nam" }, { "id": 6644, "commit_id": "a95f611d582a724740af772ead1fa439b3713124", "repo": "ludwig", "path": "ludwig/combiners/combiners.py", "file_name": "combiners.py", "fun_name": "get_combiner_conds", "commit_message": "fix: Naming scheme cleanup that includes: renaming `ludwig.marshmallow` module to `ludwig.validation` to avoid implicit import errors, and moving `ludwig.utils.schema` into this new module. 
(#1936)\n\n* Rename marshmallow/ folder to marshmallow_schema_utils/, marshmallow_schema_utils.py to utils.py (under folder), update all refs.\r\n\r\n* Rename marshmallow/ folder to marshmallow_schema_utils/, marshmallow_schema_utils.py to utils.py (under folder), update all refs.\r\n\r\n* update extract_schema\r\n\r\n* update generated files.\r\n\r\n* update manifest\r\n\r\n* rename using validation/schema_utils naming\r\n\r\n* update generated files\r\n\r\n* new naming scheme\r\n\r\n* fix imports.\r\n\r\n* rerun extract_schema", "code": "def get_combiner_conds():\n \n combiner_types = sorted(list(combiner_registry.keys()))\n conds = []\n for combiner_type in combiner_types:\n combiner_cls = combiner_registry[combiner_type]\n schema_cls = combiner_cls.get_schema_cls()\n combiner_schema = marshmallow_utils.get_custom_schema_from_marshmallow_class(schema_cls)\n combiner_props = combiner_schema[\"properties\"]\n combiner_cond = marshmallow_utils.create_cond({\"type\": combiner_type}, combiner_props)\n conds.append(combiner_cond)\n return conds\n\n\n# super class to house common properties", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 95, "n_words": 39, "vocab_size": 32, "complexity": 2, "nloc": 11, "token_counts": 76, "n_ast_nodes": 130, "n_identifiers": 18, "random_cut": "def get_combiner_conds():\n \n combiner_types = sorted(list(combiner_registry.keys()))\n conds = []\n for combiner_type in combiner_types:\n " }, { "id": 275849, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/experimental/saving_lib_test.py", "file_name": "saving_lib_test.py", "fun_name": "train_step", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def train_step(self, data):\n tf.print(train_step_message)\n x, y = data\n with tf.GradientTape() as tape:\n y_pred = self(x)\n loss = self.compiled_loss(y, y_pred)\n\n gradients = tape.gradient(loss, self.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n return {}\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 90, "n_words": 27, "vocab_size": 24, "complexity": 1, "nloc": 9, "token_counts": 73, "n_ast_nodes": 118, "n_identifiers": 19, "random_cut": "def train_step(self, data):\n tf.print(train_step_message)\n x, y = data\n with tf.GradientTape() as tape:\n y_pred = s" }, { "id": 31265, "commit_id": "a72f1c9f5b907f96cbb7de3bbb02a1d431d34071", "repo": "transformers", "path": "src/transformers/models/longt5/modeling_flax_longt5.py", "file_name": "modeling_flax_longt5.py", "fun_name": "update_inputs_for_generation", "commit_message": "Add `LongT5` model (#16792)\n\n* Initial commit\r\n\r\n* Make some fixes\r\n\r\n* Make PT model full forward pass\r\n\r\n* Drop TF & Flax implementation, fix copies etc\r\n\r\n* Add Flax model and update some corresponding stuff\r\n\r\n* Drop some TF things\r\n\r\n* Update config and flax local attn\r\n\r\n* Add encoder_attention_type to config\r\n\r\n* .\r\n\r\n* Update docs\r\n\r\n* Do some cleansing\r\n\r\n* Fix some issues -> make style; add some docs\r\n\r\n* Fix position_bias + mask addition + Update tests\r\n\r\n* Fix repo consistency\r\n\r\n* Fix model consistency by removing flax operation over attn_mask\r\n\r\n* [WIP] Add PT TGlobal LongT5\r\n\r\n* .\r\n\r\n* [WIP] Add flax tglobal model\r\n\r\n* 
[WIP] Update flax model to use the right attention type in the encoder\r\n\r\n* Fix flax tglobal model forward pass\r\n\r\n* Make the use of global_relative_attention_bias\r\n\r\n* Add test suites for TGlobal model\r\n\r\n* Fix minor bugs, clean code\r\n\r\n* Fix pt-flax equivalence though not convinced with correctness\r\n\r\n* Fix LocalAttn implementation to match the original impl. + update READMEs\r\n\r\n* Few updates\r\n\r\n* Update: [Flax] improve large model init and loading #16148\r\n\r\n* Add ckpt conversion script accoring to #16853 + handle torch device placement\r\n\r\n* Minor updates to conversion script.\r\n\r\n* Typo: AutoModelForSeq2SeqLM -> FlaxAutoModelForSeq2SeqLM\r\n\r\n* gpu support + dtype fix\r\n\r\n* Apply some suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Patrick von Platen \r\n\r\n* * Remove (de)parallelize stuff\r\n* Edit shape comments\r\n* Update README.md\r\n* make fix-copies\r\n\r\n* Remove caching logic for local & tglobal attention\r\n\r\n* Apply another batch of suggestions from code review\r\n\r\n* Add missing checkpoints\r\n* Format converting scripts\r\n* Drop (de)parallelize links from longT5 mdx\r\n\r\n* Fix converting script + revert config file change\r\n\r\n* Revert \"Remove caching logic for local & tglobal attention\"\r\n\r\nThis reverts commit 2a619828f6ddc3e65bd9bb1725a12b77fa883a46.\r\n\r\n* Stash caching logic in Flax model\r\n\r\n* Make side relative bias used always\r\n\r\n* Drop caching logic in PT model\r\n\r\n* Return side bias as it was\r\n\r\n* Drop all remaining model parallel logic\r\n\r\n* Remove clamp statements\r\n\r\n* Move test files to the proper place\r\n\r\n* Update docs with new version of hf-doc-builder\r\n\r\n* Fix test imports\r\n\r\n* Make some minor improvements\r\n\r\n* Add missing checkpoints to docs\r\n* Make TGlobal model compatible with torch.onnx.export\r\n* Replace some np.ndarray with jnp.ndarray\r\n\r\n* Fix TGlobal for ONNX conversion + update docs\r\n\r\n* fix _make_global_fixed_block_ids and masked neg value\r\n\r\n* update flax model\r\n\r\n* style and quality\r\n\r\n* fix imports\r\n\r\n* remove load_tf_weights_in_longt5 from init and fix copies\r\n\r\n* add slow test for TGlobal model\r\n\r\n* typo fix\r\n\r\n* Drop obsolete is_parallelizable and one warning\r\n\r\n* Update __init__ files to fix repo-consistency\r\n\r\n* fix pipeline test\r\n\r\n* Fix some device placements\r\n\r\n* [wip]: Update tests -- need to generate summaries to update expected_summary\r\n\r\n* Fix quality\r\n\r\n* Update LongT5 model card\r\n\r\n* Update (slow) summarization tests\r\n\r\n* make style\r\n\r\n* rename checkpoitns\r\n\r\n* finish\r\n\r\n* fix flax tests\r\n\r\nCo-authored-by: phungvanduy \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Patrick von Platen \r\nCo-authored-by: patil-suraj ", "code": "def update_inputs_for_generation(self, model_outputs, model_kwargs):\n model_kwargs[\"past_key_values\"] = model_outputs.past_key_values\n return model_kwargs\n\n\nFLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING = \n\n\noverwrite_call_docstring(\n FlaxLongT5ForConditionalGeneration, LONGT5_INPUTS_DOCSTRING + FLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING\n)\nappend_replace_return_docstrings(\n FlaxLongT5ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC\n)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 37, "n_words": 22, "vocab_size": 18, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 66, "n_identifiers": 14, "random_cut": "def update_inputs_for_generation(self, model_outputs, model_kwargs):\n model_kwargs[\"past_key_values\"] = model_outputs.past_key_values\n return model_kwargs\n\n\nFLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING = \n\n\noverwrite_call_docstring(\n FlaxLongT5ForConditionalGeneration, LONGT5_INPUTS_DOCSTRING + FLAX_LONGT5_CONDITIONAL_GENERATION_DOCSTRING\n)\nappend_replace_return_docstrings(\n FlaxLongT5ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC\n)\n" }, { "id": 248462, "commit_id": "1e453053cb12ff084fdcdc2f75c08ced274dff21", "repo": "synapse", "path": "tests/test_visibility.py", "file_name": "test_visibility.py", "fun_name": "_inject_outlier", "commit_message": "Rename storage classes (#12913)", "code": "def _inject_outlier(self) -> EventBase:\n builder = self.event_builder_factory.for_room_version(\n RoomVersions.V1,\n {\n \"type\": \"m.room.member\",\n \"sender\": \"@test:user\",\n \"state_key\": \"@test:user\",\n \"room_id\": TEST_ROOM_ID,\n \"content\": {\"membership\": \"join\"},\n },\n )\n\n event = self.get_success(builder.build(prev_event_ids=[], auth_event_ids=[]))\n event.internal_metadata.outlier = True\n self.get_success(\n self._storage_controllers.persistence.persist_event(\n event, EventContext.for_outlier(self._storage_controllers)\n )\n )\n return event\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 230, "n_words": 37, "vocab_size": 31, "complexity": 1, "nloc": 19, "token_counts": 101, "n_ast_nodes": 171, "n_identifiers": 21, "random_cut": "def _inject_outlier(self) -> EventBase:\n builder = self.event_builder_factory.for_room_version(\n RoomVersions.V1,\n {\n " }, { "id": 56890, "commit_id": "32d4fb18769d663292fb059eda1e15a8628af689", "repo": "prefect", "path": "tests/packaging/test_file_packager.py", "file_name": "test_file_packager.py", "fun_name": "test_file_packager_by_serializer", "commit_message": "Fix packager flow collisions", "code": "async def test_file_packager_by_serializer(serializer):\n packager = FilePackager(serializer=serializer)\n manifest = await packager.package(howdy)\n\n assert isinstance(manifest, FilePackageManifest)\n unpackaged_howdy = await manifest.unpackage()\n assert unpackaged_howdy(\"bro\").result() == \"howdy bro\"\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 36, "n_words": 22, "vocab_size": 18, "complexity": 1, "nloc": 6, "token_counts": 48, "n_ast_nodes": 84, "n_identifiers": 12, "random_cut": "async def test_file_packager_by_serializer(serializer):\n packager = FileP" }, { "id": 303366, "commit_id": "d5695a2d8656d2f9cb4d549c80cad331c914af1f", "repo": "core", "path": "homeassistant/components/homekit_controller/config_flow.py", "file_name": "config_flow.py", "fun_name": "_async_setup_controller", "commit_message": "Fix some homekit_controller pylint warnings and (local only) test failures (#76122)", "code": "async def _async_setup_controller(self) -> None:\n \n self.controller = await async_get_controller(self.hass)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, 
"complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 35, "n_identifiers": 5, "random_cut": "async def _async_setup_controller(self) -> None:\n \n self.controller = await async_get_controller(self.hass)\n" }, { "id": 257028, "commit_id": "834f8c49024063ce17a63e50a9d7cff12f1c4f91", "repo": "haystack", "path": "test/test_preprocessor.py", "file_name": "test_preprocessor.py", "fun_name": "test_remove_substrings", "commit_message": "Change return types of indexing pipeline nodes (#2342)\n\n* Change return types of file converters\r\n\r\n* Change return types of preprocessor\r\n\r\n* Change return types of crawler\r\n\r\n* Adapt utils to functions to new return types\r\n\r\n* Adapt __init__.py to new method names\r\n\r\n* Prevent circular imports\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Let DocStores' run method accept Documents\r\n\r\n* Adapt tests to new return types\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Put \"# type: ignore\" to right place\r\n\r\n* Remove id_hash_keys property from Document primitive\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Adapt tests to new return types and missing id_hash_keys property\r\n\r\n* Fix mypy\r\n\r\n* Fix mypy\r\n\r\n* Adapt PDFToTextOCRConverter\r\n\r\n* Remove id_hash_keys from RestAPI tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Rename tests\r\n\r\n* Remove redundant setting of content_type=\"text\"\r\n\r\n* Add DeprecationWarning\r\n\r\n* Add id_hash_keys to elasticsearch_index_to_document_store\r\n\r\n* Change document type from dict to Docuemnt in PreProcessor test\r\n\r\n* Fix file path in Tutorial 5\r\n\r\n* Remove added output in Tutorial 5\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix file_paths in Tutorial 9 + fix gz files in fetch_archive_from_http\r\n\r\n* Adapt tutorials to new return types\r\n\r\n* Adapt tutorial 14 to new return types\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Change assertions to HaystackErrors\r\n\r\n* Import HaystackError correctly\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def test_remove_substrings():\n document = Document(content=\"This is a header. Some additional text. wiki. Some emoji ✨ 🪲 Weird whitespace\\b\\b\\b.\")\n\n # check that the file contains the substrings we are about to remove\n assert \"This is a header.\" in document.content\n assert \"wiki\" in document.content\n assert \"🪲\" in document.content\n assert \"whitespace\" in document.content\n assert \"✨\" in document.content\n\n preprocessor = PreProcessor(remove_substrings=[\"This is a header.\", \"wiki\", \"🪲\"])\n documents = preprocessor.process(document)\n\n assert \"This is a header.\" not in documents[0].content\n assert \"wiki\" not in documents[0].content\n assert \"🪲\" not in documents[0].content\n assert \"whitespace\" in documents[0].content\n assert \"✨\" in documents[0].content\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 132, "n_words": 91, "vocab_size": 47, "complexity": 1, "nloc": 14, "token_counts": 112, "n_ast_nodes": 199, "n_identifiers": 9, "random_cut": "def test_remove_substrings():\n document = Document(content=\"This is a header. Some additional text. wiki. 
Some emoji ✨ 🪲 Weird whitespace\\b\\b\\b.\")\n\n # check that the file contains the substrings we are about to remove\n assert \"This is a header.\" in document.content\n assert \"wiki\" in document.content\n assert \"🪲\" in document.content\n assert \"whitespace\" in document.content\n assert \"✨\" in document.content\n\n preprocessor = PreProcessor(remove_substrings=[\"This is a header.\", \"wiki\", \"🪲\"])\n documents = preprocessor.process(document)\n\n assert \"This is a header.\" not in documents[0].content\n assert \"wiki\" not in documents[0].content\n assert \"🪲\" n" }, { "id": 312790, "commit_id": "0ea82bdbfb0d58b1af273e39da65cbb9e4af1015", "repo": "core", "path": "homeassistant/components/fivem/__init__.py", "file_name": "__init__.py", "fun_name": "_async_update_data", "commit_message": "Fivem integration (#65089)\n\n* Initial fivem integration setup\r\n\r\n* Use licenseKey for unique ID\r\n\r\n* Create FiveMServer class\r\n\r\n* Create FiveMStatusBinarySensor\r\n\r\n* Fix platform loading\r\n\r\n* Create sensor platform\r\n\r\n* Remove config flow tests\r\n\r\n* Update manifest.json\r\n\r\n* Use attr_ instead or properties in sensors.py\r\n\r\n* Use entry_id as unique_id\r\n\r\n* Move device info to _attr instead of property\r\n\r\n* Register callback in FiveMEntity\r\n\r\n* Create config flow tests\r\n\r\n* Add loggin to fivem\r\n\r\n* Use FiveM in config_flow\r\n\r\n* Use update_coordinator instead of dispatcher\r\n\r\n* Bump fivem-api to 0.1.2\r\n\r\n* Remove leftovers\r\n\r\n* More tests for config flow\r\n\r\n* Add component files to .coveragerc\r\n\r\n* Fix simple comments\r\n\r\n* Add gamename check to config flow\r\n\r\n* Use entity descriptions for sensors\r\n\r\n* Move extra attributes to init\r\n\r\n* Use [] instead of get() for server info\r\n\r\n* Fix error in gamename test", "code": "async def _async_update_data(self) -> dict[str, Any]:\n \n was_online = self.online\n\n try:\n server = await self._fivem.get_server()\n self.online = True\n except FiveMServerOfflineError:\n self.online = False\n\n if was_online and not self.online:\n _LOGGER.warning(\"Connection to '%s:%s' lost\", self.host, self.port)\n elif not was_online and self.online:\n _LOGGER.info(\"Connection to '%s:%s' (re-)established\", self.host, self.port)\n\n if self.online:\n players_list: list[str] = []\n for player in server.players:\n players_list.append(player.name)\n players_list.sort()\n\n resources_list = server.resources\n resources_list.sort()\n\n return {\n NAME_PLAYERS_ONLINE: len(players_list),\n NAME_PLAYERS_MAX: server.max_players,\n NAME_RESOURCES: len(resources_list),\n NAME_STATUS: self.online,\n ATTR_PLAYERS_LIST: players_list,\n ATTR_RESOURCES_LIST: resources_list,\n }\n\n raise UpdateFailed\n\n\n@dataclass", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@dataclass", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 370, "n_words": 78, "vocab_size": 60, "complexity": 8, "nloc": 28, "token_counts": 170, "n_ast_nodes": 272, "n_identifiers": 35, "random_cut": "async def _async_update_data(self) -> dict[str, Any]:\n \n was_online = self.online\n\n try:\n server = await self._fivem.get_server()\n self.online = True\n except FiveMServerOfflineError:\n self.online = False\n\n if was_online and not self.online:\n _LOGGER.warning(\"Connection to '%s:%s' lost\", self.host, self.port)\n elif not was_online and self.online:\n _LOGGER.info(\"Connection to '%s:%s' (re-)established\", self.host, self.port)\n\n if self.online:\n players_list: 
list[str] = []\n for player in server.players:\n players_list.append(player.name)\n players_list.sort()\n\n resources_list = server.resources\n resources_list.sort()\n\n return {\n NAME_PLAYERS_ONLINE: len(players_list),\n NAME_PLAYERS_MAX: server.max_players,\n NAME_RESOURCES: len(resources_list),\n NAME_STATUS: self.online,\n ATTR_PLAYERS" }, { "id": 155013, "commit_id": "9013f54283eb6776920ee3bf527e208a516d086d", "repo": "modin", "path": "modin/pandas/base.py", "file_name": "base.py", "fun_name": "swaplevel", "commit_message": "REFACTOR-#5092: Fix future warning for `set_axis` function (#5093)\n\nCo-authored-by: Vasily Litvinov \r\nSigned-off-by: Myachev ", "code": "def swaplevel(self, i=-2, j=-1, axis=0): # noqa: PR01, RT01, D200\n \n axis = self._get_axis_number(axis)\n idx = self.index if axis == 0 else self.columns\n return self.set_axis(idx.swaplevel(i, j), axis=axis)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 55, "n_words": 26, "vocab_size": 24, "complexity": 2, "nloc": 4, "token_counts": 59, "n_ast_nodes": 91, "n_identifiers": 10, "random_cut": "def swaplevel(self, i=-2, j=-1, axis=0): # noqa: PR01, RT01, D200\n " }, { "id": 85610, "commit_id": "f1c3fa1660fa8144b5965f0375f5abec122243bf", "repo": "sentry", "path": "tests/sentry/models/test_projectownership.py", "file_name": "test_projectownership.py", "fun_name": "test_get_autoassign_owners_no_codeowners_or_issueowners", "commit_message": "feat(issues): Store assignee integration in group activity (#38526)\n\n- When a user is assigned via slack or ms teams, add the integration to activity data\r\n- When assigned via codeowners, add the integration and rule as a string", "code": "def test_get_autoassign_owners_no_codeowners_or_issueowners(self):\n assert ProjectOwnership.get_autoassign_owners(self.project.id, {}) == (\n False,\n [],\n False,\n None,\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 69, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 7, "token_counts": 31, "n_ast_nodes": 45, "n_identifiers": 6, "random_cut": "def test_get_autoassign_owners_no_codeowners_or_issueowners(self):\n assert ProjectOw" }, { "id": 88281, "commit_id": "4c9c03f8a9416b53bf74f2d77df43499973ecf89", "repo": "sentry", "path": "src/sentry/search/events/datasets/metrics.py", "file_name": "metrics.py", "fun_name": "function_converter", "commit_message": "chore(metrics): Remove tag values are strings option (#41092)\n\n- This removes the tag value option since we're now fully on using tag\r\nvalues as strings instead of indexed integers\r\n- This is needed so we can start on wildcard searching", "code": "def function_converter(self) -> Mapping[str, fields.MetricsFunction]:\n \n resolve_metric_id = {\n \"name\": \"metric_id\",\n \"fn\": lambda args: self.resolve_metric(args[\"column\"]),\n }\n\n function_converter = {\n function.name: function\n for function in [\n # Note while the discover version of apdex, count_miserable, user_misery\n # accepts arguments, because this is precomputed with tags no parameters\n # are available\n fields.MetricsFunction(\n \"apdex\",\n optional_args=[fields.NullableNumberRange(\"satisfaction\", 0, None)],\n snql_distribution=self._resolve_apdex_function,\n default_result_type=\"number\",\n ),\n fields.MetricsFunction(\n \"avg\",\n required_args=[\n fields.MetricArg(\n \"column\",\n 
allowed_columns=constants.METRIC_DURATION_COLUMNS,\n )\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: Function(\n \"avgIf\",\n [\n Column(\"value\"),\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n args[\"metric_id\"],\n ],\n ),\n ],\n alias,\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count_miserable\",\n required_args=[\n fields.MetricArg(\n \"column\", allowed_columns=[\"user\"], allow_custom_measurements=False\n )\n ],\n optional_args=[fields.NullableNumberRange(\"satisfaction\", 0, None)],\n calculated_args=[resolve_metric_id],\n snql_set=self._resolve_count_miserable_function,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count_unparameterized_transactions\",\n snql_distribution=lambda args, alias: Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"and\",\n [\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n Function(\n \"equals\",\n [\n self.builder.column(\"transaction\"),\n self.builder.resolve_tag_value(\"<< unparameterized >>\"),\n ],\n ),\n ],\n ),\n ],\n alias,\n ),\n # Not yet exposed, need to add far more validation around tag&value\n private=True,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count_null_transactions\",\n snql_distribution=lambda args, alias: Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"and\",\n [\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n Function(\n \"equals\",\n [\n self.builder.column(\"transaction\"),\n \"\",\n ],\n ),\n ],\n ),\n ],\n alias,\n ),\n private=True,\n ),\n fields.MetricsFunction(\n \"count_has_transaction_name\",\n snql_distribution=lambda args, alias: Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"and\",\n [\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n Function(\n \"and\",\n [\n Function(\n \"notEquals\",\n [\n self.builder.column(\"transaction\"),\n \"\",\n ],\n ),\n Function(\n \"notEquals\",\n [\n self.builder.column(\"transaction\"),\n self.builder.resolve_tag_value(\n \"<< unparameterized >>\"\n ),\n ],\n ),\n ],\n ),\n ],\n ),\n ],\n alias,\n ),\n private=True,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"user_misery\",\n optional_args=[\n fields.NullableNumberRange(\"satisfaction\", 0, None),\n fields.with_default(\n constants.MISERY_ALPHA, fields.NumberRange(\"alpha\", 0, None)\n ),\n fields.with_default(\n constants.MISERY_BETA, fields.NumberRange(\"beta\", 0, None)\n ),\n ],\n calculated_args=[],\n snql_set=self._resolve_user_misery_function,\n default_result_type=\"number\",\n ),\n fields.MetricsFunction(\n \"p50\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(\n args, alias, 0.5\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p75\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: 
self._resolve_percentile(\n args, alias, 0.75\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p90\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(\n args, alias, 0.90\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p95\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(\n args, alias, 0.95\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p99\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(\n args, alias, 0.99\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p100\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(args, alias, 1),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"max\",\n required_args=[\n fields.MetricArg(\"column\"),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: Function(\n \"maxIf\",\n [\n Column(\"value\"),\n Function(\"equals\", [Column(\"metric_id\"), args[\"metric_id\"]]),\n ],\n alias,\n ),\n result_type_fn=self.reflective_result_type(),\n ),\n fields.MetricsFunction(\n \"min\",\n required_args=[\n fields.MetricArg(\"column\"),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: Function(\n \"minIf\",\n [\n Column(\"value\"),\n Function(\"equals\", [Column(\"metric_id\"), args[\"metric_id\"]]),\n ],\n alias,\n ),\n result_type_fn=self.reflective_result_type(),\n ),\n fields.MetricsFunction(\n \"sum\",\n required_args=[\n fields.MetricArg(\"column\"),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: Function(\n \"sumIf\",\n [\n Column(\"value\"),\n Function(\"equals\", [Column(\"metric_id\"), args[\"metric_id\"]]),\n ],\n alias,\n ),\n result_type_fn=self.reflective_result_type(),\n ),\n fields.MetricsFunction(\n \"sumIf\",\n required_args=[\n fields.ColumnTagArg(\"if_col\"),\n fields.FunctionArg(\"if_val\"),\n ],\n calculated_args=[\n {\n \"name\": \"resolved_val\",\n \"fn\": lambda args: self.builder.resolve_tag_value(args[\"if_val\"]),\n }\n ],\n snql_counter=lambda args, alias: Function(\n \"sumIf\",\n [\n Column(\"value\"),\n Function(\"equals\", [args[\"if_col\"], args[\"resolved_val\"]]),\n ],\n alias,\n ),\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"percentile\",\n required_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", 
allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n fields.NumberRange(\"percentile\", 0, 1),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=self._resolve_percentile,\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"count_unique\",\n required_args=[\n fields.MetricArg(\n \"column\", allowed_columns=[\"user\"], allow_custom_measurements=False\n )\n ],\n calculated_args=[resolve_metric_id],\n snql_set=lambda args, alias: Function(\n \"uniqIf\",\n [\n Column(\"value\"),\n Function(\"equals\", [Column(\"metric_id\"), args[\"metric_id\"]]),\n ],\n alias,\n ),\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"uniq\",\n snql_set=lambda args, alias: Function(\n \"uniq\",\n [Column(\"value\")],\n alias,\n ),\n ),\n fields.MetricsFunction(\n \"uniqIf\",\n required_args=[\n fields.ColumnTagArg(\"if_col\"),\n fields.FunctionArg(\"if_val\"),\n ],\n calculated_args=[\n {\n \"name\": \"resolved_val\",\n \"fn\": lambda args: self.builder.resolve_tag_value(args[\"if_val\"]),\n }\n ],\n snql_set=lambda args, alias: Function(\n \"uniqIf\",\n [\n Column(\"value\"),\n Function(\"equals\", [args[\"if_col\"], args[\"resolved_val\"]]),\n ],\n alias,\n ),\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count\",\n snql_distribution=lambda args, alias: Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n ],\n alias,\n ),\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count_web_vitals\",\n required_args=[\n fields.MetricArg(\n \"column\",\n allowed_columns=[\n \"measurements.fp\",\n \"measurements.fcp\",\n \"measurements.lcp\",\n \"measurements.fid\",\n \"measurements.cls\",\n ],\n allow_custom_measurements=False,\n ),\n fields.SnQLStringArg(\n \"quality\", allowed_strings=[\"good\", \"meh\", \"poor\", \"any\"]\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=self._resolve_web_vital_function,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"epm\",\n snql_distribution=lambda args, alias: Function(\n \"divide\",\n [\n Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n ],\n ),\n Function(\"divide\", [args[\"interval\"], 60]),\n ],\n alias,\n ),\n optional_args=[fields.IntervalDefault(\"interval\", 1, None)],\n default_result_type=\"number\",\n ),\n fields.MetricsFunction(\n \"eps\",\n snql_distribution=lambda args, alias: Function(\n \"divide\",\n [\n Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n ],\n ),\n args[\"interval\"],\n ],\n alias,\n ),\n optional_args=[fields.IntervalDefault(\"interval\", 1, None)],\n default_result_type=\"number\",\n ),\n fields.MetricsFunction(\n \"failure_count\",\n snql_distribution=self._resolve_failure_count,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"failure_rate\",\n snql_distribution=lambda args, alias: Function(\n \"divide\",\n [\n self._resolve_failure_count(args),\n Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n ],\n ),\n ],\n alias,\n ),\n default_result_type=\"percentage\",\n ),\n 
fields.MetricsFunction(\n \"histogram\",\n required_args=[fields.MetricArg(\"column\")],\n calculated_args=[resolve_metric_id],\n snql_distribution=self._resolve_histogram_function,\n default_result_type=\"number\",\n private=True,\n ),\n ]\n }\n\n for alias, name in constants.FUNCTION_ALIASES.items():\n if name in function_converter:\n function_converter[alias] = function_converter[name].alias_as(alias)\n\n return function_converter\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 28, "n_whitespaces": 13912, "n_words": 736, "vocab_size": 198, "complexity": 4, "nloc": 546, "token_counts": 2117, "n_ast_nodes": 3289, "n_identifiers": 52, "random_cut": "def function_converter(self) -> Mapping[str, fields.MetricsFunction]:\n \n resolve_metric_id = {\n \"name\": \"metric_id\",\n \"fn\": lambda args: self.resolve_metric(args[\"column\"]),\n }\n\n function_converter = {\n function.name: function\n for function in [\n # Note while the discover version of apdex, count_miserable, user_misery\n # accepts arguments, because this is precomputed with tags no parameters\n # are available\n fields.MetricsFunction(\n \"apdex\",\n optional_args=[fields.NullableNumberRange(\"satisfaction\", 0, None)],\n snql_distribution=self._resolve_apdex_function,\n default_result_type=\"number\",\n ),\n fields.MetricsFunction(\n \"avg\",\n required_args=[\n fields.MetricArg(\n \"column\",\n allowed_columns=constants.METRIC_DURATION_COLUMNS,\n )\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: Function(\n \"avgIf\",\n [\n Column(\"value\"),\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n args[\"metric_id\"],\n ],\n ),\n ],\n alias,\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count_miserable\",\n required_args=[\n fields.MetricArg(\n \"column\", allowed_columns=[\"user\"], allow_custom_measurements=False\n )\n ],\n optional_args=[fields.NullableNumberRange(\"satisfaction\", 0, None)],\n calculated_args=[resolve_metric_id],\n snql_set=self._resolve_count_miserable_function,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count_unparameterized_transactions\",\n snql_distribution=lambda args, alias: Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"and\",\n [\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n Function(\n \"equals\",\n [\n self.builder.column(\"transaction\"),\n self.builder.resolve_tag_value(\"<< unparameterized >>\"),\n ],\n ),\n ],\n ),\n ],\n alias,\n ),\n # Not yet exposed, need to add far more validation around tag&value\n private=True,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"count_null_transactions\",\n snql_distribution=lambda args, alias: Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"and\",\n [\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n Function(\n \"equals\",\n [\n self.builder.column(\"transaction\"),\n \"\",\n ],\n ),\n ],\n ),\n ],\n alias,\n ),\n private=True,\n ),\n fields.MetricsFunction(\n \"count_has_transaction_name\",\n snql_distribution=lambda args, alias: Function(\n \"countIf\",\n [\n Column(\"value\"),\n Function(\n \"and\",\n [\n Function(\n \"equals\",\n [\n Column(\"metric_id\"),\n self.resolve_metric(\"transaction.duration\"),\n ],\n ),\n Function(\n \"and\",\n 
[\n Function(\n \"notEquals\",\n [\n self.builder.column(\"transaction\"),\n \"\",\n ],\n ),\n Function(\n \"notEquals\",\n [\n self.builder.column(\"transaction\"),\n self.builder.resolve_tag_value(\n \"<< unparameterized >>\"\n ),\n ],\n ),\n ],\n ),\n ],\n ),\n ],\n alias,\n ),\n private=True,\n default_result_type=\"integer\",\n ),\n fields.MetricsFunction(\n \"user_misery\",\n optional_args=[\n fields.NullableNumberRange(\"satisfaction\", 0, None),\n fields.with_default(\n constants.MISERY_ALPHA, fields.NumberRange(\"alpha\", 0, None)\n ),\n fields.with_default(\n constants.MISERY_BETA, fields.NumberRange(\"beta\", 0, None)\n ),\n ],\n calculated_args=[],\n snql_set=self._resolve_user_misery_function,\n default_result_type=\"number\",\n ),\n fields.MetricsFunction(\n \"p50\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._resolve_percentile(\n args, alias, 0.5\n ),\n result_type_fn=self.reflective_result_type(),\n default_result_type=\"duration\",\n ),\n fields.MetricsFunction(\n \"p75\",\n optional_args=[\n fields.with_default(\n \"transaction.duration\",\n fields.MetricArg(\n \"column\", allowed_columns=constants.METRIC_DURATION_COLUMNS\n ),\n ),\n ],\n calculated_args=[resolve_metric_id],\n snql_distribution=lambda args, alias: self._re" }, { "id": 149740, "commit_id": "111b04c9e65668067646265e614326f81aa1bf1c", "repo": "freqtrade", "path": "tests/exchange/test_ccxt_compat.py", "file_name": "test_ccxt_compat.py", "fun_name": "test_ccxt__async_get_candle_history", "commit_message": "Okx - conditional candle-length", "code": "def test_ccxt__async_get_candle_history(self, exchange):\n exchange, exchangename = exchange\n # For some weired reason, this test returns random lengths for bittrex.\n if not exchange._ft_has['ohlcv_has_history'] or exchangename == 'bittrex':\n return\n pair = EXCHANGES[exchangename]['pair']\n timeframe = EXCHANGES[exchangename]['timeframe']\n candle_type = CandleType.SPOT\n timeframe_ms = timeframe_to_msecs(timeframe)\n now = timeframe_to_prev_date(\n timeframe, datetime.now(timezone.utc))\n for offset in (360, 120, 30, 10, 5, 2):\n since = now - timedelta(days=offset)\n since_ms = int(since.timestamp() * 1000)\n\n res = exchange.loop.run_until_complete(exchange._async_get_candle_history(\n pair=pair,\n timeframe=timeframe,\n since_ms=since_ms,\n candle_type=candle_type\n )\n )\n assert res\n assert res[0] == pair\n assert res[1] == timeframe\n assert res[2] == candle_type\n candles = res[3]\n candle_count = exchange.ohlcv_candle_limit(timeframe, candle_type, since_ms) * 0.9\n candle_count1 = (now.timestamp() * 1000 - since_ms) // timeframe_ms\n assert len(candles) >= min(candle_count, candle_count1)\n assert candles[0][0] == since_ms or (since_ms + timeframe_ms)\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 420, "n_words": 118, "vocab_size": 83, "complexity": 5, "nloc": 29, "token_counts": 225, "n_ast_nodes": 342, "n_identifiers": 35, "random_cut": "def test_ccxt__async_get_candle_history(self, exchange):\n exchange, exchangename = exchange\n # For some weired reason, this test returns random lengths for bittrex.\n if not exchange._ft_has['ohlcv_has_history'] or exchangename == 'bittrex':\n return\n pair = EXCHANGES[exchangename]['pair']\n timeframe = 
EXCHANGES[exchangename]['timeframe']\n candle_type = CandleType.SPOT\n timeframe_ms = timeframe_to_msecs(timeframe)\n now = timeframe_to_prev_date(\n timeframe, datetime.now(timezone.utc))\n for offset in (360, 120, 30, 10, 5, 2):\n since = now - timedelta(days=offset)\n since_ms = int(since.timestamp() * 1000)\n\n res = exchange.loop.run_until_complete(exchange._async_get_candle_history(\n pair=pair,\n timeframe=timeframe,\n since_ms=since_ms,\n candle_type=candle_type\n )\n )\n assert res\n assert res[0] == pair\n assert res[1] == timeframe\n assert res[2] == candle_type\n candles = res[3]\n candle_count = exchange.ohlcv_candle_limit(timeframe, candle_type, since_ms) * 0.9\n candle_count1 = (now.timestamp() * 1000 - since_ms) // " }, { "id": 144958, "commit_id": "606e2b2cde89a4869129dbca907bc14a7a9d1197", "repo": "ray", "path": "python/ray/_private/runtime_env/_clonevirtualenv.py", "file_name": "_clonevirtualenv.py", "fun_name": "fix_symlink_if_necessary", "commit_message": "Update license for MLflow's conda utils and virtualenv-clone (#22402)\n\nWhen we vendor third-party code, we should update LICENSE file. Previously we vendored two pieces of code:\r\n- conda utilities from MLflow\r\n- virtualenv-clone\r\nBut we only included the attribution in the relevant source files, not in our LICENSE file. This PR adds the necessary info to our LICENSE file.", "code": "def fix_symlink_if_necessary(src_dir, dst_dir):\n # sometimes the source virtual environment has symlinks that point to itself\n # one example is $OLD_VIRTUAL_ENV/local/lib points to $OLD_VIRTUAL_ENV/lib\n # this function makes sure\n # $NEW_VIRTUAL_ENV/local/lib will point to $NEW_VIRTUAL_ENV/lib\n # usually this goes unnoticed unless one tries to upgrade a package though pip, so this bug is hard to find.\n logger.info(\"scanning for internal symlinks that point to the original virtual env\")\n for dirpath, dirnames, filenames in os.walk(dst_dir):\n for a_file in itertools.chain(filenames, dirnames):\n full_file_path = os.path.join(dirpath, a_file)\n if os.path.islink(full_file_path):\n target = os.path.realpath(full_file_path)\n if target.startswith(src_dir):\n new_target = target.replace(src_dir, dst_dir)\n logger.debug(\"fixing symlink in %s\" % (full_file_path,))\n os.remove(full_file_path)\n os.symlink(new_target, full_file_path)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 256, "n_words": 101, "vocab_size": 75, "complexity": 5, "nloc": 12, "token_counts": 114, "n_ast_nodes": 184, "n_identifiers": 25, "random_cut": "def fix_symlink_if_necessary(src_dir, dst_dir):\n # sometimes the source virtual environment has symlinks that point to itself\n # one example is $OLD_VIRTUAL_ENV/local/lib points to $OLD_VIRTUAL_ENV/lib\n # this function makes sure\n # $NEW_VIRTUAL_ENV/local/lib will point to $NEW_VIRTUAL_ENV/lib\n # usually this goes unnoticed unless one tries to upgrade a package though pip, so this bug is hard to find.\n logger.inf" }, { "id": 87974, "commit_id": "618ae63cf2ba419e44e79ce578d88e8b062d7dd9", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events.py", "file_name": "test_organization_events.py", "fun_name": "test_issue_in_columns", "commit_message": "fix(tests): Discover backend test flakes (#41057)\n\n- `MetricsQueryBuilder` wasn't sorting environment tags\r\n- Consistent timestamps on test_organization_events\r\n- Updated `apply_feature_flag_on_cls` to only apply decorator on the 
run\r\nmethod", "code": "def test_issue_in_columns(self):\n project1 = self.create_project()\n project2 = self.create_project()\n event1 = self.store_event(\n data={\n \"event_id\": \"a\" * 32,\n \"transaction\": \"/example\",\n \"message\": \"how to make fast\",\n \"timestamp\": self.ten_mins_ago_iso,\n \"fingerprint\": [\"group_1\"],\n },\n project_id=project1.id,\n )\n event2 = self.store_event(\n data={\n \"event_id\": \"b\" * 32,\n \"transaction\": \"/example\",\n \"message\": \"how to make fast\",\n \"timestamp\": self.ten_mins_ago_iso,\n \"fingerprint\": [\"group_1\"],\n },\n project_id=project2.id,\n )\n\n features = {\"organizations:discover-basic\": True, \"organizations:global-views\": True}\n query = {\"field\": [\"id\", \"issue\"], \"orderby\": [\"id\"]}\n response = self.do_request(query, features=features)\n assert response.status_code == 200, response.content\n data = response.data[\"data\"]\n assert len(data) == 2\n assert data[0][\"id\"] == event1.event_id\n assert data[0][\"issue.id\"] == event1.group_id\n assert data[0][\"issue\"] == event1.group.qualified_short_id\n assert data[1][\"id\"] == event2.event_id\n assert data[1][\"issue.id\"] == event2.group_id\n assert data[1][\"issue\"] == event2.group.qualified_short_id\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 446, "n_words": 105, "vocab_size": 65, "complexity": 1, "nloc": 35, "token_counts": 248, "n_ast_nodes": 422, "n_identifiers": 23, "random_cut": "def test_issue_in_columns(self):\n project1 = self.create_project()\n project2 = self.create_project()\n event1 = self.store_event(\n data={\n \"event_id\": \"a\" * 32,\n \"transaction\": \"/example\",\n \"message\": \"how to ma" }, { "id": 156511, "commit_id": "1e783d9a714160e968936cb22d54d085959ab09e", "repo": "dask", "path": "dask/typing.py", "file_name": "typing.py", "fun_name": "__dask_graph__", "commit_message": "Collection Protocol (#8674)\n\n[PEP 544](https://www.python.org/dev/peps/pep-0544/) introduces the `Protocol` class to the `typing` module in Python 3.8 (the soon be the minimum supported version, https://github.com/dask/community/issues/213). Writing new Dask collections for [dask-awkward](https://github.com/ContinuumIO/dask-awkward/) has had me thinking about working on a `DaskCollection` protocol. I imagine the benefits to be:\r\n\r\n- usage with static type checkers\r\n - other activity in this area at\r\n - #8295 \r\n - #8706 \r\n - #8854\r\n - Python supporting IDEs take advantage of typing\r\n- self-documenting; some improvements to [the custom collections page](https://docs.dask.org/en/latest/custom-collections.html) of the docs. 
The protocol docs can be autogenerated and added to that page.\r\n- purely opt-in feature\r\n\r\nThe `typing.runtime_checkable` decorator allows use of `isinstance(x, DaskCollection)` in any code base\r\nthat uses Dask collections; for example:\r\n\r\n```python\r\n>>> from dask.typing import DaskCollection\r\n>>> import dask.array as da\r\n>>> x = da.zeros((10, 3))\r\n>>> isinstance(x, DaskCollection)\r\nTrue\r\n```\r\n(though this is an order of magnitude slower than `dask.base.is_dask_collection` which only checks for `x.__dask_graph__() is not None`; static typing checking & built-in interface documentation are the core benefits IMO)\r\n\r\nSomething else that came up in the brief discussion on a call last week was having `{Scheduler,Worker,Nanny}Plugin` protocols in `distributed`; and perhaps those are better places to start introducing protocols to Dask since on the user side typically more folks would write plugins than new collections.", "code": "def __dask_graph__(self) -> Mapping:\n \n raise NotImplementedError(\"Inheriting class must implement this method.\")\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 25, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 17, "token_counts": 13, "n_ast_nodes": 26, "n_identifiers": 4, "random_cut": "def __dask_graph__(self) -> Mapping:\n \n " }, { "id": 116827, "commit_id": "add8253659f2a16152fa513ae310b4b6b5242e1e", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/databend_handler/tests/test_databend_handler.py", "file_name": "test_databend_handler.py", "fun_name": "test_1_native_query_show_dbs", "commit_message": "added the unit tests for the handler", "code": "def test_1_native_query_show_dbs(self):\n result = self.handler.native_query(\"SHOW DATABASES;\")\n assert result.type is not RESPONSE_TYPE.ERROR\n\n # def test_2_wrong_native_query_returns_error(self):\n # result = self.handler.native_query(\"SHOW DATABASE1S;\")\n # assert result.type is RESPONSE_TYPE.ERROR\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 54, "n_words": 24, "vocab_size": 14, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 43, "n_identifiers": 8, "random_cut": "def test_1_native_query_show_dbs(self):\n result = self.handler.native_query(\"SHOW DATABASES;\")\n assert result.type is not RESPONSE_TYPE.ERROR\n\n # def test_2_wrong_native_query_returns_error(self):\n # result = self.handler.native_query(\"SHOW DATABASE1S;\")\n # assert result.type is RESPONSE_TY" }, { "id": 268743, "commit_id": "cda16cc5e9aa8703fb4e1ac0a0be6b631d9076cc", "repo": "ansible", "path": "test/lib/ansible_test/_internal/host_profiles.py", "file_name": "host_profiles.py", "fun_name": "setup", "commit_message": "ansible-test - Improve container management. 
(#78550)\n\nSee changelogs/fragments/ansible-test-container-management.yml for details.", "code": "def setup(self) -> None:\n \n bootstrapper = BootstrapDocker(\n controller=self.controller,\n python_versions=[self.python.version],\n ssh_key=SshKey(self.args),\n )\n\n setup_sh = bootstrapper.get_script()\n shell = setup_sh.splitlines()[0][2:]\n\n try:\n docker_exec(self.args, self.container_name, [shell], data=setup_sh, capture=False)\n except SubprocessError:\n display.info(f'Checking container \"{self.container_name}\" logs...')\n docker_logs(self.args, self.container_name)\n raise\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 158, "n_words": 32, "vocab_size": 30, "complexity": 2, "nloc": 15, "token_counts": 104, "n_ast_nodes": 169, "n_identifiers": 23, "random_cut": "def setup(self) -> None:\n \n bootstrapper = BootstrapDocker(\n controller=self.controller,\n python_versions=[self.python.version],\n ssh_key=SshKey(self.args),\n )\n\n setup_sh = bootstrapper.get_script()\n " }, { "id": 77878, "commit_id": "7b9531f9910ec8624ee66772805438e9f3084d3d", "repo": "wagtail", "path": "wagtail/snippets/tests.py", "file_name": "tests.py", "fun_name": "get_url", "commit_message": "Use ReportView for Snippets HistoryView and use filterset", "code": "def get_url(self, snippet, url_name, args=None):\n app_label = snippet._meta.app_label\n model_name = snippet._meta.model_name\n view_name = f\"wagtailsnippets_{app_label}_{model_name}:{url_name}\"\n if args is None:\n args = [quote(snippet.pk)]\n return reverse(view_name, args=args)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 69, "n_words": 24, "vocab_size": 20, "complexity": 2, "nloc": 7, "token_counts": 55, "n_ast_nodes": 96, "n_identifiers": 12, "random_cut": "def get_url(self, snippet, url_name, args=None):\n app_label = snippet._meta.app_label\n model_name = snippet._meta.model_name\n view_name = f\"wagtailsnippets_{app_label}_{model_name}:{url_name}\"\n if args is None:\n args = [quote(snippet.pk)]\n return reverse(view_name, args=args)\n" }, { "id": 64648, "commit_id": "00bfee97c766e771a1ab0b57d223ba9e87b70e9a", "repo": "erpnext", "path": "erpnext/accounts/report/cash_flow/custom_cash_flow.py", "file_name": "custom_cash_flow.py", "fun_name": "get_accounts_in_mappers", "commit_message": "refactor: convert raw sql to frappe.qb", "code": "def get_accounts_in_mappers(mapping_names):\n\tcfm = frappe.qb.DocType('Cash Flow Mapping')\n\tcfma = frappe.qb.DocType('Cash Flow Mapping Accounts')\n\tresult = (\n\t\tfrappe.qb\n\t\t\t.select(\n\t\t\t\tcfma.name, cfm.label, cfm.is_working_capital,\n\t\t\t\tcfm.is_income_tax_liability, cfm.is_income_tax_expense,\n\t\t\t\tcfm.is_finance_cost, cfm.is_finance_cost_adjustment, cfma.account\n\t\t\t)\n\t\t\t.from_(cfm)\n\t\t\t.join(cfma)\n\t\t\t.on(cfm.name == cfma.parent)\n\t\t\t.where(cfma.parent.isin(mapping_names))\n\t\t).run()\n\n\treturn result\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 20, "n_words": 36, "vocab_size": 31, "complexity": 1, "nloc": 16, "token_counts": 106, "n_ast_nodes": 164, "n_identifiers": 24, "random_cut": "def get_accounts_in_mappers(mapping_names):\n\tcfm = frappe.qb.DocType('Cash Flow Mapping')\n\tcfma = frappe.qb.DocType('Cash Flow Mapping Accounts')\n\tresult = 
(\n\t\tfrappe.qb\n\t\t\t.select(\n\t\t\t\tcfma.name, cfm.label, cfm.is_working_cap" }, { "id": 96791, "commit_id": "5e1cb0e215c061e13ec1262a814450a33d49a398", "repo": "sentry", "path": "tests/sentry/search/events/test_builder.py", "file_name": "test_builder.py", "fun_name": "_metric_conditions", "commit_message": "ref(mep): Some cleanup to the metric query builder (#32139)\n\n- This adds metric_id to the search conditions based on the aggregates\r\n added so that there's a top level filter instead of just the aggregate\r\n -if combinator filters. This should help with query performance\r\n- This also removes the combinator&merge from query construction since\r\n snuba can handle this for us, which makes the functions a bit cleaner", "code": "def _metric_conditions(metrics) -> List[Condition]:\n return [\n Condition(\n Column(\"metric_id\"),\n Op.IN,\n sorted(indexer.resolve(constants.METRICS_MAP[metric]) for metric in metrics),\n )\n ]\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 68, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 8, "token_counts": 44, "n_ast_nodes": 67, "n_identifiers": 13, "random_cut": "def _metric_conditions(metrics) -> List[Condition]:\n return [\n Condition(\n Column(\"metr" }, { "id": 14462, "commit_id": "594effa279668bd955e98f1cd5c036b37d3bbd40", "repo": "pydantic", "path": "tests/test_main.py", "file_name": "test_main.py", "fun_name": "test_nullable_strings_fails", "commit_message": "Switching to `pydantic_core` (#4516)\n\n* working on core schema generation\r\n\r\n* adapting main.py\r\n\r\n* getting tests to run\r\n\r\n* fix tests\r\n\r\n* disable pyright, fix mypy\r\n\r\n* moving to class-based model generation\r\n\r\n* working on validators\r\n\r\n* change how models are created\r\n\r\n* start fixing test_main.py\r\n\r\n* fixing mypy\r\n\r\n* SelfType\r\n\r\n* recursive models working, more tests fixed\r\n\r\n* fix tests on <3.10\r\n\r\n* get docs build to pass\r\n\r\n* starting to cleanup types.py\r\n\r\n* starting works on custom types\r\n\r\n* working on using annotated-types\r\n\r\n* using annoated types for constraints\r\n\r\n* lots of cleanup, fixing network tests\r\n\r\n* network tests passing :tada:\r\n\r\n* working on types\r\n\r\n* working on types and cleanup\r\n\r\n* fixing UUID type, restructing again\r\n\r\n* more types and newer pydantic-core\r\n\r\n* working on Iterable\r\n\r\n* more test_types tests\r\n\r\n* support newer pydantic-core, fixing more test_types.py\r\n\r\n* working through more test_types.py\r\n\r\n* test_types.py at last passing locally :tada:\r\n\r\n* fixing more tests in test_types.py\r\n\r\n* fix datetime_parse tests and linting\r\n\r\n* get tests running again, rename to test_datetime.py\r\n\r\n* renaming internal modules\r\n\r\n* working through mypy errors\r\n\r\n* fixing mypy\r\n\r\n* refactoring _generate_schema.py\r\n\r\n* test_main.py passing\r\n\r\n* uprev deps\r\n\r\n* fix conftest and linting?\r\n\r\n* importing Annotated\r\n\r\n* ltining\r\n\r\n* import Annotated from typing_extensions\r\n\r\n* fixing 3.7 compatibility\r\n\r\n* fixing tests on 3.9\r\n\r\n* fix linting\r\n\r\n* fixing SecretField and 3.9 tests\r\n\r\n* customising get_type_hints\r\n\r\n* ignore warnings on 3.11\r\n\r\n* spliting repr out of utils\r\n\r\n* removing unused bits of _repr, fix tests for 3.7\r\n\r\n* more cleanup, removing many type aliases\r\n\r\n* clean up repr\r\n\r\n* support namedtuples and typeddicts\r\n\r\n* test 
is_union\r\n\r\n* removing errors, uprev pydantic-core\r\n\r\n* fix tests on 3.8\r\n\r\n* fixing private attributes and model_post_init\r\n\r\n* renaming and cleanup\r\n\r\n* remove unnecessary PydanticMetadata inheritance\r\n\r\n* fixing forward refs and mypy tests\r\n\r\n* fix signatures, change how xfail works\r\n\r\n* revert mypy tests to 3.7 syntax\r\n\r\n* correct model title\r\n\r\n* try to fix tests\r\n\r\n* fixing ClassVar forward refs\r\n\r\n* uprev pydantic-core, new error format\r\n\r\n* add \"force\" argument to model_rebuild\r\n\r\n* Apply suggestions from code review\r\n\r\nSuggestions from @tiangolo and @hramezani :pray:\r\n\r\nCo-authored-by: Hasan Ramezani \r\nCo-authored-by: Sebastián Ramírez \r\n\r\n* more suggestions from @tiangolo\r\n\r\n* extra -> json_schema_extra on Field\r\n\r\nCo-authored-by: Hasan Ramezani \r\nCo-authored-by: Sebastián Ramírez ", "code": "def test_nullable_strings_fails(NoneCheckModel):\n with pytest.raises(ValidationError) as exc_info:\n NoneCheckModel(\n required_str_value=None,\n required_str_none_value=None,\n required_bytes_value=None,\n required_bytes_none_value=None,\n )\n assert exc_info.value.errors() == [\n {\n 'type': 'string_type',\n 'loc': ('required_str_value',),\n 'msg': 'Input should be a valid string',\n 'input': None,\n },\n {\n 'type': 'bytes_type',\n 'loc': ('required_bytes_value',),\n 'msg': 'Input should be a valid bytes',\n 'input': None,\n },\n ]\n\n\n@pytest.fixture(name='ParentModel', scope='session')", "url": "https://github.com/pydantic/pydantic.git", "language": "Python", "ast_errors": "@pytest.fixture(name='ParentModel', scope='session')", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 230, "n_words": 49, "vocab_size": 37, "complexity": 1, "nloc": 22, "token_counts": 89, "n_ast_nodes": 180, "n_identifiers": 15, "random_cut": "def test_nullable_strings_fails(NoneCheckModel):\n with pytest.raises(ValidationError) as exc_info:\n NoneCheckModel(\n required_str_value=None,\n required_str_none_value=None,\n required_bytes_value=None,\n required_bytes_none_value=None,\n )\n assert exc_info.value.errors() == [\n {\n 'type': 'string_type',\n 'loc': ('required_str_value',),\n 'msg': 'Input should be a valid string',\n 'input': None,\n },\n {\n 'type': 'bytes_type',\n 'loc': ('required_bytes_value',),\n 'msg': 'Input should be a valid bytes',\n 'input': None,\n },\n ]\n\n\n@pytest.fixture(name='Pare" }, { "id": 266778, "commit_id": "a06fa496d3f837cca3c437ab6e9858525633d147", "repo": "ansible", "path": "test/lib/ansible_test/_internal/host_configs.py", "file_name": "host_configs.py", "fun_name": "apply_defaults", "commit_message": "ansible-test - Code cleanup and refactoring. (#77169)\n\n* Remove unnecessary PyCharm ignores.\r\n* Ignore intentional undefined attribute usage.\r\n* Add missing type hints. 
Fix existing type hints.\r\n* Fix docstrings and comments.\r\n* Use function to register completion handler.\r\n* Pass strings to display functions.\r\n* Fix CompositeAction handling of dest argument.\r\n* Use consistent types in expressions/assignments.\r\n* Use custom function to keep linters happy.\r\n* Add missing raise for custom exception.\r\n* Clean up key/value type handling in cloud plugins.\r\n* Use dataclass instead of dict for results.\r\n* Add custom type_guard function to check lists.\r\n* Ignore return type that can't be checked (yet).\r\n* Avoid changing types on local variables.", "code": "def apply_defaults(self, context, defaults): # type: (HostContext, CompletionConfig) -> None\n \n assert isinstance(defaults, PosixCompletionConfig)\n\n super().apply_defaults(context, defaults)\n\n self.python = self.python or NativePythonConfig()\n self.python.apply_defaults(context, defaults)\n\n\n@dataclasses.dataclass", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "@dataclasses.dataclass", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 58, "n_words": 23, "vocab_size": 21, "complexity": 2, "nloc": 5, "token_counts": 48, "n_ast_nodes": 84, "n_identifiers": 11, "random_cut": "def apply_defaults(self, context, defaults): # type: (HostContext, CompletionConfig) -> None\n \n assert isinstance(defaults, PosixCompletionConfig)\n\n super().apply_defaults(context, defaults)\n\n self.python = self.python or NativePythonConfig()\n self.python.apply_defaults(context, defaults)\n\n\n@dataclasses.data" }, { "id": 53139, "commit_id": "fa64dff0102537b3d249af16c7ea7821982195dd", "repo": "prefect", "path": "tests/test_logging.py", "file_name": "test_logging.py", "fun_name": "test_flush_event_is_cleared", "commit_message": "Implement `flush(block: bool ...)`\n\nPreviously, this always blocked. 
The new implementaiton is non-blocking, but we need to block in tests so the data is present for assertions", "code": "def test_flush_event_is_cleared(self, worker):\n worker._flush_event = MagicMock(return_val=False)\n with temporary_settings(PREFECT_LOGGING_ORION_BATCH_INTERVAL=\"5\"):\n worker.start()\n worker.flush(block=True)\n\n worker._flush_event.wait.assert_called_with(5)\n worker._flush_event.clear.assert_called()\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 61, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 7, "token_counts": 57, "n_ast_nodes": 97, "n_identifiers": 15, "random_cut": "def test_flush_event_is_cleared(self, worker):\n worker._flush_event = MagicMock(return_val=False)\n with temporary_settings(PREFECT_LOGGING_ORION_BATCH_INTERVAL=\"5\"):\n worker.start()\n worker.flush(block=True)\n\n worker._flush_event.wait.assert_called_with(5)\n worker._" }, { "id": 82010, "commit_id": "d3eb2c197595c29c4a3f7b38cd609ce953009623", "repo": "awx", "path": "awx/main/tests/unit/api/test_views.py", "file_name": "test_views.py", "fun_name": "test_get_endpoints", "commit_message": "Add new flak8 rules to do some meaningful corrections", "code": "def test_get_endpoints(self, mocker):\n endpoints = [\n 'ping',\n 'config',\n # 'settings',\n 'me',\n 'dashboard',\n 'organizations',\n 'users',\n 'projects',\n 'teams',\n 'credentials',\n 'inventory',\n 'inventory_sources',\n 'groups',\n 'hosts',\n 'job_templates',\n 'jobs',\n 'ad_hoc_commands',\n 'system_job_templates',\n 'system_jobs',\n 'schedules',\n 'notification_templates',\n 'notifications',\n 'labels',\n 'unified_job_templates',\n 'unified_jobs',\n 'activity_stream',\n 'workflow_job_templates',\n 'workflow_jobs',\n ]\n view = ApiVersionRootView()\n ret = view.get(mocker.MagicMock())\n assert ret.status_code == 200\n for endpoint in endpoints:\n assert endpoint in ret.data\n\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 414, "n_words": 54, "vocab_size": 49, "complexity": 2, "nloc": 35, "token_counts": 99, "n_ast_nodes": 181, "n_identifiers": 12, "random_cut": "def test_get_endpoints(self, mocker):\n endpoints = [\n 'ping',\n 'config',\n # 'settings',\n 'me',\n 'dashboard',\n 'organizations',\n 'users',\n 'projects',\n 'teams',\n 'credentials',\n 'inventory',\n 'inventory_sources',\n 'groups',\n 'hosts',\n 'job_templates',\n 'jobs',\n 'ad_hoc_commands',\n 'system_job_templates',\n 'system_jobs',\n 'schedules',\n 'notification_templates',\n " }, { "id": 153176, "commit_id": "39fbc57e809c2422b250f0be58d076a22bd45031", "repo": "modin", "path": "modin/pandas/test/test_series.py", "file_name": "test_series.py", "fun_name": "test_var", "commit_message": "FEAT-#4035: Upgrade pandas support to 1.4 (#4036)\n\nCo-authored-by: Igoshev, Yaroslav \r\nCo-authored-by: Alexey Prutskov \r\nCo-authored-by: Rehan Durrani \r\nCo-authored-by: ienkovich \r\nCo-authored-by: Vasily Litvinov \r\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Devin Petersohn ", "code": "def test_var(data, skipna, ddof):\n modin_series, pandas_series = create_test_series(data)\n\n try:\n pandas_result = pandas_series.var(skipna=skipna, ddof=ddof)\n except Exception as e:\n with pytest.raises(type(e)):\n modin_series.var(skipna=skipna, ddof=ddof)\n else:\n modin_result = modin_series.var(skipna=skipna, ddof=ddof)\n df_equals(modin_result, pandas_result)\n\n", "url": 
"https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 78, "n_words": 28, "vocab_size": 23, "complexity": 2, "nloc": 10, "token_counts": 83, "n_ast_nodes": 132, "n_identifiers": 16, "random_cut": "def test_var(data, skipna, ddof):\n modin_series, pandas_series = create_test_series(data)\n\n try:\n pandas_result = pandas_series" }, { "id": 275837, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/regularizers_test.py", "file_name": "regularizers_test.py", "fun_name": "test_zero_regularization", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_zero_regularization(self):\n # Verifies that training with zero regularization works.\n x, y = np.ones((10, 10)), np.ones((10, 3))\n model = test_utils.get_model_from_layers(\n [\n keras.layers.Dense(\n 3, kernel_regularizer=keras.regularizers.l2(0)\n )\n ],\n input_shape=(10,),\n )\n model.compile(\"sgd\", \"mse\", run_eagerly=test_utils.should_run_eagerly())\n model.fit(x, y, batch_size=5, epochs=1)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 158, "n_words": 35, "vocab_size": 32, "complexity": 1, "nloc": 12, "token_counts": 98, "n_ast_nodes": 150, "n_identifiers": 22, "random_cut": "def test_zero_regularization(self):\n # Verifies that training with zero regularization works.\n x, y = np.ones((10, 10)), np.ones((10, 3))\n model = t" }, { "id": 241591, "commit_id": "7eab379da2fdca542849ed4ad313d0851c2271e3", "repo": "lightning", "path": "tests/trainer/test_trainer.py", "file_name": "test_trainer.py", "fun_name": "test_best_ckpt_evaluate_raises_warning_with_multiple_ckpt_callbacks", "commit_message": "Raise a warning if evaulation is triggered with best ckpt in case of multiple checkpoint callbacks (#11274)\n\nCo-authored-by: Carlos Mocholí ", "code": "def test_best_ckpt_evaluate_raises_warning_with_multiple_ckpt_callbacks():\n \n\n ckpt_callback1 = ModelCheckpoint()\n ckpt_callback1.best_model_path = \"foo_best_model.ckpt\"\n ckpt_callback2 = ModelCheckpoint()\n ckpt_callback2.best_model_path = \"bar_best_model.ckpt\"\n trainer = Trainer(callbacks=[ckpt_callback1, ckpt_callback2])\n trainer.state.fn = TrainerFn.TESTING\n\n with pytest.warns(UserWarning, match=\"best checkpoint path from first checkpoint callback\"):\n trainer._Trainer__set_ckpt_path(ckpt_path=\"best\", model_provided=False, model_connected=True)\n\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 64, "n_words": 33, "vocab_size": 26, "complexity": 1, "nloc": 9, "token_counts": 74, "n_ast_nodes": 129, "n_identifiers": 20, "random_cut": "def test_best_ckpt_evaluate_raises_warning_with_multiple_ckpt_callbacks():\n \n\n ckpt_callback1 = ModelCheckpoint()\n ckpt_callback1.best_model_path = \"foo_best_model.ckpt\"\n ckpt_callback2 = ModelCheckpoint()\n ckpt_callback2.best_model_path = \"bar_best_model.ckpt\"\n train" }, { "id": 251820, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/net/test_server_spec.py", "file_name": "test_server_spec.py", "fun_name": "test_parse_with_mode", "commit_message": "make it black!", "code": "def test_parse_with_mode():\n assert server_spec.parse_with_mode(\"m:example.com\") == (\n \"m\",\n (\"https\", (\"example.com\", 
443)),\n )\n with pytest.raises(ValueError):\n server_spec.parse_with_mode(\"moo\")\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 43, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 7, "token_counts": 40, "n_ast_nodes": 73, "n_identifiers": 6, "random_cut": "def test_parse_with_mode():\n assert server_spec.parse_with_mode(\"m:example.com\") == (\n \"m\",\n (\"https\", (\"example.com\", 443)),\n )\n with pyte" }, { "id": 291888, "commit_id": "e2308fd15cec4dfdd25d843b72cd3071657fd5b8", "repo": "core", "path": "tests/components/matter/test_config_flow.py", "file_name": "test_config_flow.py", "fun_name": "setup_entry_fixture", "commit_message": "Add matter integration BETA (#83064)\n\n* Add matter base (#79372)\r\n\r\nCo-authored-by: Marcel van der Veldt \r\n\r\n* Add matter server add-on flow (#82698)\r\n\r\n* Add matter server add-on flow\r\n\r\n* Fix stale error argument\r\n\r\n* Clean docstrings\r\n\r\n* Use localhost as default address\r\n\r\n* Add matter websocket api foundation (#82848)\r\n\r\n* Add matter config entry add-on management (#82865)\r\n\r\n* Use matter refactored server/client library (#83003)\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Bump python-matter-server to 1.0.6 (#83059)\r\n\r\n* Extend matter websocket api (#82948)\r\n\r\n* Extend matter websocket api\r\n\r\n* Finish docstring\r\n\r\n* Fix pin type\r\n\r\n* Adjust api after new client\r\n\r\n* Adjust api to frontend for now\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "def setup_entry_fixture() -> Generator[AsyncMock, None, None]:\n \n with patch(\n \"homeassistant.components.matter.async_setup_entry\", return_value=True\n ) as mock_setup_entry:\n yield mock_setup_entry\n\n\n@pytest.fixture(name=\"client_connect\", autouse=True)", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture(name=\"client_connect\", autouse=True)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 39, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 6, "token_counts": 28, "n_ast_nodes": 72, "n_identifiers": 10, "random_cut": "def setup_entry_fixture() -> Generator[AsyncMock, None, None]:\n \n with patch(\n \"homeassistant.components.matter.async_setu" }, { "id": 110659, "commit_id": "b4e9e3131cdd7f1ad33ea06e21e7d3e51762af91", "repo": "matplotlib", "path": "lib/matplotlib/backends/backend_wx.py", "file_name": "backend_wx.py", "fun_name": "_on_motion", "commit_message": "Separately track modifier keys for mouse events.\n\nWhether the event modifiers are directly available on enter/leave events\ndepends on the backend, but all are handled here (except possibly for\nmacos, which I haven't checked).", "code": "def _on_motion(self, event):\n \n event.Skip()\n MouseEvent(\"motion_notify_event\", self,\n *self._mpl_coords(event),\n modifiers=self._mpl_modifiers(event),\n guiEvent=event)._process()\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 84, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 6, "token_counts": 44, "n_ast_nodes": 72, "n_identifiers": 10, "random_cut": "def _on_motion(self, event):\n \n event.Skip()\n MouseEvent(\"motion_notify_event\", self,\n *self._mpl_coords(event),\n modifiers=self._mpl_modifiers(event),\n gui" }, { "id": 188691, "commit_id": "60edbb36a19188570113f9b5b2b60d01412a280d", "repo": "jumpserver", "path": 
"apps/applications/models/application.py", "file_name": "application.py", "fun_name": "create_types_tree_nodes", "commit_message": "fix: 应用树隐藏mongodb节点", "code": "def create_types_tree_nodes(cls, pid, counts, show_empty=True, show_count=True):\n nodes = []\n temp_pid = pid\n type_category_mapper = const.AppType.type_category_mapper()\n types = const.AppType.type_category_mapper().keys()\n for tp in types:\n # TODO: Temporary exclude mongodb\n if tp == const.AppType.mongodb:\n continue\n if not settings.XPACK_ENABLED and const.AppType.is_xpack(tp):\n continue\n category = type_category_mapper.get(tp)\n pid = cls.create_tree_id(pid, 'category', category.value)\n i = cls.create_tree_id(pid, 'type', tp.value)\n node = cls.create_choice_node(\n tp, i, pid, tp='type', counts=counts, opened=False,\n show_empty=show_empty, show_count=show_count\n )\n pid = temp_pid\n if not node:\n continue\n nodes.append(node)\n return nodes\n", "url": "https://github.com/jumpserver/jumpserver.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 310, "n_words": 73, "vocab_size": 53, "complexity": 6, "nloc": 22, "token_counts": 161, "n_ast_nodes": 250, "n_identifiers": 27, "random_cut": "def create_types_tree_nodes(cls, pid, counts, show_empty=True, show_count=True):\n nodes = []\n temp_pid = pid\n type_category_mapper = const.AppType.type_category_mapper()\n types = const.AppType.type_category_mapper().keys()\n for tp in types:\n # TODO: Temporary exclude mongodb\n if tp == const.AppType.mongodb:\n continue\n if not settings.XPACK_ENABLED and const.AppType.is_xpack(tp):\n continue\n category = type_category_mapper.get(tp)\n pid = cls.create_tree_id(pid, 'category', category.value)\n i = cls.create_tree_id(pid, 'type', tp.value)\n node = cls.create_choice_node(\n tp, i, pid, tp='type', counts=counts, opened=False,\n show_empty=show_empty, show_count=show_count\n )\n pid = temp_pid\n i" }, { "id": 194367, "commit_id": "a8007dcdfb5159a711fa343d2ac4bb7df826975f", "repo": "vision", "path": "test/test_prototype_transforms.py", "file_name": "test_prototype_transforms.py", "fun_name": "test__transform", "commit_message": "rename features._Feature to datapoints._Datapoint (#7002)\n\n* rename features._Feature to datapoints.Datapoint\r\n\r\n* _Datapoint to Datapoint\r\n\r\n* move is_simple_tensor to transforms.utils\r\n\r\n* fix CI\r\n\r\n* move Datapoint out of public namespace", "code": "def test__transform(self, p, transform_cls, func_op_name, kwargs, mocker):\n transform = transform_cls(p=p, **kwargs)\n\n fn = mocker.patch(f\"torchvision.prototype.transforms.functional.{func_op_name}\")\n inpt = mocker.MagicMock(spec=datapoints.Image)\n _ = transform(inpt)\n if p > 0.0:\n fn.assert_called_once_with(inpt, **kwargs)\n else:\n assert fn.call_count == 0\n\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 94, "n_words": 31, "vocab_size": 27, "complexity": 2, "nloc": 9, "token_counts": 77, "n_ast_nodes": 119, "n_identifiers": 18, "random_cut": "def test__transform(self, p, transform_cls, func_op_name, kwargs, mocker):\n transform = transform_cls(p=p, **kwargs)\n\n fn = mocker.patch(f\"torchvision.prototype.transforms.functional.{func_op_name}\")\n inpt = mocker.MagicMock(" }, { "id": 130622, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/data/tests/test_context_propagation.py", "file_name": "test_context_propagation.py", "fun_name": 
"test_map_batches", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_map_batches(ray_start_regular_shared):\n context = DatasetContext.get_current()\n context.foo = 70003\n ds = ray.data.range(1).map_batches(lambda x: [DatasetContext.get_current().foo])\n assert ds.take_all()[0] == 70003\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 28, "n_words": 17, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 54, "n_ast_nodes": 88, "n_identifiers": 13, "random_cut": "def test_map_batches(ray_start_regular_shared):\n context = DatasetContext.get_current()\n context.foo = 70003\n ds = ray.data.range(1).map_batches(lambda x: [DatasetContext.get_current().foo])\n assert ds.take_all()[0] == 70003\n\n" }, { "id": 317506, "commit_id": "04c6b9c51963418ffebddc7753939700fbea7e42", "repo": "core", "path": "homeassistant/components/zha/core/channels/lighting.py", "file_name": "lighting.py", "fun_name": "hs_supported", "commit_message": "ZHA light entity cleanup (#75573)\n\n* use base class attributes\r\n\r\n* initial hue and saturation support\r\n\r\n* spec is 65536 not 65535\r\n\r\n* fixes\r\n\r\n* enhanced current hue\r\n\r\n* fix comparison\r\n\r\n* clean up\r\n\r\n* fix channel test\r\n\r\n* oops\r\n\r\n* report enhanced current hue", "code": "def hs_supported(self) -> bool:\n \n return (\n self.zcl_color_capabilities is not None\n and lighting.Color.ColorCapabilities.Hue_and_saturation\n in self.zcl_color_capabilities\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 69, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 7, "token_counts": 29, "n_ast_nodes": 47, "n_identifiers": 8, "random_cut": "def hs_supported(self) -> bool:\n \n return (\n self.zcl_color_capabilities is not None\n and lighting.Color.ColorCapabilities.Hue_and_saturation\n in self.zcl_color_capabilities\n" }, { "id": 132214, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/insufficient_resources_manager.py", "file_name": "insufficient_resources_manager.py", "fun_name": "_get_insufficient_resources_warning_threshold", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _get_insufficient_resources_warning_threshold() -> float:\n if is_ray_cluster():\n return float(\n os.environ.get(\n \"TUNE_WARN_INSUFFICENT_RESOURCE_THRESHOLD_S_AUTOSCALER\", \"60\"\n )\n )\n else:\n # Set the default to 10s so that we don't prematurely determine that\n # a cluster cannot fulfill the resources requirements.\n # TODO(xwjiang): Change it back once #18608 is resolved.\n return float(os.environ.get(\"TUNE_WARN_INSUFFICENT_RESOURCE_THRESHOLD_S\", \"60\"))\n\n\n# TODO(xwjiang): Consider having a help page with more detailed instructions.\n@lru_cache()", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@lru_cache()", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 141, "n_words": 59, "vocab_size": 50, "complexity": 2, "nloc": 9, "token_counts": 41, "n_ast_nodes": 86, "n_identifiers": 7, "random_cut": "def _get_insufficient_resources_warning_threshold() -> float:\n if is_ray_cluster():\n return float(\n os.environ.get(\n 
\"TUNE_WARN_INSUFFICENT_RESOURCE_THRESHOLD_S_AUTOSCALER\", \"60\"\n )\n )\n else:\n # Set the default to 10s so that we don't prematurely determine that\n # a cluster cannot fulfill the resources requirements.\n # TODO(xwjiang): Change it back once #18608 is resolved.\n return float(os.environ.get(\"TUNE_WARN_INSUFFICENT_RESOURCE_THRESHOLD_S\", \"60\"))\n\n\n# TODO(x" }, { "id": 289622, "commit_id": "da099532fe837604383d5e195be4a0320941a87c", "repo": "core", "path": "homeassistant/components/ecobee/notify.py", "file_name": "notify.py", "fun_name": "send_message", "commit_message": "Load ecobee notify platform via discovery (#78558)\n\n* Fix ecobee notify platform KeyError\r\n\r\n* set up notify platform via discovery\r\n\r\n* address comments\r\n\r\n* fix isort\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "def send_message(self, message=\"\", **kwargs):\n \n targets = kwargs.get(ATTR_TARGET)\n\n if not targets:\n raise ValueError(\"Missing required argument: target\")\n\n for target in targets:\n thermostat_index = int(target)\n self.ecobee.send_message(thermostat_index, message)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 85, "n_words": 24, "vocab_size": 22, "complexity": 3, "nloc": 7, "token_counts": 51, "n_ast_nodes": 86, "n_identifiers": 12, "random_cut": "def send_message(self, message=\"\", **kwargs):\n \n targets = kwargs.get(ATTR_TARGET)\n\n if not targets:\n raise ValueError(\"Missing required argument: target\")\n\n for target in targets:\n thermostat_index = int(target)\n self.ecobee.send_message(thermostat_index, mess" }, { "id": 259533, "commit_id": "7931262d737d89b795d1ea5790c44003c13c741c", "repo": "scikit-learn", "path": "sklearn/neighbors/tests/test_neighbors.py", "file_name": "test_neighbors.py", "fun_name": "test_same_knn_parallel", "commit_message": "TST use global_dtype in sklearn/neighbors/tests/test_neighbors.py (#22663)\n\nCo-authored-by: Jérémie du Boisberranger\r\nCo-authored-by: Olivier Grisel ", "code": "def test_same_knn_parallel(algorithm):\n X, y = datasets.make_classification(\n n_samples=30, n_features=5, n_redundant=0, random_state=0\n )\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n\n clf = neighbors.KNeighborsClassifier(n_neighbors=3, algorithm=algorithm)\n clf.fit(X_train, y_train)\n y = clf.predict(X_test)\n dist, ind = clf.kneighbors(X_test)\n graph = clf.kneighbors_graph(X_test, mode=\"distance\").toarray()\n\n clf.set_params(n_jobs=3)\n clf.fit(X_train, y_train)\n y_parallel = clf.predict(X_test)\n dist_parallel, ind_parallel = clf.kneighbors(X_test)\n graph_parallel = clf.kneighbors_graph(X_test, mode=\"distance\").toarray()\n\n assert_array_equal(y, y_parallel)\n assert_allclose(dist, dist_parallel)\n assert_array_equal(ind, ind_parallel)\n assert_allclose(graph, graph_parallel)\n\n\n@pytest.mark.parametrize(\"algorithm\", ALGORITHMS)", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"algorithm\", ALGORITHMS)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 115, "n_words": 59, "vocab_size": 44, "complexity": 1, "nloc": 19, "token_counts": 173, "n_ast_nodes": 287, "n_identifiers": 40, "random_cut": "def test_same_knn_parallel(algorithm):\n X, y = datasets.make_classification(\n n_samples=3" }, { "id": 94821, "commit_id": "ab993b32614bb83d17d10e1041817e43dd6f5980", "repo": "sentry", "path": 
"tests/snuba/api/endpoints/test_organization_events.py", "file_name": "test_organization_events.py", "fun_name": "test_count_miserable_new_alias_field", "commit_message": "fix(tests): Fix dnd backend test flakes (#37916)\n\nThis PR fixes 3 major flakes:\r\n\r\nFixes SENTRY-TESTS-3J5: Just sort the project id order\r\n\r\nFixes SENTRY-TESTS-3HQ: Flakes because we calculate the retention\r\nin the test once and the value returned in the response is calculated\r\na little while after. We don't need to test for seconds granularity\r\nso replacing seconds to 0.\r\n\r\nFixes SENTRY-TESTS-3J0: Successively calling before_now results in some flakes\r\nparticularly in tests that are calculating aggregates\r\non transaction.duration. Introduced a load_data method\r\nthat takes a datetime object timestamp and a timedelta duration\r\ncalculates the offset based on timestamp to get start_timestamp.", "code": "def test_count_miserable_new_alias_field(self):\n ProjectTransactionThreshold.objects.create(\n project=self.project,\n organization=self.project.organization,\n threshold=400,\n metric=TransactionMetric.DURATION.value,\n )\n\n events = [\n (\"one\", 400),\n (\"one\", 400),\n (\"two\", 3000),\n (\"two\", 3000),\n (\"three\", 300),\n (\"three\", 3000),\n ]\n for idx, event in enumerate(events):\n data = self.load_data(\n timestamp=before_now(minutes=(10 + idx)),\n duration=timedelta(milliseconds=event[1]),\n )\n data[\"event_id\"] = f\"{idx}\" * 32\n data[\"transaction\"] = f\"/count_miserable/horribilis/{event[0]}\"\n data[\"user\"] = {\"email\": f\"{idx}@example.com\"}\n self.store_event(data, project_id=self.project.id)\n\n query = {\n \"field\": [\n \"transaction\",\n \"count_miserable(user)\",\n ],\n \"query\": \"event.type:transaction\",\n \"project\": [self.project.id],\n \"sort\": \"count_miserable_user\",\n }\n\n response = self.do_request(\n query,\n )\n\n assert response.status_code == 200, response.content\n assert len(response.data[\"data\"]) == 3\n data = response.data[\"data\"]\n assert data[0][\"count_miserable(user)\"] == 0\n assert data[1][\"count_miserable(user)\"] == 1\n assert data[2][\"count_miserable(user)\"] == 2\n\n query[\"query\"] = \"event.type:transaction count_miserable(user):>0\"\n\n response = self.do_request(\n query,\n )\n\n assert response.status_code == 200, response.content\n assert len(response.data[\"data\"]) == 2\n data = response.data[\"data\"]\n assert abs(data[0][\"count_miserable(user)\"]) == 1\n assert abs(data[1][\"count_miserable(user)\"]) == 2\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 597, "n_words": 124, "vocab_size": 75, "complexity": 2, "nloc": 51, "token_counts": 332, "n_ast_nodes": 552, "n_identifiers": 34, "random_cut": "def test_count_miserable_new_alias_field(self):\n ProjectTransactionThreshold.objects.create(\n project=self.project,\n organization=self.project.organization,\n threshold=400,\n metric=TransactionMetric.DURATION.value,\n )\n\n events = [\n (\"one\", 400),\n (\"one\", 400),\n (\"two\", 3000),\n (\"two\", 3000),\n (\"three\", 300),\n (\"three\", 3000),\n ]\n for idx, event in enumerate(events):\n data = self.load_data(\n timestamp=before_now(minutes=(10 + idx)),\n duration=timedelta(milliseconds=event[1]),\n )\n data[\"event_id\"] = f\"{idx}\" * 32\n data[\"transaction\"] = f\"/count_miserable/horribilis/{event[0]}\"\n data[\"user\"] = {\"email\": f\"{idx}@example.com\"}\n self.store_event(data, 
project_id=self.project.id)\n\n query = {\n \"field\": [\n \"transaction\",\n \"count_miserable(user)\",\n ],\n \"query\": \"event.type:transaction\",\n \"project\": [self.project.id],\n \"sort\": \"count_miserable_user\",\n }\n\n response = self.do_request(\n query,\n )\n\n assert response.status_code == 200, response.content\n assert len(response.data[\"data\"]) == 3\n data = response.data[\"data\"]\n assert data[0][\"count_miserable(user)\"] == 0\n assert data[1][\"count_miserable(user)\"] == 1\n assert data[2][\"count_miserable(user)\"] == 2\n\n query[\"query\"] = \"event.type:trans" }, { "id": 286162, "commit_id": "3762693df7a1f8cdfeba5e14c4438f993a2eead0", "repo": "OpenBBTerminal", "path": "openbb_terminal/terminal_controller.py", "file_name": "terminal_controller.py", "fun_name": "call_intro", "commit_message": "Add intro command + improve plot style + internationalisation consistency (#2899)\n\n* improve style of plots\r\n\r\n* remove the blend which doesnt look nice in some machines\r\n\r\n* remove translation for arguments to be coherent\r\n\r\n* Add intro command\r\n\r\n* add check to understand if its first time user to run intro by default\r\n\r\n* intro doesnt require docs\r\n\r\n* silly catch\r\n\r\n* fix line length\r\n\r\n* anoter lengthy line\r\n\r\n* unused import\r\n\r\n* unused import\r\n\r\n* words\r\n\r\n* allow to quit earlier\r\n\r\n* style\r\n\r\n* fix thing for u didi\r\n\r\n* actual fix\r\n\r\n* last try\r\n\r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: james ", "code": "def call_intro(self, _):\n \n console.print(panel.Panel(\"[purple]Welcome to the OpenBB Terminal.[/purple]\"))\n console.print(\n \"\\nThe following walkthrough will guide you towards making the most out of the OpenBB Terminal.\\n\\n\"\n \"Press Enter to continue or 'q' followed by Enter to exit.\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#1 - Commands vs menu.[/purple]\"))\n console.print(\n \"\\nMenus are a collection of 'commands' and 'sub-menus'.\\n\"\n \"You can identify them through their distinct color and a '>' at the beginning of the line\\n\\n\"\n \"For instance:\\n\"\n \"[menu]> stocks access historical pricing data, options, sector [/menu]\"\n \"[menu]and industry, and overall due diligence [/menu]\\n\\n\\n\"\n \"Commands are expected to return data either as a chart or table.\\n\"\n \"You can identify them through their distinct color\\n\\n\"\n \"For instance:\\n\"\n \"[cmds]> news display news articles based on term and data sources [/cmds]\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#2 - Using commands[/purple]\"))\n console.print(\n \"\\nCommands throughout the terminal can have additional arguments.\\n\\n\"\n \"Let's say that in the current menu, you want to have more information about the command 'news'. 
\\n\\n\"\n \"You can either see the available arguments in the terminal, using: [param]news -h[/param]\\n\\n\",\n \"or you can find out more about it with an output example on the browser, using: [param]about news[/param]\",\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#3 - Setting API Keys[/purple]\"))\n console.print(\n \"\\nThe OpenBB Terminal does not own any of the data you have access to.\\n\\n\"\n \"Instead, we provide the infrastructure to access over 100 different data sources from a single location.\\n\\n\"\n \"Thus, it is necessary for each user to set their own API keys for the various third party sources\\n\\n\"\n \"You can find more about this on the '[param]keys[/param]' menu.\\n\\n\"\n \"For many commands, there are multiple data sources that can be selected.\\n\\n\"\n \"The help menu shows the data sources supported by each command.\\n\\n\"\n \"For instance:\\n\"\n \"[cmds] load load a specific stock ticker and additional info for analysis [/cmds]\"\n \"[src][YahooFinance, IEXCloud, AlphaVantage, Polygon, EODHD] [/src]\\n\\n\"\n \"The user can go into the '[param]sources[/param]' menu and select their preferred default data source.\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(\n panel.Panel(\"[purple]#4 - Symbol dependent menus and commands[/purple]\")\n )\n console.print(\n \"\\nThroughout the terminal, you will see commands and menus greyed out.\\n\\n\"\n \"These menus or commands cannot be accessed until an object is loaded.\\n\\n\"\n \"Let's take as an example the '[param]stocks[/param]' menu.\\n\\n\"\n \"You will see that the command '[param]disc[/param]' is available as its goal is to discover new tickers:\\n\"\n \"[menu]> stocks access historical pricing data, options, sector [/menu]\\n\\n\"\n \"On the other hand, '[param]fa[/param]' menu (fundamental analysis) requires a ticker to be loaded.\\n\\n\"\n \"And therefore, appears as:\\n\"\n \"[dim]> fa fundamental analysis of loaded ticker [/dim]\\n\\n\"\n \"Once a ticker is loaded with: [param]load TSLA[/param]\\n\\n\"\n \"The '[param]fa[/param]' menu will be available as:\\n\"\n \"[menu]> fa fundamental analysis of loaded ticker [/menu]\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#5 - Terminal Navigation[/purple]\"))\n console.print(\n \"\\nThe terminal has a tree like structure, where menus branch off into new menus.\\n\\n\"\n \"The users current location is displayed before the text prompt.\\n\\n\"\n \"For instance, if the user is inside the menu disc which is inside stocks, the following prompt \"\n \"will appear: \\n2022 Oct 18, 21:53 (🦋) [param]/stocks/disc/[/param] $\\n\\n\"\n \"If the user wants to go back to the menu above, all they need to do is type '[param]q[/param]'.\\n\\n\"\n \"If the user wants to go back to the home of the terminal, they can type '[param]/[/param]' instead.\\n\\n\"\n \"Note: Always type '[param]h[/param]' to know what commands are available in each menu\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#6 - Command Pipeline[/purple]\"))\n console.print(\n \"\\nThe terminal offers the capability of allowing users to speed up their navigation and command execution.\"\n \"\\n\\nTherefore, typing the following prompt is valid:\\n\"\n \"2022 Oct 18, 21:53 (🦋) / $ [param]stocks/load TSLA/dd/pt[/param]\\n\\n\"\n \"In this example, the terminal - in a single action - will go 
into '[param]stocks[/param]' menu, \"\n \"run command '[param]load[/param]' with '[param]TSLA[/param]' as input, \\n\"\n \"go into sub-menu '[param]dd[/param]' (due diligence) and run the command '[param]pt[/param]' (price target).\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#6 - OpenBB Scripts[/purple]\"))\n console.print(\n \"\\nThe command pipeline capability is great, but the user experience wasn't great copy-pasting large \"\n \"lists of commands.\\n\\n\"\n \"We allow the user to create a text file of the form:\\n\\n\"\n \"[param]FOLDER_PATH/my_script.openbb[/param]\\n\"\n \"stocks\\nload TSLA\\ndd\\npt\\n\\n\"\n \"which can be run through the '[param]exe[/param]' command in the home menu, with:\\n\"\n \"2022 Oct 18, 22:33 (🦋) / $ [param]exe FOLDER_PATH/my_script.openbb[/param]\\n\\n\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(\n panel.Panel(\"[purple]#7 - OpenBB Scripts with Arguments[/purple]\")\n )\n console.print(\n \"\\nThe user can create a script that includes arguments for the commands.\\n\\n\"\n \"Example:\\n\\n\"\n \"[param]FOLDER_PATH/my_script_with_variable_input.openbb[/param]\\n\"\n \"stocks\\n# this is a comment\\nload $ARGV[0]\\ndd\\npt\\nq\\nload $ARGV[1]\\ncandle\\n\\n\"\n \"and then, if this script is run with:\\n\"\n \"2022 Oct 18, 22:33 (🦋) / $ [param]exe FOLDER_PATH/my_script_with_variable_input.openbb \"\n \"-i AAPL,MSFT[/param]\\n\\n\"\n \"This means that the [param]pt[/param] will run on [param]AAPL[/param] while \"\n \"[param]candle[/param] on [param]MSFT[/param]\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#8 - OpenBB Script Generation/purple]\"))\n console.print(\n \"\\n\"\n \"To make it easier for users to create scripts, we have created a \"\n \"command that 'records' user commands \"\n \"directly into a script.\\n\\n\"\n \"From the home menu, the user can run:\\n\"\n \"2022 Oct 18, 22:33 (🦋) / $ [param]record[/param]\\n\\n\"\n \"and then perform your typical investment research workflow before entering\\n\\n\"\n \"2022 Oct 18, 22:33 (🦋) / $ [param]stop[/param]\\n\\n\"\n \"After stopping, the script will be saved to the 'scripts' folder.\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#9 - Terminal Customization[/purple]\"))\n console.print(\n \"\\nUsers should explore the [param]settings[/param] and [param]featflags[/param] menus \"\n \"to configure their terminal.\\n\\n\"\n \"The fact that our terminal is fully open source allows users to be able to customize \"\n \"anything they want.\\n\\n\"\n \"If you are interested in contributing to the project, please check:\\n\"\n \"[param]https://github.com/OpenBB-finance/OpenBBTerminal[/param]\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#10 - Support[/purple]\"))\n console.print(\n \"\\n\"\n \"We are nothing without our community, hence we put a lot of effort in being here for you.\\n\\n\"\n \"If you find any bug that you wish to report to improve the terminal you can do so with:\\n\"\n \"2022 Oct 18, 22:33 (🦋) / $ [param]support CMD[/param]\\n\\n\"\n \"which should open a form in your browser where you can report the bug in said 'CMD'.\\n\\n\"\n \"If you want to know more, or have any further question. 
Please join us on Discord:\\n\"\n \"[param]https://openbb.co/discord[/param]\"\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 2640, "n_words": 1014, "vocab_size": 465, "complexity": 12, "nloc": 161, "token_counts": 454, "n_ast_nodes": 1122, "n_identifiers": 8, "random_cut": "def call_intro(self, _):\n \n console.print(panel.Panel(\"[purple]Welcome to the OpenBB Terminal.[/purple]\"))\n console.print(\n \"\\nThe following walkthrough will guide you towards making the most out of the OpenBB Terminal.\\n\\n\"\n \"Press Enter to continue or 'q' followed by Enter to exit.\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#1 - Commands vs menu.[/purple]\"))\n console.print(\n \"\\nMenus are a collection of 'commands' and 'sub-menus'.\\n\"\n \"You can identify them through their distinct color and a '>' at the beginning of the line\\n\\n\"\n \"For instance:\\n\"\n \"[menu]> stocks access historical pricing data, options, sector [/menu]\"\n \"[menu]and industry, and overall due diligence [/menu]\\n\\n\\n\"\n \"Commands are expected to return data either as a chart or table.\\n\"\n \"You can identify them through their distinct color\\n\\n\"\n \"For instance:\\n\"\n \"[cmds]> news display news articles based on term and data sources [/cmds]\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#2 - Using commands[/purple]\"))\n console.print(\n \"\\nCommands throughout the terminal can have additional arguments.\\n\\n\"\n \"Let's say that in the current menu, you want to have more information about the command 'news'. \\n\\n\"\n \"You can either see the available arguments in the terminal, using: [param]news -h[/param]\\n\\n\",\n \"or you can find out more about it with an output example on the browser, using: [param]about news[/param]\",\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#3 - Setting API Keys[/purple]\"))\n console.print(\n \"\\nThe OpenBB Terminal does not own any of the data you have access to.\\n\\n\"\n \"Instead, we provide the infrastructure to access over 100 different data sources from a single location.\\n\\n\"\n \"Thus, it is necessary for each user to set their own API keys for the various third party sources\\n\\n\"\n \"You can find more about this on the '[param]keys[/param]' menu.\\n\\n\"\n \"For many commands, there are multiple data sources that can be selected.\\n\\n\"\n \"The help menu shows the data sources supported by each command.\\n\\n\"\n \"For instance:\\n\"\n \"[cmds] load load a specific stock ticker and additional info for analysis [/cmds]\"\n \"[src][YahooFinance, IEXCloud, AlphaVantage, Polygon, EODHD] [/src]\\n\\n\"\n \"The user can go into the '[param]sources[/param]' menu and select their preferred default data source.\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(\n panel.Panel(\"[purple]#4 - Symbol dependent menus and commands[/purple]\")\n )\n console.print(\n \"\\nThroughout the terminal, you will see commands and menus greyed out.\\n\\n\"\n \"These menus or commands cannot be accessed until an object is loaded.\\n\\n\"\n \"Let's take as an example the '[param]stocks[/param]' menu.\\n\\n\"\n \"You will see that the command '[param]disc[/param]' is available as its goal is to discover new tickers:\\n\"\n \"[menu]> stocks access 
historical pricing data, options, sector [/menu]\\n\\n\"\n \"On the other hand, '[param]fa[/param]' menu (fundamental analysis) requires a ticker to be loaded.\\n\\n\"\n \"And therefore, appears as:\\n\"\n \"[dim]> fa fundamental analysis of loaded ticker [/dim]\\n\\n\"\n \"Once a ticker is loaded with: [param]load TSLA[/param]\\n\\n\"\n \"The '[param]fa[/param]' menu will be available as:\\n\"\n \"[menu]> fa fundamental analysis of loaded ticker [/menu]\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#5 - Terminal Navigation[/purple]\"))\n console.print(\n \"\\nThe terminal has a tree like structure, where menus branch off into new menus.\\n\\n\"\n \"The users current location is displayed before the text prompt.\\n\\n\"\n \"For instance, if the user is inside the menu disc which is inside stocks, the following prompt \"\n \"will appear: \\n2022 Oct 18, 21:53 (🦋) [param]/stocks/disc/[/param] $\\n\\n\"\n \"If the user wants to go back to the menu above, all they need to do is type '[param]q[/param]'.\\n\\n\"\n \"If the user wants to go back to the home of the terminal, they can type '[param]/[/param]' instead.\\n\\n\"\n \"Note: Always type '[param]h[/param]' to know what commands are available in each menu\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#6 - Command Pipeline[/purple]\"))\n console.print(\n \"\\nThe terminal offers the capability of allowing users to speed up their navigation and command execution.\"\n \"\\n\\nTherefore, typing the following prompt is valid:\\n\"\n \"2022 Oct 18, 21:53 (🦋) / $ [param]stocks/load TSLA/dd/pt[/param]\\n\\n\"\n \"In this example, the terminal - in a single action - will go into '[param]stocks[/param]' menu, \"\n \"run command '[param]load[/param]' with '[param]TSLA[/param]' as input, \\n\"\n \"go into sub-menu '[param]dd[/param]' (due diligence) and run the command '[param]pt[/param]' (price target).\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#6 - OpenBB Scripts[/purple]\"))\n console.print(\n \"\\nThe command pipeline capability is great, but the user experience wasn't great copy-pasting large \"\n \"lists of commands.\\n\\n\"\n \"We allow the user to create a text file of the form:\\n\\n\"\n \"[param]FOLDER_PATH/my_script.openbb[/param]\\n\"\n \"stocks\\nload TSLA\\ndd\\npt\\n\\n\"\n \"which can be run through the '[param]exe[/param]' command in the home menu, with:\\n\"\n \"2022 Oct 18, 22:33 (🦋) / $ [param]exe FOLDER_PATH/my_script.openbb[/param]\\n\\n\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(\n panel.Panel(\"[purple]#7 - OpenBB Scripts with Arguments[/purple]\")\n )\n console.print(\n \"\\nThe user can create a script that includes arguments for the commands.\\n\\n\"\n \"Example:\\n\\n\"\n \"[param]FOLDER_PATH/my_script_with_variable_input.openbb[/param]\\n\"\n \"stocks\\n# this is a comment\\nload $ARGV[0]\\ndd\\npt\\nq\\nload $ARGV[1]\\ncandle\\n\\n\"\n \"and then, if this script is run with:\\n\"\n \"2022 Oct 18, 22:33 (🦋) / $ [param]exe FOLDER_PATH/my_script_with_variable_input.openbb \"\n \"-i AAPL,MSFT[/param]\\n\\n\"\n \"This means that the [param]pt[/param] will run on [param]AAPL[/param] while \"\n \"[param]candle[/param] on [param]MSFT[/param]\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#8 - OpenBB Script Generation/purple]\"))\n console.print(\n 
\"\\n\"\n \"To make it easier for users to create scripts, we have created a \"\n \"command that 'records' user commands \"\n \"directly into a script.\\n\\n\"\n \"From the home menu, the user can run:\\n\"\n \"2022 Oct 18, 22:33 (🦋) / $ [param]record[/param]\\n\\n\"\n \"and then perform your typical investment research workflow before entering\\n\\n\"\n \"2022 Oct 18, 22:33 (🦋) / $ [param]stop[/param]\\n\\n\"\n \"After stopping, the script will be saved to the 'scripts' folder.\"\n )\n if input(\"\") == \"q\":\n return\n console.print(\"\\n\")\n\n console.print(panel.Panel(\"[purple]#9 - Terminal Customization[/purple]\"))\n console.print(\n \"\\nUsers should explore the [param]settings[/param] and [param]featflags[/param] menus \"\n \"to configure their terminal.\\n\\n\"\n \"The fact that our terminal is fully open source allows users to be able to customize \"\n \"anything they want.\\n\\n\"\n \"If you are interested in contributing to the project, please check:\\n\"\n \"[param]https://github.com/OpenBB-finance/Open" }, { "id": 114378, "commit_id": "0fd3b436c38f38bcae6fed9e14dc4d2a12e90793", "repo": "mindsdb", "path": "mindsdb/integrations/mlflow_handler/mlflow/mlflow_integration.py", "file_name": "mlflow_integration.py", "fun_name": "connect", "commit_message": "fix tests and reformat", "code": "def connect(self, **kwargs) -> Dict[str, int]:\n # noqa\n print(kwargs)\n self.mlflow_server_url = kwargs['mlflow_server_url']\n self.mlflow_server_path = kwargs['model_registry_path']\n self.connection = MlflowClient(self.mlflow_server_url, self.mlflow_server_path)\n self.storage = SqliteStorageHandler(context=self.name, config=kwargs['config'])\n return self.check_status()\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 75, "n_words": 25, "vocab_size": 22, "complexity": 1, "nloc": 8, "token_counts": 75, "n_ast_nodes": 123, "n_identifiers": 17, "random_cut": "def connect(self, **kwargs) -> Dict[str, int]:\n # noqa\n print(kwargs)\n self.mlflow_server_url = kwargs['mlflow_server_url']\n self.mlflow_server_path = kwargs['model_registry_path']\n self.connection = MlflowClient(self.mlflow_server_url, self.mlflow_server_path)\n self.storage = SqliteStorageHandler(context=self.name, config=kwarg" }, { "id": 136534, "commit_id": "c7115135ea131b29bd6ff3d32e4f90297e5e770e", "repo": "ray", "path": "python/ray/serve/handle.py", "file_name": "handle.py", "fun_name": "_make_router", "commit_message": "Revert \"[all_tests][python] Remove calling of get_event_loop from pyt… (#30382)\n\nThis reverts commit 784e66b.", "code": "def _make_router(self) -> Router:\n return Router(\n self.controller_handle,\n self.deployment_name,\n event_loop=asyncio.get_event_loop(),\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 56, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 6, "token_counts": 27, "n_ast_nodes": 41, "n_identifiers": 8, "random_cut": "def _make_router(self) -> Router:\n ret" }, { "id": 92854, "commit_id": "15e21309086e5e97fa66f0d5234cb3110bcf34f1", "repo": "sentry", "path": "src/sentry/models/deploy.py", "file_name": "deploy.py", "fun_name": "notify_if_ready", "commit_message": "feat(release-activity): backend support for tracking release activity (#36608)\n\nAlpha Workflow 2.0 feature for tracking activity when an issue occurs in an active release-deployment time-window.\r\n\r\nThis adds the 
backend components needed to support surfacing the notification activity in the frontend.", "code": "def notify_if_ready(cls, deploy_id, fetch_complete=False):\n \n from sentry.models import (\n Activity,\n Environment,\n Organization,\n ReleaseActivity,\n ReleaseCommit,\n ReleaseHeadCommit,\n )\n\n lock_key = cls.get_lock_key(deploy_id)\n lock = locks.get(lock_key, duration=30, name=\"deploy_notify\")\n with TimedRetryPolicy(10)(lock.acquire):\n deploy = cls.objects.filter(id=deploy_id).select_related(\"release\").get()\n if deploy.notified:\n return\n\n release = deploy.release\n environment = Environment.objects.get(\n organization_id=deploy.organization_id, id=deploy.environment_id\n )\n\n if not fetch_complete:\n release_has_commits = ReleaseCommit.objects.filter(\n organization_id=release.organization_id, release=release\n ).exists()\n\n if not release_has_commits:\n # check if we have head commits, which\n # would indicate that we're waiting for\n # fetch_commits to complete\n if ReleaseHeadCommit.objects.filter(\n organization_id=release.organization_id, release=release\n ).exists():\n return\n\n activity = None\n for project in deploy.release.projects.all():\n activity = Activity.objects.create(\n type=ActivityType.DEPLOY.value,\n project=project,\n ident=Activity.get_version_ident(release.version),\n data={\n \"version\": release.version,\n \"deploy_id\": deploy.id,\n \"environment\": environment.name,\n },\n datetime=deploy.date_finished,\n )\n # Somewhat hacky, only send notification for one\n # Deploy Activity record because it will cover all projects\n if activity is not None:\n activity.send_notification()\n deploy.update(notified=True)\n # XXX(workflow): delete this after WF 2.0 experiment over\n try:\n org = Organization.objects.get_from_cache(id=deploy.organization_id)\n except Organization.DoesNotExist:\n org = None\n if org and features.has(\"organizations:active-release-monitor-alpha\", org):\n ReleaseActivity.objects.create(\n type=ReleaseActivityType.DEPLOYED.value,\n release=release,\n data={\"environment\": str(environment.name)},\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1066, "n_words": 154, "vocab_size": 119, "complexity": 10, "nloc": 54, "token_counts": 335, "n_ast_nodes": 535, "n_identifiers": 58, "random_cut": "def notify_if_ready(cls, deploy_id, fetch_complete=False):\n \n from sentry.models import (\n Activity,\n Environment,\n Organization,\n ReleaseActivity,\n ReleaseCommit,\n ReleaseHeadCommit,\n )\n\n lock_key = cls.get_lock_key(deploy_id)\n lock = locks.get(lock_key, duration=30, name=\"deploy_notify\")\n with TimedRetryPolicy(10)(lock.acquire):\n deploy = cls.objects.filter(id=deploy_id).select_related(\"release\").get()\n if deploy.notified:\n return\n\n release = deploy.release\n environment = Environment.objects.get(\n organization_id=deploy.organization_id, id=deploy.environment_id\n )\n\n if not fetch_complete:\n release_has_commits = ReleaseCommit.objects.filter(\n organization_id=release.organization_id, release=release\n ).exists()\n\n if not release_has_commits:\n # check if we have head commits, which\n # would indicate that we're waiting for\n # fetch_commits to complete\n if ReleaseHeadCommit.objects.filter(\n organization_id=release.organization_id, release=release\n ).exists():\n return\n\n activity = None\n for project in deploy.release.projects.all():\n activity = Activity.objects.create(\n type=ActivityType.DEPLOY.value,\n 
project=project,\n ident=Activity.get_version_ident(release.version),\n data={\n \"version\": release.version,\n \"deploy_id\": deploy.id,\n \"environment\": environment.name,\n },\n datetime=deploy.date_finished,\n )\n # Somewhat hacky, only send notification for one\n # Deploy Activity record because it will cover all projects\n if activity is not None:\n activity.send_notification()\n deploy.update(notified=True)\n # XXX(workflow): delete this after WF 2.0 experiment over\n try:\n org = Organization.objects.get_from_cache(id=deploy.organization_id)\n except Organization.DoesNotExist:\n org = None\n if org and features.has(\"organizations:active-release-monitor-alpha\", org):\n" }, { "id": 321359, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "tests/unit/keyinput/test_basekeyparser.py", "file_name": "test_basekeyparser.py", "fun_name": "test_binding_with_shift", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def test_binding_with_shift(self, prompt_keyparser):\n \n for key, modifiers in [(Qt.Key.Key_Y, Qt.KeyboardModifier.NoModifier),\n (Qt.Key.Key_Shift, Qt.KeyboardModifier.ShiftModifier),\n (Qt.Key.Key_Y, Qt.KeyboardModifier.ShiftModifier)]:\n info = keyutils.KeyInfo(key, modifiers)\n prompt_keyparser.handle(info.to_event())\n\n prompt_keyparser.execute.assert_called_once_with('yank -s', None)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 124, "n_words": 21, "vocab_size": 21, "complexity": 2, "nloc": 7, "token_counts": 87, "n_ast_nodes": 132, "n_identifiers": 19, "random_cut": "def test_binding_with_shift(self, prompt_keyparser):\n \n for key, modifiers in [(Qt.Key.Key_Y, Qt.KeyboardModifier.NoModifier),\n (Qt.Key.Key_Shift, Qt.KeyboardModifier.ShiftModifier),\n (Qt.Key.Key_Y, Qt.KeyboardModifier.ShiftModifier)]:\n info = keyutils.KeyInfo(key" }, { "id": 182308, "commit_id": "b2f7c2ac850ab43020706c3e5b6660db1f25507a", "repo": "textual", "path": "examples/dev_sandbox.py", "file_name": "dev_sandbox.py", "fun_name": "on_mount", "commit_message": "Displaying tabs with underline", "code": "def on_mount(self):\n self.tabs = Tabs(\n [\n Tab(\"One\", name=\"one\"),\n Tab(\"Two\", name=\"two\"),\n Tab(\"Three\", name=\"three\"),\n Tab(\"Four\", name=\"four\"),\n Tab(\"Five\", name=\"five\"),\n Tab(\"Six\", name=\"six\"),\n Tab(\"Seven\", name=\"seven\"),\n Tab(\"Eight\", name=\"eight\"),\n ],\n )\n self.tabs.active_tab_name = \"one\"\n \n self.mount(\n header=self.tabs,\n content=PanelWidget(),\n footer=Widget(),\n sidebar=Widget(),\n )\n\n\nBasicApp.run(css_file=\"dev_sandbox.scss\", watch_css=True, log=\"textual.log\")\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 263, "n_words": 36, "vocab_size": 34, "complexity": 1, "nloc": 21, "token_counts": 124, "n_ast_nodes": 241, "n_identifiers": 19, "random_cut": "def on_mount(self):\n self.tabs = Tabs(\n [\n Tab(\"One\", name=\"one\"),\n Tab(\"Two\", name=\"two\"),\n Tab(\"Three\", name=\"three\"),\n Tab(\"Four\", name=\"four\"),\n Tab(\"Five\", name=\"five\"),\n Tab(\"Six\", name=\"six\"),\n Tab(\"Seven\", name=\"seven\"),\n Tab(\"Eight\", name=\"eight\"),\n ],\n )\n self.tabs.active_tab_name = \"one\"\n \n sel" }, { "id": 223810, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/message.py", "file_name": "message.py", 
"fun_name": "get_filename", "commit_message": "add python 3.10.4 for windows", "code": "def get_filename(self, failobj=None):\n \n missing = object()\n filename = self.get_param('filename', missing, 'content-disposition')\n if filename is missing:\n filename = self.get_param('name', missing, 'content-type')\n if filename is missing:\n return failobj\n return utils.collapse_rfc2231_value(filename).strip()\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 92, "n_words": 28, "vocab_size": 18, "complexity": 3, "nloc": 8, "token_counts": 62, "n_ast_nodes": 106, "n_identifiers": 10, "random_cut": "def get_filename(self, failobj=None):\n \n missing = object()\n filename = self.get_param('filename', missing, 'content-disposition')\n if filename is missing:\n filename = self.get_param('name', missing, 'content-type')\n if filename is missing:\n return failobj\n return utils.coll" }, { "id": 965, "commit_id": "0805df03a6a8e068bfbe039e0664a842f50ad5de", "repo": "PySyft", "path": "packages/syft/src/syft/core/tensor/tensor.py", "file_name": "tensor.py", "fun_name": "_object2bytes", "commit_message": "working ndept pointer", "code": "def _object2bytes(self) -> bytes:\n schema = get_capnp_schema(schema_file=\"tensor.capnp\")\n tensor_struct: CapnpModule = schema.Tensor # type: ignore\n tensor_msg = tensor_struct.new_message()\n\n # this is how we dispatch correct deserialization of bytes\n tensor_msg.magicHeader = serde_magic_header(type(self))\n\n chunk_bytes(sy.serialize(self.child, to_bytes=True), \"child\", tensor_msg)\n\n tensor_msg.publicShape = sy.serialize(self.public_shape, to_bytes=True)\n tensor_msg.publicDtype = self.public_dtype\n tensor_msg.tagName = self.tag_name\n\n return tensor_msg.to_bytes_packed()\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 116, "n_words": 46, "vocab_size": 39, "complexity": 1, "nloc": 10, "token_counts": 95, "n_ast_nodes": 155, "n_identifiers": 26, "random_cut": "def _object2bytes(self) -> bytes:\n schema = get_capnp_schema(schema_file=\"tensor.capnp\")\n tensor_struct: CapnpModule = schema.Tensor # type: ignore\n tensor_msg = tensor_struct.new_message()\n\n # this is how we dispatch correct deserialization of bytes\n tensor_msg.magicHeader = serde_magic_header(type(self))\n\n chunk_bytes(sy.serialize(self.child, to_bytes=True), \"child\", tensor_msg)\n\n tensor_msg.publicShape = sy.serialize(self.pub" }, { "id": 160706, "commit_id": "a5535dc6242b0decae1e65a3d4feb220fefedc49", "repo": "numpy", "path": "numpy/polynomial/_polybase.py", "file_name": "_polybase.py", "fun_name": "_repr_latex_scalar", "commit_message": "MAINT: limit the number of decimals in Polynomial representation (#21654)\n\n* limit the number of decimals in Polynomial representation\r\n\r\n* tests pass\r\n\r\n* parenthesize exponential notation in polynomials\r\n\r\n* fixed a long line warning\r\n\r\n* added polynomial printoptions tests\r\n\r\n* polynomial printoptions typo fixed\r\n\r\n* made switch to exp notation in polynomial display more natural\r\n\r\n* added a test on switching polynomials to exp notation\r\n\r\n* fixed linter errors/warnings\r\n\r\n* support for nanstr and infstr printoptions in polynomials\r\n\r\n* 10^8 threshold for switching to exp notation when displaying polynomials\r\n\r\n* merged in PR #21696 fixing issue #21695\r\n\r\n* made linter happy\r\n\r\n* made some docstring tests 
pass\r\n\r\n* fixed the docs\r\n\r\nCo-authored-by: Lev Maximov ", "code": "def _repr_latex_scalar(x, parens=False):\n # TODO: we're stuck with disabling math formatting until we handle\n # exponents in this function\n return r'\\text{{{}}}'.format(pu.format_float(x, parens=parens))\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 42, "n_words": 22, "vocab_size": 21, "complexity": 1, "nloc": 2, "token_counts": 26, "n_ast_nodes": 42, "n_identifiers": 6, "random_cut": "def _repr_latex_scalar(x, parens=False):\n # TODO: we're stuck with disabling math formatting until we handle\n # exponents in this function\n return r'\\text{{{}}}'.format(pu.format_float(x, parens=" }, { "id": 19664, "commit_id": "9a3b3ce70621af6f9adaa9eeac9cf83fa149319c", "repo": "pipenv", "path": "pipenv/environment.py", "file_name": "environment.py", "fun_name": "reverse_dependency", "commit_message": "Issue 4993 Add standard pre commit hooks and apply linting. (#4994)\n\n* Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else.", "code": "def reverse_dependency(cls, node):\n new_node = {\n \"package_name\": node[\"package_name\"],\n \"installed_version\": node[\"installed_version\"],\n \"required_version\": node[\"required_version\"],\n }\n for dependency in node.get(\"dependencies\", []):\n for dep in cls.reverse_dependency(dependency):\n new_dep = dep.copy()\n new_dep[\"parent\"] = (node[\"package_name\"], node[\"installed_version\"])\n yield new_dep\n yield new_node\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 149, "n_words": 33, "vocab_size": 26, "complexity": 3, "nloc": 12, "token_counts": 82, "n_ast_nodes": 141, "n_identifiers": 9, "random_cut": "def reverse_dependency(cls, node):\n new_node = {\n \"package_name\": node[\"package_name\"],\n \"installed_version\": node[\"installed_version\"],\n \"required_version\": node[\"required_version\"],\n }\n for dependency in node.get(\"dependencies\", []):\n for dep in cls.reverse_dependency(dependency):\n new_dep = dep.copy()\n new_dep[\"parent" }, { "id": 91937, "commit_id": "a8264817cfe5a4c03515869c010c78d04a710f1e", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_data.py", "file_name": "test_organization_metric_data.py", "fun_name": "test_validate_include_meta_not_enabled_by_default", "commit_message": "fix(tests) - use _indexer_record instead of indexer.record (#35908)\n\nfix a test that was added after I branched off and changed this file structure for my feature", "code": "def test_validate_include_meta_not_enabled_by_default(self):\n self.create_release(version=\"foo\", project=self.project)\n for tag in (\"release\", \"environment\"):\n _indexer_record(self.project.organization_id, tag)\n response = self.get_success_response(\n self.project.organization.slug,\n project=self.project.id,\n field=\"sum(sentry.sessions.session)\",\n groupBy=\"environment\",\n query=\"\",\n )\n assert response.data[\"meta\"] == []\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 124, "n_words": 24, "vocab_size": 24, "complexity": 2, "nloc": 12, "token_counts": 83, "n_ast_nodes": 135, "n_identifiers": 17, "random_cut": "def 
test_validate_include_meta_not_enabled_by_default(self):\n self.create_release(version=\"foo\", project=self.project)\n for tag in (\"release\", \"environment\"):\n _indexer_record(self.project.organization_id, tag)\n response = self.get_success_response(\n self.project.organiz" }, { "id": 211348, "commit_id": "e55e41945d42db787a0f7c557d53d06a6b24536b", "repo": "PaddleDetection", "path": "ppdet/modeling/rbox_utils.py", "file_name": "rbox_utils.py", "fun_name": "rbox2poly_np", "commit_message": "Refactor rbox (#6704)\n\n* refactor rbox\r\n\r\n* modify the code of save results\r\n\r\n* fix some problem\r\n\r\n* add .gitignore in dataset/dota\r\n\r\n* fix test anno path", "code": "def rbox2poly_np(rboxes):\n \n polys = []\n for i in range(len(rboxes)):\n x_ctr, y_ctr, width, height, angle = rboxes[i][:5]\n tl_x, tl_y, br_x, br_y = -width / 2, -height / 2, width / 2, height / 2\n rect = np.array([[tl_x, br_x, br_x, tl_x], [tl_y, tl_y, br_y, br_y]])\n R = np.array([[np.cos(angle), -np.sin(angle)],\n [np.sin(angle), np.cos(angle)]])\n poly = R.dot(rect)\n x0, x1, x2, x3 = poly[0, :4] + x_ctr\n y0, y1, y2, y3 = poly[1, :4] + y_ctr\n poly = np.array([x0, y0, x1, y1, x2, y2, x3, y3], dtype=np.float32)\n poly = get_best_begin_point_single(poly)\n polys.append(poly)\n polys = np.array(polys)\n return polys\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 196, "n_words": 90, "vocab_size": 61, "complexity": 2, "nloc": 16, "token_counts": 227, "n_ast_nodes": 327, "n_identifiers": 35, "random_cut": "def rbox2poly_np(rboxes):\n \n polys = []\n for i in range(len(rboxes)):\n x_ctr, y_ctr, width, height, angle = rboxes[i][:5]\n tl_x, tl_y, br_x, br_y = -width / 2, -height / 2, width / 2, height / 2\n rect = np.array([[tl_x, br_x, br_x, tl_x], [tl_y, tl_y, br_y, br_y]])\n R = np.array([[np.cos(angle), -np.sin(angle)],\n [np.sin(angle), np.cos(angle)]])\n poly = R.dot(rect)\n x0, x1, x2, x3 = poly[0, :4] + x_ctr\n y0, y1, y" }, { "id": 105777, "commit_id": "d7dfbc83d68e87ba002c5eb2555f7a932e59038a", "repo": "datasets", "path": "tests/io/test_sql.py", "file_name": "test_sql.py", "fun_name": "iter_sql_file", "commit_message": "Add ability to read-write to SQL databases. 
(#4928)\n\n* Add ability to read-write to SQL databases.\r\n\r\n* Fix issue where pandas<1.4.0 doesn't return the number of rows\r\n\r\n* Fix issue where connections were not closed properly\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Change according to reviews\r\n\r\n* Change according to reviews\r\n\r\n* Inherit from AbstractDatasetInputStream in SqlDatasetReader\r\n\r\n* Revert typing in SQLDatasetReader as we do not support Connexion\r\n\r\n* Align API with Pandas/Daskk\r\n\r\n* Update tests\r\n\r\n* Update docs\r\n\r\n* Update some more tests\r\n\r\n* Missing comma\r\n\r\n* Small docs fix\r\n\r\n* Style\r\n\r\n* Update src/datasets/arrow_dataset.py\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Update src/datasets/packaged_modules/sql/sql.py\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Address some comments\r\n\r\n* Address the rest\r\n\r\n* Improve tests\r\n\r\n* sqlalchemy required tip\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: mariosasko ", "code": "def iter_sql_file(sqlite_path):\n with contextlib.closing(sqlite3.connect(sqlite_path)) as con:\n cur = con.cursor()\n cur.execute(\"SELECT * FROM dataset\")\n for row in cur:\n yield row\n\n\n@require_sqlalchemy", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "@require_sqlalchemy", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 53, "n_words": 20, "vocab_size": 19, "complexity": 2, "nloc": 6, "token_counts": 40, "n_ast_nodes": 75, "n_identifiers": 12, "random_cut": "def iter_sql_file(sqlite_path):\n with contextlib.closing(sqlite3.connect(sqlite_path)) as con:\n cur = con.cursor()\n cur.execute(\"SELECT * FROM dataset\")\n for row in cur:\n yield row\n\n" }, { "id": 92226, "commit_id": "fe544b17a269e5ff6f86208dcf38c492f904dbb7", "repo": "sentry", "path": "src/sentry/integrations/vercel/webhook.py", "file_name": "webhook.py", "fun_name": "verify_signature", "commit_message": "chore(vercel): Remove deprecated project webhooks (#36260)\n\nThis PR fulfills a plan from August 2021 in which Vercel had deprecated project webhooks and we held onto compatability for a fair bit longer than expected.\r\n\r\nWe will continue to support the /delete route since existing integrations for self hosted users should still work, but we can now update the self hosted docs to use /webhook as is the case for most of our integrations.", "code": "def verify_signature(request):\n signature = request.META.get(\"HTTP_X_VERCEL_SIGNATURE\")\n secret = options.get(\"vercel.client-secret\")\n\n expected = hmac.new(\n key=secret.encode(\"utf-8\"), msg=bytes(request.body), digestmod=hashlib.sha1\n ).hexdigest()\n\n return constant_time_compare(expected, signature)\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 39, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 7, "token_counts": 64, "n_ast_nodes": 106, "n_identifiers": 20, "random_cut": "def verify_signature(request):\n signature = request.META.get(\"HTTP_X_VERCEL_SIGNATURE\")\n secret = options.get(\"vercel.client-secret\")\n\n expected = hmac.new(\n key=secret.encode(\"utf-8\"), msg=bytes(request.body), digestmod=hashlib.sha1\n ).hexdigest()\n\n return constant_time_compare(expected, sig" }, { "id": 279541, "commit_id": 
"be73ac1a1e25d9abd4d793cba9707098d7adf231", "repo": "keras", "path": "keras/preprocessing/image_test.py", "file_name": "image_test.py", "fun_name": "test_dataframe_iterator_class_mode_raw", "commit_message": "Add f-string format and lint with flynt on the whole codebase", "code": "def test_dataframe_iterator_class_mode_raw(self):\n tmpdir = self.create_tempdir()\n all_test_images = _generate_test_images(include_rgba=True)\n # save the images in the paths\n filenames = []\n count = 0\n for test_images in all_test_images:\n for im in test_images:\n filename = f\"image-{count}.png\"\n im.save(os.path.join(tmpdir.full_path, filename))\n filenames.append(filename)\n count += 1\n # case for 1D output\n df = pd.DataFrame({\"filename\": filenames}).assign(\n output_0=np.random.uniform(size=len(filenames)),\n output_1=np.random.uniform(size=len(filenames)),\n )\n df_iterator = image.ImageDataGenerator().flow_from_dataframe(\n df,\n y_col=\"output_0\",\n directory=tmpdir.full_path,\n batch_size=3,\n shuffle=False,\n class_mode=\"raw\",\n )\n batch_x, batch_y = next(df_iterator)\n self.assertIsInstance(batch_x, np.ndarray)\n self.assertLen(batch_x.shape, 4)\n self.assertIsInstance(batch_y, np.ndarray)\n self.assertEqual(batch_y.shape, (3,))\n self.assertAllEqual(batch_y, df[\"output_0\"].values[:3])\n # case with a 2D output\n df_iterator = image.ImageDataGenerator().flow_from_dataframe(\n df,\n y_col=[\"output_0\", \"output_1\"],\n directory=tmpdir.full_path,\n batch_size=3,\n shuffle=False,\n class_mode=\"raw\",\n )\n batch_x, batch_y = next(df_iterator)\n self.assertIsInstance(batch_x, np.ndarray)\n self.assertLen(batch_x.shape, 4)\n self.assertIsInstance(batch_y, np.ndarray)\n self.assertEqual(batch_y.shape, (3, 2))\n self.assertAllEqual(batch_y, df[[\"output_0\", \"output_1\"]].values[:3])\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 513, "n_words": 107, "vocab_size": 67, "complexity": 3, "nloc": 43, "token_counts": 330, "n_ast_nodes": 520, "n_identifiers": 48, "random_cut": "def test_dataframe_iterator_class_mode_raw(self):\n tmpdir = self.create_tempdir()\n all_test_images = _generate_test_images(include_rgba=True)\n # save the images in the paths\n filenames = []\n count = 0\n for test_images in all_test_images:\n for im in test_images:\n filename = f\"image-{count}.png\"\n im.save(os.path.join(tmpdir.full_path, filename))\n filenames.append(filename)\n count += 1\n # case for 1D output\n df = pd.DataFrame({\"filename\": filenames}).assign(\n output_0=np.random.uniform(size=len(filenames)),\n output_1=np.random.uniform(size=len(filenames)),\n )\n df_iterator = image.ImageDataGenerator().flow_from_dataframe(\n df,\n y_col=\"output_0\",\n directory=tmpdir.full_path,\n batch_size=3,\n shuffle=False,\n class_mode=\"raw\",\n )\n batch_x, batch_y = next(df_iterator)\n self.assertIsInstance(batch_x, np.ndarray)\n self.assertLen(batch_x.shape, 4)\n self.assertIsInstance(batch_y, np.ndarray)\n self.assertEqual(batch_y.shape, (3,))\n self.assertAllEqual(batch_y, df[\"output_0\"].values[:3])\n # case with a 2D output\n df_iterator = image.ImageDataGenerator().flow_from_dataframe(\n df,\n y_col=[\"output_0\", \"output_1\"],\n directory=tmpdir.full_path,\n batch_size=3,\n shuffle=False,\n class_mode=\"raw\",\n )\n batch_x, batch_y = next(df_iterator)\n self.assertIsInstance(batch_x, np.ndarray)\n self.assertLen(batch_x.shape, 4)\n self.assertIsInstance(batch_y, np.ndarray)\n 
self.assertEqual(batch_y.shape, (3, 2))\n self.assertAllEqual(batch_y, df[[\"output_0\", \"output_1\"]].value" }, { "id": 211362, "commit_id": "486121eaa4ad142dde25ff7a77a2070f5a4571d4", "repo": "PaddleDetection", "path": "tools/eval.py", "file_name": "eval.py", "fun_name": "parse_args", "commit_message": "[smalldet] fix slice_infer (#6744)\n\n* fix slice_infer\r\n\r\n* fix doc, test=document_fix", "code": "def parse_args():\n parser = ArgsParser()\n parser.add_argument(\n \"--output_eval\",\n default=None,\n type=str,\n help=\"Evaluation directory, default is current directory.\")\n\n parser.add_argument(\n '--json_eval',\n action='store_true',\n default=False,\n help='Whether to re eval with already exists bbox.json or mask.json')\n\n parser.add_argument(\n \"--slim_config\",\n default=None,\n type=str,\n help=\"Configuration file of slim method.\")\n\n # TODO: bias should be unified\n parser.add_argument(\n \"--bias\",\n action=\"store_true\",\n help=\"whether add bias or not while getting w and h\")\n\n parser.add_argument(\n \"--classwise\",\n action=\"store_true\",\n help=\"whether per-category AP and draw P-R Curve or not.\")\n\n parser.add_argument(\n '--save_prediction_only',\n action='store_true',\n default=False,\n help='Whether to save the evaluation results only')\n\n parser.add_argument(\n \"--amp\",\n action='store_true',\n default=False,\n help=\"Enable auto mixed precision eval.\")\n\n # for smalldet slice_infer\n parser.add_argument(\n \"--slice_infer\",\n action='store_true',\n help=\"Whether to slice the image and merge the inference results for small object detection.\"\n )\n parser.add_argument(\n '--slice_size',\n nargs='+',\n type=int,\n default=[640, 640],\n help=\"Height of the sliced image.\")\n parser.add_argument(\n \"--overlap_ratio\",\n nargs='+',\n type=float,\n default=[0.25, 0.25],\n help=\"Overlap height ratio of the sliced image.\")\n parser.add_argument(\n \"--combine_method\",\n type=str,\n default='nms',\n help=\"Combine method of the sliced images' detection results, choose in ['nms', 'nmm', 'concat'].\"\n )\n parser.add_argument(\n \"--match_threshold\",\n type=float,\n default=0.6,\n help=\"Combine method matching threshold.\")\n parser.add_argument(\n \"--match_metric\",\n type=str,\n default='ios',\n help=\"Combine method matching metric, choose in ['iou', 'ios'].\")\n args = parser.parse_args()\n return args\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 594, "n_words": 178, "vocab_size": 121, "complexity": 1, "nloc": 70, "token_counts": 262, "n_ast_nodes": 436, "n_identifiers": 13, "random_cut": "def parse_args():\n parser = ArgsParser()\n parser.add_argument(\n \"--output_eval\",\n default=None,\n type=str,\n help=\"Evaluation directory, default is current directory.\")\n\n parser.add_argument(\n '--json_eval',\n action='store_true',\n default=False,\n help='Whether to re eval with already exists bbox.json or mask.json')\n\n parser.add_argument(\n \"--slim_config\",\n default=None,\n type=str,\n help=\"Configuration file of slim method.\")\n\n # TODO: bias should be unified\n parser.add_argument(\n \"--bias\",\n action=\"store_true\",\n help=\"whether add bias or not while getting w and h\")\n\n parser.add_argument(\n " }, { "id": 7938, "commit_id": "0ab41a299cc690940b750a79b704d69544315702", "repo": "ludwig", "path": "ludwig/data/preprocessing.py", "file_name": "preprocessing.py", "fun_name": "handle_missing_values", 
"commit_message": "Update missing value strategy to only allow bfill and ffill (#2457)\n\n* push changes\r\n\r\n* working missing value strategy\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Add type hints to backward compatibility transformations\r\n\r\n* Update test to test both missing value strategy updates\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def handle_missing_values(dataset_cols, feature, preprocessing_parameters):\n missing_value_strategy = preprocessing_parameters[\"missing_value_strategy\"]\n\n # Check for the precomputed fill value in the metadata\n computed_fill_value = preprocessing_parameters.get(\"computed_fill_value\")\n\n if (\n missing_value_strategy in {FILL_WITH_CONST, FILL_WITH_MODE, FILL_WITH_MEAN, FILL_WITH_FALSE}\n and computed_fill_value is not None\n ):\n dataset_cols[feature[COLUMN]] = dataset_cols[feature[COLUMN]].fillna(\n computed_fill_value,\n )\n elif missing_value_strategy in {BFILL, FFILL}:\n dataset_cols[feature[COLUMN]] = dataset_cols[feature[COLUMN]].fillna(\n method=missing_value_strategy,\n )\n elif missing_value_strategy == DROP_ROW:\n # Here we only drop from this series, but after preprocessing we'll do a second\n # round of dropping NA values from the entire output dataframe, which will\n # result in the removal of the rows.\n dataset_cols[feature[COLUMN]] = dataset_cols[feature[COLUMN]].dropna()\n else:\n raise ValueError(f\"Invalid missing value strategy {missing_value_strategy}\")\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 221, "n_words": 99, "vocab_size": 73, "complexity": 5, "nloc": 18, "token_counts": 128, "n_ast_nodes": 200, "n_identifiers": 19, "random_cut": "def handle_missing_values(dataset_cols, feature, preprocessing_parameters):\n missing_value_strategy = preprocessing_parameters[\"missing_value_strategy\"]\n\n # Check for the precomputed fill value in the metadata\n computed_fill_value = preprocessing_parameters.get(\"computed_fill_value\")\n\n if (\n missing_value_strategy in {FILL_WITH_CONST, FILL_WITH_MODE, FILL_WITH_MEAN, FILL_WITH_FAL" }, { "id": 99630, "commit_id": "04f013dd203f286aaf27b1c887bb72a2e24a498e", "repo": "sentry", "path": "tests/sentry/integrations/slack/notifications/test_new_processing_issues.py", "file_name": "test_new_processing_issues.py", "fun_name": "test_new_processing_issue", "commit_message": "Revert \"fix(notifications): Use `metrics_key` (#34572)\"\n\nThis reverts commit 1730c481f1a8a71446326fa1ff72e10663016385.\n\nCo-authored-by: marcos.gaeta via Slack ", "code": "def test_new_processing_issue(self, mock_func):\n \n\n notification = NewProcessingIssuesActivityNotification(\n Activity(\n project=self.project,\n user=self.user,\n type=ActivityType.NEW_PROCESSING_ISSUES,\n data={\n \"issues\": get_issues_data(),\n \"reprocessing_active\": True,\n },\n )\n )\n with self.tasks():\n notification.send()\n\n attachment, text = get_attachment()\n assert (\n text\n == f\"Processing issues on <{self.project.slug}|http://testserver/settings/{self.organization.slug}/projects/{self.project.slug}/processing-issues/\"\n )\n assert (\n attachment[\"text\"]\n == f\"Some events failed to process in your project {self.project.slug}\"\n )\n assert (\n attachment[\"footer\"]\n == f\"{self.project.slug} | \"\n )\n", "url": "https://github.com/getsentry/sentry.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 346, "n_words": 57, "vocab_size": 45, "complexity": 1, "nloc": 27, "token_counts": 95, "n_ast_nodes": 208, "n_identifiers": 20, "random_cut": "def test_new_processing_issue(self, mock_func):\n \n\n notification = NewProcessingIssuesActivityNotification(\n Activity(\n project=self.project,\n user=self.user,\n type=ActivityType.NEW_PROCESSING_ISSUES,\n data={\n \"issues\": get_issues_data(),\n \"reprocessing_active\": True,\n },\n )\n )\n with self.tasks():\n notification.send()\n\n attachment, text = get_attachment()\n assert (\n text\n == f\"Processing issues on <{self.project.slug}|http://testserver/settings/{self.organization.slug}/projects/{self.project.slug}/processing-issues/\"\n )\n assert (\n attachment[\"text\"]\n == f\"Some events failed to process in your project {self.project.slug}\"\n )\n assert (\n attachment[\"footer\"]\n == f\"{self.project.slug} | \"PlacementGroupFactory\":\n \n from ray.tune.execution.placement_groups import PlacementGroupFactory\n\n trainer_resources = self._trainer_resources_not_none\n trainer_bundle = [trainer_resources]\n worker_resources = {\n \"CPU\": self.num_cpus_per_worker,\n \"GPU\": self.num_gpus_per_worker,\n }\n worker_resources_extra = (\n {} if self.resources_per_worker is None else self.resources_per_worker\n )\n worker_bundles = [\n {**worker_resources, **worker_resources_extra}\n for _ in range(self.num_workers if self.num_workers else 0)\n ]\n bundles = trainer_bundle + worker_bundles\n return PlacementGroupFactory(bundles, strategy=self.placement_strategy)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 194, "n_words": 55, "vocab_size": 45, "complexity": 4, "nloc": 18, "token_counts": 102, "n_ast_nodes": 160, "n_identifiers": 22, "random_cut": "def as_placement_group_factory(self) -> \"PlacementGroupFactory\":\n \n from ray.tune.execution.placement_groups import PlacementGroupFactory\n\n " }, { "id": 70345, "commit_id": "4a848bfb4e3ec1a84a3d36fda577c1ed784de498", "repo": "wagtail", "path": "wagtail/core/blocks/list_block.py", "file_name": "list_block.py", "fun_name": "bulk_to_python", "commit_message": "Implement a ListValue type for ListBlocks", "code": "def bulk_to_python(self, values):\n # 'values' is a list of lists of child block values; concatenate them into one list so that\n # we can make a single call to child_block.bulk_to_python\n lengths = [len(val) for val in values]\n raw_values = list(itertools.chain.from_iterable(values))\n converted_values = self.child_block.bulk_to_python(raw_values)\n\n # split converted_values back into sub-lists of the original lengths\n result = []\n offset = 0\n for sublist_len in lengths:\n result.append(ListValue(values=converted_values[offset:offset + sublist_len]))\n offset += sublist_len\n\n return result\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 162, "n_words": 71, "vocab_size": 53, "complexity": 3, "nloc": 10, "token_counts": 77, "n_ast_nodes": 124, "n_identifiers": 18, "random_cut": "def bulk_to_python(self, values):\n # 'values' is a list of lists of child block values; concatenate them into one list so that\n # we can make a single call to child_block.bulk_to_python\n lengths = [len(val) for val in values]\n raw_values = list(itertools.chain.from_iterable(values))\n converted_values = 
self.child_block.bulk_to_python(raw_values)\n\n # split converted_values back into sub-lists of the original lengths\n result = []\n offset = 0\n for sublist_len in lengths:\n result.append(ListValue(values=converted_values[offset:offset + sublist_len]))\n offset += sublist_len\n\n return " }, { "id": 212239, "commit_id": "14ea3e941229c5069232bf29b48a57f4fb44394a", "repo": "bokeh", "path": "tests/unit/bokeh/models/test_glyphs.py", "file_name": "test_glyphs.py", "fun_name": "test_Image_kwargs", "commit_message": "Fix passing of kwargs in Image glyph's constructor (#12081)", "code": "def test_Image_kwargs() -> None:\n glyph = Image(x=0, y=0, dw=10, dh=10)\n assert glyph.image == field(\"image\")\n assert glyph.x == 0\n assert glyph.y == 0\n assert glyph.dw == 10\n assert glyph.dh == 10\n assert glyph.dilate is False\n", "url": "https://github.com/bokeh/bokeh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 54, "n_words": 34, "vocab_size": 23, "complexity": 1, "nloc": 8, "token_counts": 65, "n_ast_nodes": 100, "n_identifiers": 10, "random_cut": "def test_Image_kwargs() -> None:\n glyph " }, { "id": 213228, "commit_id": "ec8341197ccdd240a346a95c2a434e5ef9f9ef72", "repo": "ivy", "path": "ivy/core/general.py", "file_name": "general.py", "fun_name": "inplace_increment", "commit_message": "moved all inplace methods from gradients submodule to general submodule, as inplace ops are also relevant for non-Variable tensors.", "code": "def inplace_increment(x, val, f=None):\n \n return _cur_framework(x, f=f).inplace_increment(x, val)\n", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 28, "n_ast_nodes": 43, "n_identifiers": 5, "random_cut": "def inplace_increment(x, val, f=None):\n \n return _cur_fra" }, { "id": 64083, "commit_id": "c68c70f8bc88d9b05d64774ba070a34c059b7d30", "repo": "erpnext", "path": "erpnext/loan_management/doctype/loan/loan.py", "file_name": "loan.py", "fun_name": "make_refund_jv", "commit_message": "feat: Refund entry against loans", "code": "def make_refund_jv(loan, amount=0, reference_number=None, reference_date=None, submit=0):\n\tloan_details = frappe.db.get_value('Loan', loan, ['applicant_type', 'applicant',\n\t\t'loan_account', 'payment_account', 'posting_date', 'company', 'name',\n\t\t'total_payment', 'total_principal_paid'], as_dict=1)\n\n\tloan_details.doctype = 'Loan'\n\tloan_details[loan_details.applicant_type.lower()] = loan_details.applicant\n\n\tif not amount:\n\t\tamount = flt(loan_details.total_principal_paid - loan_details.total_payment)\n\n\t\tif amount < 0:\n\t\t\tfrappe.throw(_('No excess amount pending for refund'))\n\n\trefund_jv = get_payment_entry(loan_details, {\n\t\t\"party_type\": loan_details.applicant_type,\n\t\t\"party_account\": loan_details.loan_account,\n\t\t\"amount_field_party\": 'debit_in_account_currency',\n\t\t\"amount_field_bank\": 'credit_in_account_currency',\n\t\t\"amount\": amount,\n\t\t\"bank_account\": loan_details.payment_account\n\t})\n\n\tif reference_number:\n\t\trefund_jv.cheque_no = reference_number\n\n\tif reference_date:\n\t\trefund_jv.cheque_date = reference_date\n\n\tif submit:\n\t\trefund_jv.submit()\n\n\treturn refund_jv", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 51, "n_words": 76, "vocab_size": 63, 
"complexity": 6, "nloc": 25, "token_counts": 170, "n_ast_nodes": 287, "n_identifiers": 26, "random_cut": "def make_refund_jv(loan, amount=0, reference_number=None, reference_date=None, submit=0):\n\tloan_details = frappe.db.get_value('Loan', loan, ['applicant_type', 'applicant',\n\t\t'loan_account', 'payment_account', 'posting_date', 'company', 'name',\n\t\t'total_payment', 'total_principal_paid'], as_dict=1)\n\n\tloan_details.doctype = 'Loan'\n\tloan_details[loan_details.applicant_type.lower()] = loan_details.applicant\n\n\tif not amount:\n\t\tamount = flt(loan_details.total_principal_paid - loan_details.total_payment)\n\n\t\tif amount < 0:\n\t\t\tfrappe.throw(_('No excess amount pending for refund'))\n\n\trefund_jv = get_payment_entry(loan_details, {\n\t\t\"party_type\": loan_details.applicant_type,\n\t\t\"party_account\": loan_details.loan_account,\n\t\t\"amount_field_party\": 'debit_in_account_currency',\n\t\t\"amount_field_bank\": 'credit_in_account_currency',\n\t\t\"amount\": amount,\n\t\t\"bank_account\": loan_details.payment_account\n\t})\n\n\tif reference_number:\n\t\tr" }, { "id": 336306, "commit_id": "051b34635fda2fc310898a6a602c89be8663b77f", "repo": "diffusers", "path": "tests/test_modeling_utils.py", "file_name": "test_modeling_utils.py", "fun_name": "test_stable_diffusion", "commit_message": "[Half precision] Make sure half-precision is correct (#182)\n\n* [Half precision] Make sure half-precision is correct\r\n\r\n* Update src/diffusers/models/unet_2d.py\r\n\r\n* Update src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py\r\n\r\n* correct some tests\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Suraj Patil \r\n\r\n* finalize\r\n\r\n* finish\r\n\r\nCo-authored-by: Suraj Patil ", "code": "def test_stable_diffusion(self):\n sd_pipe = StableDiffusionPipeline.from_pretrained(\"CompVis/stable-diffusion-v1-1-diffusers\")\n\n prompt = \"A painting of a squirrel eating a burger\"\n generator = torch.Generator(device=torch_device).manual_seed(0)\n with torch.autocast(\"cuda\"):\n output = sd_pipe(\n [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type=\"np\"\n )\n\n image = output[\"sample\"]\n\n image_slice = image[0, -3:, -3:, -1]\n\n assert image.shape == (1, 512, 512, 3)\n expected_slice = np.array([0.898, 0.9194, 0.91, 0.8955, 0.915, 0.919, 0.9233, 0.9307, 0.8887])\n assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 161, "n_words": 62, "vocab_size": 52, "complexity": 1, "nloc": 13, "token_counts": 167, "n_ast_nodes": 224, "n_identifiers": 26, "random_cut": "def test_stable_diffusion(self):\n sd_pipe = StableDiffusionPipeline.from_pretrained(\"CompVis/stable-diffusion-v1-1-diffusers\")\n\n prompt = \"A painting of a squirrel eating a burger\"\n generator = torch.Generator(device=torc" }, { "id": 167324, "commit_id": "dc36ce1b3f6578833ca44cc7a5e49a75ddb02ec7", "repo": "pandas", "path": "pandas/tests/extension/test_arrow.py", "file_name": "test_arrow.py", "fun_name": "test_fillna_limit_backfill", "commit_message": "ENH/TST: Add BaseInterfaceTests tests for ArrowExtensionArray PT2 (#47468)", "code": "def test_fillna_limit_backfill(self, data_missing, using_array_manager, request):\n if using_array_manager and pa.types.is_duration(\n data_missing.dtype.pyarrow_dtype\n ):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=\"Checking ndim 
when using arraymanager with duration type\"\n )\n )\n super().test_fillna_limit_backfill(data_missing)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 126, "n_words": 24, "vocab_size": 23, "complexity": 3, "nloc": 10, "token_counts": 52, "n_ast_nodes": 83, "n_identifiers": 17, "random_cut": "def test_fillna_limit_backfill(self, data_missing, using_array_manager, request):\n if using_array_manager and pa.types.is_duration(\n data_missing.dtype.pyarrow_dtype\n ):\n request.node.add_marker(\n pytest.mark.xfail(\n reason=\"" }, { "id": 304169, "commit_id": "7a497c1e6e5a0d44b9418a754470ca9dd35e9719", "repo": "core", "path": "homeassistant/components/landisgyr_heat_meter/sensor.py", "file_name": "sensor.py", "fun_name": "async_added_to_hass", "commit_message": "Add Landis+Gyr Heat Meter integration (#73363)\n\n* Add Landis+Gyr Heat Meter integration\r\n\r\n* Add contant for better sensor config\r\n\r\n* Add test for init\r\n\r\n* Refactor some of the PR suggestions in config_flow\r\n\r\n* Apply small fix\r\n\r\n* Correct total_increasing to total\r\n\r\n* Add test for restore state\r\n\r\n* Add MWh entity that can be added as gas on the energy dashoard\r\n\r\n* Remove GJ as unit\r\n\r\n* Round MWh to 5 iso 3 digits\r\n\r\n* Update homeassistant/components/landisgyr_heat_meter/const.py\r\n\r\n* Update CODEOWNERS\r\n\r\nCo-authored-by: Erik Montnemery ", "code": "async def async_added_to_hass(self):\n \n await super().async_added_to_hass()\n state = await self.async_get_last_sensor_data()\n if state:\n self._attr_native_value = state.native_value\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 53, "n_words": 14, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 32, "n_ast_nodes": 59, "n_identifiers": 7, "random_cut": "async def async_added_to_hass(self):\n \n await super().async_added_to_hass()\n state = await" }, { "id": 106072, "commit_id": "a528472ad7e164588f568b75b3b3e36ff71996d2", "repo": "datasets", "path": "src/datasets/formatting/formatting.py", "file_name": "formatting.py", "fun_name": "__repr__", "commit_message": "Support for decoding Image/Audio types in map when format type is not default one (#5252)\n\n* Add iter to arrow dataset and iterable dataset\r\n\r\n* Remove decoded from formatters\r\n\r\n* First attempt\r\n\r\n* Minor fix in np formatter\r\n\r\n* Fixes\r\n\r\n* Add tests for iter method\r\n\r\n* Minor test refactor\r\n\r\n* Add (and update) tests\r\n\r\n* Code fixes\r\n\r\n* Fix iter for drop_last_batch=True and pyarrow<=8\r\n\r\n* Make `supports_lazy_decoding` class attribute of formatters\r\n\r\n* Update docs\r\n\r\n* Update lazy dict\r\n\r\n* Test lazy formatting\r\n\r\n* Lazily extract columns\r\n\r\n* Update tests\r\n\r\n* Update iter benchmark in docs\r\n\r\n* Fix CI\r\n\r\n* Add select to docs\r\n\r\n* Add array_concat\r\n\r\n* CI fix\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Style\r\n\r\n* Add comments from code review\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Add test with sliced arrays\r\n\r\n* Use array_concat only for extension arrays\r\n\r\n* fix None -> empty array warning\r\n\r\n* fix map with mix of lazy dict and regular dict\r\n\r\n* fix benchmarks\r\n\r\n* fix tests\r\n\r\n* fix 
tests\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: Quentin Lhoest ", "code": "def __repr__(self):\n self._format_all()\n return repr(self.data)\n\n if config.PY_VERSION >= version.parse(\"3.9\"):\n # merging with the union (\"|\") operator is supported in Python 3.9+\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 44, "n_words": 21, "vocab_size": 21, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 51, "n_identifiers": 9, "random_cut": "def __repr__(self):\n self._format_all()\n return repr(self.d" }, { "id": 23569, "commit_id": "f6532a0e51222c4385dd41a0f9de169f188ac29a", "repo": "PaddleOCR", "path": "ppocr/modeling/backbones/__init__.py", "file_name": "__init__.py", "fun_name": "build_backbone", "commit_message": "add ppocrv3 rec (#6033)\n\n* add ppocrv3 rec", "code": "def build_backbone(config, model_type):\n if model_type == \"det\" or model_type == \"table\":\n from .det_mobilenet_v3 import MobileNetV3\n from .det_resnet_vd import ResNet\n from .det_resnet_vd_sast import ResNet_SAST\n support_dict = [\"MobileNetV3\", \"ResNet\", \"ResNet_SAST\"]\n elif model_type == \"rec\" or model_type == \"cls\":\n from .rec_mobilenet_v3 import MobileNetV3\n from .rec_resnet_vd import ResNet\n from .rec_resnet_fpn import ResNetFPN\n from .rec_mv1_enhance import MobileNetV1Enhance\n from .rec_nrtr_mtb import MTB\n from .rec_resnet_31 import ResNet31\n from .rec_resnet_aster import ResNet_ASTER\n from .rec_micronet import MicroNet\n from .rec_efficientb3_pren import EfficientNetb3_PREN\n from .rec_svtrnet import SVTRNet\n support_dict = [\n 'MobileNetV1Enhance', 'MobileNetV3', 'ResNet', 'ResNetFPN', 'MTB',\n \"ResNet31\", \"ResNet_ASTER\", 'MicroNet', 'EfficientNetb3_PREN',\n 'SVTRNet'\n ]\n elif model_type == \"e2e\":\n from .e2e_resnet_vd_pg import ResNet\n support_dict = ['ResNet']\n elif model_type == 'kie':\n from .kie_unet_sdmgr import Kie_backbone\n support_dict = ['Kie_backbone']\n elif model_type == \"table\":\n from .table_resnet_vd import ResNet\n from .table_mobilenet_v3 import MobileNetV3\n support_dict = [\"ResNet\", \"MobileNetV3\"]\n elif model_type == 'vqa':\n from .vqa_layoutlm import LayoutLMForSer, LayoutLMv2ForSer, LayoutLMv2ForRe, LayoutXLMForSer, LayoutXLMForRe\n support_dict = [\n \"LayoutLMForSer\", \"LayoutLMv2ForSer\", 'LayoutLMv2ForRe',\n \"LayoutXLMForSer\", 'LayoutXLMForRe'\n ]\n else:\n raise NotImplementedError\n\n module_name = config.pop(\"name\")\n assert module_name in support_dict, Exception(\n \"when model typs is {}, backbone only support {}\".format(model_type,\n support_dict))\n module_class = eval(module_name)(**config)\n return module_class\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 522, "n_words": 175, "vocab_size": 100, "complexity": 9, "nloc": 46, "token_counts": 245, "n_ast_nodes": 470, "n_identifiers": 46, "random_cut": "def build_backbone(config, model_type):\n if model_type == \"det\" or model_type == \"table\":\n from .det_mobilenet_v3 import MobileNetV3\n from .det_resnet_vd import ResNet\n from .det_resnet_vd_sast import ResNet_SAST\n support_dict = [\"MobileNetV3\", \"ResNet\", \"ResNet_SAST\"]\n elif model_type == \"rec\" or model_type == \"cls\":\n from .rec_mobilenet_v3 import MobileNetV3\n from .rec_resnet_vd import ResNet\n from .rec_resnet_fpn import ResNetFPN\n 
from .rec_mv1_enhance import Mobil" }, { "id": 317621, "commit_id": "fd6ffef52f337df71542b48565a95300c0ab2766", "repo": "core", "path": "tests/components/recorder/test_websocket_api.py", "file_name": "test_websocket_api.py", "fun_name": "test_recorder_info", "commit_message": "Support non-live database migration (#72433)\n\n* Support non-live database migration\r\n\r\n* Tweak startup order, add test\r\n\r\n* Address review comments\r\n\r\n* Fix typo\r\n\r\n* Clarify comment about promoting dependencies\r\n\r\n* Tweak\r\n\r\n* Fix merge mistake\r\n\r\n* Fix some tests\r\n\r\n* Fix additional test\r\n\r\n* Fix additional test\r\n\r\n* Adjust tests\r\n\r\n* Improve test coverage", "code": "async def test_recorder_info(hass, hass_ws_client, recorder_mock):\n \n client = await hass_ws_client()\n\n # Ensure there are no queued events\n await async_wait_recording_done(hass)\n\n await client.send_json({\"id\": 1, \"type\": \"recorder/info\"})\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\n \"backlog\": 0,\n \"max_backlog\": 40000,\n \"migration_in_progress\": False,\n \"migration_is_live\": False,\n \"recording\": True,\n \"thread_running\": True,\n }\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 115, "n_words": 46, "vocab_size": 39, "complexity": 1, "nloc": 14, "token_counts": 81, "n_ast_nodes": 147, "n_identifiers": 9, "random_cut": "async def test_recorder_info(hass, hass_ws_client, recorder_mock):\n \n client = await hass_ws_client()\n\n # Ensure there are no queued events\n await async_wait_recording_done(hass)\n\n await client.send_json({\"id\": 1, \"type\": \"recorder" }, { "id": 202341, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/contenttypes_tests/test_operations.py", "file_name": "test_operations.py", "fun_name": "test_existing_content_type_rename_other_database", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_existing_content_type_rename_other_database(self):\n ContentType.objects.using(\"other\").create(\n app_label=\"contenttypes_tests\", model=\"foo\"\n )\n other_content_types = ContentType.objects.using(\"other\").filter(\n app_label=\"contenttypes_tests\"\n )\n call_command(\n \"migrate\",\n \"contenttypes_tests\",\n database=\"other\",\n interactive=False,\n verbosity=0,\n )\n self.assertFalse(other_content_types.filter(model=\"foo\").exists())\n self.assertTrue(other_content_types.filter(model=\"renamedfoo\").exists())\n call_command(\n \"migrate\",\n \"contenttypes_tests\",\n \"zero\",\n database=\"other\",\n interactive=False,\n verbosity=0,\n )\n self.assertTrue(other_content_types.filter(model=\"foo\").exists())\n self.assertFalse(other_content_types.filter(model=\"renamedfoo\").exists())\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 256, "n_words": 30, "vocab_size": 21, "complexity": 1, "nloc": 26, "token_counts": 149, "n_ast_nodes": 261, "n_identifiers": 17, "random_cut": "def test_existing_content_type_rename_other_database(self):\n ContentType.objects.using(\"other\").create(\n app_label=\"contenttypes_tests\", model=\"foo\"\n )\n other_content_types = ContentType.objects.using(\"other\").filter(\n app_label=\"contenttypes_tests\"\n )\n call_command(\n \"migrate\",\n \"contenttypes_tests\",\n database=\"other\",\n 
interactive=False,\n verbosity=0,\n )\n self.assertFalse(other_content_types.filter(model=\"foo\").exists())\n self.assertTrue(other_content_types.filter(model=\"renamedfoo\").exists())\n call_command(\n \"migrate\",\n \"contenttypes_tests\",\n \"zero\",\n database=\"other\",\n interactive=False,\n verbosity=0,\n )\n self.assertTrue(other_content_types.filter(model=\"foo\").exists())\n self.assertFalse(other_content_types.filter(model=\"renamedfoo\").exists())\n" }, { "id": 149669, "commit_id": "108903f7f0c968f88a3b2520a8cc8e7753c4c2e1", "repo": "freqtrade", "path": "tests/test_integration.py", "file_name": "test_integration.py", "fun_name": "test_dca_order_adjust", "commit_message": "Add DCA order adjust test", "code": "def test_dca_order_adjust(default_conf_usdt, ticker_usdt, fee, mocker) -> None:\n default_conf_usdt['position_adjustment_enable'] = True\n\n freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)\n mocker.patch.multiple(\n 'freqtrade.exchange.Exchange',\n fetch_ticker=ticker_usdt,\n get_fee=fee,\n amount_to_precision=lambda s, x, y: y,\n price_to_precision=lambda s, x, y: y,\n )\n mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=False)\n\n patch_get_signal(freqtrade)\n freqtrade.strategy.custom_entry_price = lambda **kwargs: ticker_usdt['ask'] * 0.96\n\n freqtrade.enter_positions()\n\n assert len(Trade.get_trades().all()) == 1\n trade = Trade.get_trades().first()\n assert len(trade.orders) == 1\n assert trade.open_order_id is not None\n assert pytest.approx(trade.stake_amount) == 60\n assert trade.open_rate == 1.96\n # No adjustment\n freqtrade.process()\n trade = Trade.get_trades().first()\n assert len(trade.orders) == 1\n assert trade.open_order_id is not None\n assert pytest.approx(trade.stake_amount) == 60\n\n # Cancel order and place new one\n freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1.99)\n freqtrade.process()\n trade = Trade.get_trades().first()\n assert len(trade.orders) == 2\n assert trade.open_order_id is not None\n # Open rate is not adjusted yet\n assert trade.open_rate == 1.96\n\n # Fill order\n mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=True)\n freqtrade.process()\n trade = Trade.get_trades().first()\n assert len(trade.orders) == 2\n assert trade.open_order_id is None\n # Open rate is not adjusted yet\n assert trade.open_rate == 1.99\n\n # 2nd order - not filling\n freqtrade.strategy.adjust_trade_position = MagicMock(return_value=120)\n mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=False)\n\n freqtrade.process()\n trade = Trade.get_trades().first()\n assert len(trade.orders) == 3\n assert trade.open_order_id is not None\n assert trade.open_rate == 1.99\n assert trade.orders[-1].price == 1.96\n assert trade.orders[-1].cost == 120\n\n # Replace new order with diff. 
order at a lower price\n freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1.95)\n\n freqtrade.process()\n trade = Trade.get_trades().first()\n assert len(trade.orders) == 4\n assert trade.open_order_id is not None\n assert trade.open_rate == 1.99\n assert trade.orders[-1].price == 1.95\n assert pytest.approx(trade.orders[-1].cost) == 120\n\n # Fill DCA order\n freqtrade.strategy.adjust_trade_position = MagicMock(return_value=None)\n mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=True)\n freqtrade.strategy.adjust_entry_price = MagicMock(side_effect=ValueError)\n\n freqtrade.process()\n trade = Trade.get_trades().first()\n assert len(trade.orders) == 4\n assert trade.open_order_id is None\n assert pytest.approx(trade.open_rate) == 1.963153456\n assert trade.orders[-1].price == 1.95\n assert pytest.approx(trade.orders[-1].cost) == 120\n assert trade.orders[-1].status == 'closed'\n\n assert pytest.approx(trade.amount) == 91.689215\n # Check the 2 filled orders equal the above amount\n assert pytest.approx(trade.orders[1].amount) == 30.150753768\n assert pytest.approx(trade.orders[-1].amount) == 61.538461232\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 530, "n_words": 283, "vocab_size": 109, "complexity": 1, "nloc": 68, "token_counts": 654, "n_ast_nodes": 995, "n_identifiers": 44, "random_cut": "def test_dca_order_adjust(default_conf_usdt, ticker_usdt, fee, mocker) -> None:\n default_conf_usdt['position_adjustment_enable'] = True\n\n freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)\n mocker.patch.multiple(\n 'freqtrade.exchange.Exchange',\n fetch_ticker=ticker_usdt,\n get_fee=fee,\n amount_to_precision=lambda s, x, y: y,\n price_to_precision=lambda s, x, y: y,\n )\n mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=False)\n\n patch_get_signal(freqtrade)\n freqtrade.strategy.custom_entry_price = lambda **kwargs: ticker_usdt['ask'] * 0.96\n\n freqtrade.enter_positions()\n\n assert len(Trade.get_trades().all()) == 1\n trade = Trade.get_trades().first()\n assert len(trade.orders) == 1\n assert trade.open_order_id is not None\n assert pytest.approx(trade.stake_amount) == 60\n assert trade.open_rate == 1.96\n # No adjustment\n freqtrade.process()\n trade = Trade.get_trades().first()\n assert len(trade.orders) == 1\n assert trade.open_order_id is not None\n assert pytest.approx(trade.stake_amount) == 60\n\n # Cancel order and place new one\n freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1.99)\n freqtrade.process()\n trade = Trade.get_trades().first()\n assert len(trade.orders) == 2\n assert trade.open_order_id is not None\n # Open rate is not adjusted yet\n assert trade.open_rate == 1.96\n\n # Fill order\n mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=True)\n freqtrade.process()\n trade = Trade.get_trades().first()\n assert len(trade.orders) == 2\n assert trade.open_order_id is None\n # Open rate is not adjusted yet\n assert trade.open_rate == 1.99\n\n # 2nd order - not filling\n freqtrade.strategy.adjust_trade_position = MagicMock(return_value=120)\n mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=False)\n\n freqtrade.process()\n trade = Trade.get_trades().first()\n assert len(trade.orders) == 3\n assert trade.open_order_id is not None\n assert trade.open_rate == 1.99\n assert trade.orders[-1].price == 1.96\n 
assert trade.orders[-1].cost == 120\n\n # Replace new order with diff. order at a lower price\n freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1.95)\n\n freqtrade.process()\n trade = Trade.get_trades().first()\n assert len(trade.orders) == 4\n assert trade.open_order_id is not None\n assert trade.open_rate == 1.99\n assert trade.orders[-1].price == 1.95\n assert pytest.approx(trade.orders[-1].cost) == 120\n\n # Fill DCA order\n freqtrade.strategy.adjust_trade_position = MagicMock(return_value=None)\n mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=True)\n freqtrade.strategy.adjust_entry_price = MagicMock(side_effect=ValueError)\n\n freqtrade.process()\n trade = Trade.get_trades().first()\n assert len(trade.orders) == 4\n assert trade.open_order_id is None\n assert pytest.approx(trade.open_rate) == 1.963153456\n assert trade.orders[-1].price == 1.95\n assert pytest.approx(trade.orders[-1].cost) == 120\n a" }, { "id": 241752, "commit_id": "82c8875f33addb0becd7761c95e9674ccc98c7ee", "repo": "lightning", "path": "tests/models/test_hooks.py", "file_name": "test_hooks.py", "fun_name": "_auto_train_batch", "commit_message": "Add `LightningModule.lr_scheduler_step` (#10249)\n\nCo-authored-by: Carlos Mocholi ", "code": "def _auto_train_batch(trainer, model, batches, device=torch.device(\"cpu\"), current_epoch=0, **kwargs):\n using_native_amp = kwargs.get(\"amp_backend\") == \"native\"\n using_deepspeed = kwargs.get(\"strategy\") == \"deepspeed\"\n out = []\n for i in range(batches):\n out.extend(\n [\n dict(name=\"on_before_batch_transfer\", args=(ANY, 0)),\n dict(name=\"transfer_batch_to_device\", args=(ANY, device, 0)),\n dict(name=\"on_after_batch_transfer\", args=(ANY, 0)),\n # TODO: `on_batch_{start,end}`\n dict(name=\"Callback.on_batch_start\", args=(trainer, model)),\n dict(name=\"Callback.on_train_batch_start\", args=(trainer, model, ANY, i)),\n dict(name=\"on_train_batch_start\", args=(ANY, i)),\n dict(name=\"forward\", args=(ANY,)),\n dict(name=\"training_step\", args=(ANY, i)),\n dict(name=\"training_step_end\", args=(dict(loss=ANY),)),\n dict(name=\"Callback.on_before_zero_grad\", args=(trainer, model, ANY)),\n dict(name=\"on_before_zero_grad\", args=(ANY,)),\n dict(name=\"optimizer_zero_grad\", args=(current_epoch, i, ANY, 0)),\n dict(name=\"Callback.on_before_backward\", args=(trainer, model, ANY)),\n dict(name=\"on_before_backward\", args=(ANY,)),\n # DeepSpeed handles backward internally\n *([dict(name=\"backward\", args=(ANY, ANY, 0))] if not using_deepspeed else []),\n dict(name=\"Callback.on_after_backward\", args=(trainer, model)),\n dict(name=\"on_after_backward\"),\n # note: unscaling happens here in the case of AMP\n dict(name=\"Callback.on_before_optimizer_step\", args=(trainer, model, ANY, 0)),\n dict(name=\"on_before_optimizer_step\", args=(ANY, 0)),\n *([dict(name=\"log_grad_norm\", args=ANY)] if not using_deepspeed else []),\n dict(\n name=\"clip_gradients\",\n args=(ANY,),\n kwargs=dict(gradient_clip_val=None, gradient_clip_algorithm=None),\n ),\n dict(\n name=\"configure_gradient_clipping\",\n args=(ANY, 0),\n kwargs=dict(gradient_clip_val=None, gradient_clip_algorithm=None),\n ),\n # this is after because it refers to the `LightningModule.optimizer_step` hook which encapsulates\n # the actual call to `PrecisionPlugin.optimizer_step`\n dict(\n name=\"optimizer_step\",\n args=(current_epoch, i, ANY, 0, ANY),\n kwargs=dict(on_tpu=False, using_lbfgs=False, using_native_amp=using_native_amp),\n ),\n *(\n 
[dict(name=\"lr_scheduler_step\", args=(ANY, 0, None))]\n if i == (trainer.num_training_batches - 1)\n else []\n ),\n dict(name=\"Callback.on_train_batch_end\", args=(trainer, model, dict(loss=ANY), ANY, i)),\n dict(name=\"on_train_batch_end\", args=(dict(loss=ANY), ANY, i)),\n dict(name=\"Callback.on_batch_end\", args=(trainer, model)),\n ]\n )\n return out\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1237, "n_words": 191, "vocab_size": 118, "complexity": 5, "nloc": 53, "token_counts": 591, "n_ast_nodes": 873, "n_identifiers": 25, "random_cut": "def _auto_train_batch(trainer, model, batches, device=torch.device(\"cpu\"), current_epoch=0, **kwargs):\n using_native_amp = kwargs.get(\"amp_backend\") == \"native\"\n using_deepspeed = kwargs.get(\"strategy\") == \"deepspeed\"\n out = []\n for i in range(batches):\n out.extend(\n [\n dict(name=\"on_before_batch_transfer\", args=(ANY, 0)),\n dict(name=\"transfer_batch_to_device\", args=(ANY, device, 0)),\n dict(name=\"on_after_batch_transfer\", args=(ANY, 0)),\n # TODO: `on_batch_{start,end}`\n dict(name=\"Callback.on_batch_start\", args=(trainer, model)),\n dict(name=\"Callback.on_train_batch_start\", args=(trainer, model, ANY, i)),\n dict(name=\"on_train_batch_start\", args=(ANY, i)),\n dict(name=\"forward\", args=(ANY,)),\n dict(name=\"training_step\", args=(ANY, i)),\n dict(name=\"training_step_end\", args=(dict(loss=ANY),)),\n dict(name=\"Callback.on_before_zero_grad\", args=(trainer, model, ANY)),\n dict(name=\"on_before_zero_grad\", args=(ANY,)),\n dict(name=\"optimizer_zero_grad\", args=(current_epoch, i, ANY, 0)),\n dict(name=\"Callback.on_before_backward\", args=(trainer," }, { "id": 157508, "commit_id": "ca86da3a30c4e080d4db8c25fca73de843663cb4", "repo": "stablediffusion", "path": "ldm/modules/image_degradation/bsrgan.py", "file_name": "bsrgan.py", "fun_name": "degradation_bsrgan_variant", "commit_message": "release more models", "code": "def degradation_bsrgan_variant(image, sf=4, isp_model=None):\n \n image = util.uint2single(image)\n isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25\n sf_ori = sf\n\n h1, w1 = image.shape[:2]\n image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] 
# mod crop\n h, w = image.shape[:2]\n\n hq = image.copy()\n\n if sf == 4 and random.random() < scale2_prob: # downsample1\n if np.random.rand() < 0.5:\n image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),\n interpolation=random.choice([1, 2, 3]))\n else:\n image = util.imresize_np(image, 1 / 2, True)\n image = np.clip(image, 0.0, 1.0)\n sf = 2\n\n shuffle_order = random.sample(range(7), 7)\n idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)\n if idx1 > idx2: # keep downsample3 last\n shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]\n\n for i in shuffle_order:\n\n if i == 0:\n image = add_blur(image, sf=sf)\n\n elif i == 1:\n image = add_blur(image, sf=sf)\n\n elif i == 2:\n a, b = image.shape[1], image.shape[0]\n # downsample2\n if random.random() < 0.75:\n sf1 = random.uniform(1, 2 * sf)\n image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),\n interpolation=random.choice([1, 2, 3]))\n else:\n k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))\n k_shifted = shift_pixel(k, sf)\n k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel\n image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')\n image = image[0::sf, 0::sf, ...] # nearest downsampling\n image = np.clip(image, 0.0, 1.0)\n\n elif i == 3:\n # downsample3\n image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))\n image = np.clip(image, 0.0, 1.0)\n\n elif i == 4:\n # add Gaussian noise\n image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25)\n\n elif i == 5:\n # add JPEG noise\n if random.random() < jpeg_prob:\n image = add_JPEG_noise(image)\n\n # elif i == 6:\n # # add processed camera sensor noise\n # if random.random() < isp_prob and isp_model is not None:\n # with torch.no_grad():\n # img, hq = isp_model.forward(img.copy(), hq)\n\n # add final JPEG compression noise\n image = add_JPEG_noise(image)\n image = util.single2uint(image)\n example = {\"image\":image}\n return example\n\n\n# TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc...", "url": "https://github.com/Stability-AI/stablediffusion.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 896, "n_words": 338, "vocab_size": 165, "complexity": 14, "nloc": 50, "token_counts": 619, "n_ast_nodes": 915, "n_identifiers": 56, "random_cut": "def degradation_bsrgan_variant(image, sf=4, isp_model=None):\n \n image = util.uint2single(image)\n isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25\n sf_ori = sf\n\n h1, w1 = image.shape[:2]\n image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop\n h, w = image.shape[:2]\n\n hq = image.copy()\n\n if sf == 4 and random.random() < scale2_prob: # downsample1\n if np.random.rand() < 0.5:\n image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),\n interpolation=random.choice([1, 2, 3]))\n else:\n image = util.imresize_np(image, 1 / 2, True)\n image = np.clip(image, 0.0, 1.0)\n sf = 2\n\n shu" }, { "id": 94119, "commit_id": "be379770e90f6b7f97109f1cbdffd1e4749402ba", "repo": "sentry", "path": "src/sentry/integrations/msteams/integration.py", "file_name": "integration.py", "fun_name": "build_integration", "commit_message": "feat(msteams): Allow personal installation for Sentry in MS Teams (#36749)\n\n- Allow personal installation of Sentry for Microsoft Teams. 
This would allow MS Teams users to receive personal notifications for Sentry events like Slack. Currently, it is only possible to receive issue and incident notifications in `teams`.\r\n- The installation message/card is modified to allow the user to set up an integration from the personal chat as well. Upon successful installation a card will be presented with an option to configure notification settings.\r\n- Create an integration with `tenant_id` as the `external_id`, as we don't know the user's team from personal chat context. All users with the same `tenant_id` would be scoped under this integration.", "code": "def build_integration(self, state):\n data = state[self.key]\n external_id = data[\"external_id\"]\n external_name = data[\"external_name\"]\n service_url = data[\"service_url\"]\n user_id = data[\"user_id\"]\n conversation_id = data[\"conversation_id\"]\n\n # TODO: add try/except for request errors\n token_data = get_token_data()\n\n integration = {\n \"name\": external_name,\n \"external_id\": external_id,\n \"metadata\": {\n \"access_token\": token_data[\"access_token\"],\n \"expires_at\": token_data[\"expires_at\"],\n \"service_url\": service_url,\n \"installation_type\": data[\"installation_type\"],\n \"tenant_id\": data[\"tenant_id\"],\n },\n \"user_identity\": {\n \"type\": \"msteams\",\n \"external_id\": user_id,\n \"scopes\": [],\n \"data\": {},\n },\n \"post_install_data\": {\"conversation_id\": conversation_id},\n }\n return integration\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 356, "n_words": 68, "vocab_size": 56, "complexity": 1, "nloc": 27, "token_counts": 132, "n_ast_nodes": 238, "n_identifiers": 13, "random_cut": "def build_integration(self, state):\n data = state[self.key]\n external_id = data[\"external_id\"]\n external_name = data[\"external_name\"]\n service_url = data[\"service_url\"]\n " }, { "id": 252168, "commit_id": "2426d3d03847e0273707436268d79c24616b3e74", "repo": "mitmproxy", "path": "mitmproxy/proxy/layers/quic.py", "file_name": "quic.py", "fun_name": "transmit", "commit_message": "[quic] bugfixes and simplified connection opening", "code": "def transmit(self) -> layer.CommandGenerator[None]:\n assert self.quic is not None\n\n # send all queued datagrams\n for data, addr in self.quic.datagrams_to_send(now=self._loop.time()):\n yield commands.SendData(self.conn, data, addr)\n\n # mark an existing wakeup command as obsolete if it now longer matches the time\n timer = self.quic.get_timer()\n if self._request_wakeup_command_and_timer is not None:\n command, existing_timer = self._request_wakeup_command_and_timer\n if existing_timer != timer:\n self._obsolete_wakeup_commands.add(command)\n self._request_wakeup_command_and_timer = None\n\n # request a new wakeup if necessary\n if timer is not None and self._request_wakeup_command_and_timer is None:\n command = commands.RequestWakeup(timer - self._loop.time())\n self._request_wakeup_command_and_timer = (command, timer)\n yield command\n\n _handle_event = state_start\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 242, "n_words": 88, "vocab_size": 58, "complexity": 6, "nloc": 14, "token_counts": 135, "n_ast_nodes": 218, "n_identifiers": 24, "random_cut": "def transmit(self) -> layer.CommandGenerator[None]:\n assert self.quic is not None\n\n # send all queued datagrams\n for data, addr in 
self.quic.datagrams_to_send(now=self._loop.time()):\n yield commands.SendData(self.conn, data, addr)\n\n # mark an existing wakeup command as obsolete if it now longer matches the time\n timer = self.quic.get_timer()\n if self._request_wakeup_command_and_timer is not None:\n command, existing_ti" }, { "id": 215506, "commit_id": "25c2ae356bcf684cbe20f776e1ffcab0f8aeb80c", "repo": "salt", "path": "salt/utils/event.py", "file_name": "event.py", "fun_name": "get_master_event", "commit_message": "Address docs and hard coded strings", "code": "def get_master_event(opts, sock_dir, listen=True, io_loop=None, raise_errors=False):\n \n return MasterEvent(\n sock_dir, opts, listen=listen, io_loop=io_loop, raise_errors=raise_errors\n )\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 30, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 39, "n_ast_nodes": 55, "n_identifiers": 7, "random_cut": "def get_master_event(opts, sock_dir, listen=True, io_loop=None, raise_errors=False):\n \n return MasterEvent(\n sock_dir, opts, listen=listen, io_loop=io_l" }, { "id": 13920, "commit_id": "87912a37ce7ab3c3b63c12b48d6cdfe31f81742c", "repo": "jina", "path": "jina/parsers/orchestrate/runtimes/remote.py", "file_name": "remote.py", "fun_name": "_add_host", "commit_message": "fix: list-like args passed as string (#5464)\n\nCo-authored-by: Alaeddine Abdessalem ", "code": "def _add_host(arg_group):\n arg_group.add_argument(\n '--host',\n '--host-in',\n type=str,\n default=__default_host__,\n help=f'The host address of the runtime, by default it is {__default_host__}.',\n )\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 59, "n_words": 19, "vocab_size": 19, "complexity": 1, "nloc": 8, "token_counts": 27, "n_ast_nodes": 47, "n_identifiers": 8, "random_cut": "def _add_host(arg_group):\n arg_group.add_argument(\n '--host',\n '--host-in',\n type=str,\n default=__default_host__,\n help=f'The host address of the runtime, by default it is {__default_host__}.',\n " }, { "id": 188681, "commit_id": "34e75099a3bc8b32d6e823660f5162094d17e511", "repo": "jumpserver", "path": "apps/users/signal_handlers.py", "file_name": "signal_handlers.py", "fun_name": "on_user_create_set_default_system_role", "commit_message": "perf: 设置默认的角色,系统用户角色添加权限 (#7898)\n\n* perf: 修改 role handler\r\n\r\n* perf: 设置默认的角色,系统用户角色添加权限\r\n\r\n* perf: authentication 还是放到系统中吧\r\n\r\nCo-authored-by: ibuler \r\nCo-authored-by: Jiangjie.Bai <32935519+BaiJiangJie@users.noreply.github.com>", "code": "def on_user_create_set_default_system_role(sender, instance, created, **kwargs):\n if not created:\n return\n has_system_role = instance.system_roles.all().exists()\n if not has_system_role:\n logger.debug(\"Receive user create signal, set default role\")\n instance.set_default_system_role()\n\n\n@receiver(post_user_create)", "url": "https://github.com/jumpserver/jumpserver.git", "language": "Python", "ast_errors": "@receiver(post_user_create)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 52, "n_words": 24, "vocab_size": 22, "complexity": 3, "nloc": 7, "token_counts": 45, "n_ast_nodes": 85, "n_identifiers": 14, "random_cut": "def on_user_create_set_default_system_role(sender, instance, created, **kwargs):\n if not created:\n " }, { "id": 161280, "commit_id": "6a793cea8488ad40fcad6ab30f9d82bc920ac114", "repo": "MockingBird", "path": 
"vocoder/fregan/stft_loss.py", "file_name": "stft_loss.py", "fun_name": "stft", "commit_message": "Added missing files for Fre-GAN (#579)\n\n* The new vocoder Fre-GAN is now supported\r\n\r\n* Improved some fregan details\r\n\r\n* Fixed the problem that the existing model could not be loaded to continue training when training GAN\r\n\r\n* Updated reference papers\r\n\r\n* GAN training now supports DistributedDataParallel (DDP)\r\n\r\n* Added requirements.txt\r\n\r\n* GAN training uses single card training by default\r\n\r\n* Added note about GAN vocoder training with multiple GPUs\r\n\r\n* Added missing files for Fre-GAN", "code": "def stft(x, fft_size, hop_size, win_length, window):\n \n x_stft = torch.stft(x, fft_size, hop_size, win_length, window)\n real = x_stft[..., 0]\n imag = x_stft[..., 1]\n\n # NOTE(kan-bayashi): clamp is needed to avoid nan or inf\n return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1)\n\n", "url": "https://github.com/babysor/MockingBird.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 59, "n_words": 41, "vocab_size": 33, "complexity": 1, "nloc": 5, "token_counts": 77, "n_ast_nodes": 108, "n_identifiers": 14, "random_cut": "def stft(x, fft_size, hop_size, win_length, window):\n \n x_stft = t" }, { "id": 308401, "commit_id": "d0c4f0fec4216e4193da716001b5e13e1e3f2106", "repo": "core", "path": "homeassistant/components/mqtt/cover.py", "file_name": "cover.py", "fun_name": "async_close_cover", "commit_message": "Add mqtt encoding support for publishing (#62739)\n\n* encoding support for mqtt publishing - todo tests\r\n\r\n* signature allows None values for qos and retain\r\n\r\n* common test for mqtt publishing encoding\r\n\r\n* better test with command templates\r\n\r\n* more tests\r\n\r\n* fix tests alarm control panel+tests light basic\r\n\r\n* tests light json and template\r\n\r\n* add tests vacuum and fix tests light_template", "code": "async def async_close_cover(self, **kwargs):\n \n await mqtt.async_publish(\n self.hass,\n self._config.get(CONF_COMMAND_TOPIC),\n self._config[CONF_PAYLOAD_CLOSE],\n self._config[CONF_QOS],\n self._config[CONF_RETAIN],\n self._config[CONF_ENCODING],\n )\n if self._optimistic:\n # Optimistically assume that cover has changed state.\n self._state = STATE_CLOSED\n if self._config.get(CONF_GET_POSITION_TOPIC):\n self._position = self.find_percentage_in_range(\n self._config[CONF_POSITION_CLOSED], COVER_PAYLOAD\n )\n self.async_write_ha_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 222, "n_words": 35, "vocab_size": 32, "complexity": 3, "nloc": 16, "token_counts": 98, "n_ast_nodes": 150, "n_identifiers": 22, "random_cut": "async def async_close_cover(self, **kwargs):\n \n await mqtt.async_publish(\n self.hass,\n self._config.get(CONF_COMMAND_TOPIC),\n self._config[CONF_PAYLOAD_CLOSE],\n self._config[CONF_QOS],\n self._config[CONF_RETAIN],\n self._config[CONF_ENCODING],\n )\n if self._optimistic:\n # Optimistically assume that cover has changed state.\n se" }, { "id": 60304, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/python/caffe/test/test_coord_map.py", "file_name": "test_coord_map.py", "fun_name": "test_rect", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def test_rect(self):\n \n n3x3 = coord_net_spec(ks=3, stride=1, pad=0)\n 
n5x5 = coord_net_spec(ks=5, stride=2, pad=10)\n n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10])\n ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data)\n ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data)\n ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.data)\n self.assertTrue(ax_3x3 == ax_5x5 == ax_3x5)\n self.assertEquals(a_3x3, a_3x5[0])\n self.assertEquals(b_3x3, b_3x5[0])\n self.assertEquals(a_5x5, a_3x5[1])\n self.assertEquals(b_5x5, b_3x5[1])\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 135, "n_words": 51, "vocab_size": 45, "complexity": 1, "nloc": 12, "token_counts": 168, "n_ast_nodes": 245, "n_identifiers": 23, "random_cut": "def test_rect(self):\n \n n3x3 = coord_net_spec(ks=3, stride=1, pad=0)\n n5x5 = coord_net_spec(ks=5, stride=2, pad=10)\n n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10])\n ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data)\n ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data)\n ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.data)\n self.assertTrue(ax_3x3 == ax_5x5 == ax_3x5)\n self.assertEquals(a_3x3, a_3x5[0])\n " }, { "id": 23088, "commit_id": "c267107926bbbd977fc9d67d3c39fb5e1b77028b", "repo": "PaddleOCR", "path": "PPOCRLabel/libs/autoDialog.py", "file_name": "autoDialog.py", "fun_name": "handleProgressBarSingal", "commit_message": "Add [ time left ] while predicting the image for user to know the situation", "code": "def handleProgressBarSingal(self, i):\n self.pb.setValue(i)\n\n # calculate time left of auto labeling\n avg_time = (time.time() - self.time_start) / i # Use average time to prevent time fluctuations\n time_left = str(datetime.timedelta(seconds=avg_time * (self.lender - i)))\n self.setWindowTitle(\"PPOCRLabel -- \" + f\"Time Left: {time_left}\") # show\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 80, "n_words": 42, "vocab_size": 36, "complexity": 1, "nloc": 5, "token_counts": 60, "n_ast_nodes": 105, "n_identifiers": 15, "random_cut": "def handleProgressBarSingal(self, i):\n self.pb.setValue(i)\n\n " }, { "id": 17268, "commit_id": "ff158ebe7e1ed14772139737d13bb5edfd6d9430", "repo": "ccxt", "path": "python/ccxt/async_support/zaif.py", "file_name": "zaif.py", "fun_name": "describe", "commit_message": "1.71.83\n\n[ci skip]", "code": "def describe(self):\n return self.deep_extend(super(zaif, self).describe(), {\n 'id': 'zaif',\n 'name': 'Zaif',\n 'countries': ['JP'],\n 'rateLimit': 2000,\n 'version': '1',\n 'has': {\n 'CORS': None,\n 'spot': True,\n 'margin': None, # has but unimplemented\n 'swap': False,\n 'future': False,\n 'option': False,\n 'cancelOrder': True,\n 'createMarketOrder': None,\n 'createOrder': True,\n 'fetchBalance': True,\n 'fetchClosedOrders': True,\n 'fetchFundingHistory': False,\n 'fetchFundingRate': False,\n 'fetchFundingRateHistory': False,\n 'fetchFundingRates': False,\n 'fetchIndexOHLCV': False,\n 'fetchMarkets': True,\n 'fetchMarkOHLCV': False,\n 'fetchOpenOrders': True,\n 'fetchOrderBook': True,\n 'fetchPremiumIndexOHLCV': False,\n 'fetchTicker': True,\n 'fetchTrades': True,\n 'withdraw': True,\n },\n 'urls': {\n 'logo': 'https://user-images.githubusercontent.com/1294454/27766927-39ca2ada-5eeb-11e7-972f-1b4199518ca6.jpg',\n 'api': 'https://api.zaif.jp',\n 'www': 'https://zaif.jp',\n 'doc': [\n 
'https://techbureau-api-document.readthedocs.io/ja/latest/index.html',\n 'https://corp.zaif.jp/api-docs',\n 'https://corp.zaif.jp/api-docs/api_links',\n 'https://www.npmjs.com/package/zaif.jp',\n 'https://github.com/you21979/node-zaif',\n ],\n 'fees': 'https://zaif.jp/fee?lang=en',\n },\n 'fees': {\n 'trading': {\n 'percentage': True,\n 'taker': self.parse_number('0.001'),\n 'maker': self.parse_number('0'),\n },\n },\n 'api': {\n 'public': {\n 'get': [\n 'depth/{pair}',\n 'currencies/{pair}',\n 'currencies/all',\n 'currency_pairs/{pair}',\n 'currency_pairs/all',\n 'last_price/{pair}',\n 'ticker/{pair}',\n 'trades/{pair}',\n ],\n },\n 'private': {\n 'post': [\n 'active_orders',\n 'cancel_order',\n 'deposit_history',\n 'get_id_info',\n 'get_info',\n 'get_info2',\n 'get_personal_info',\n 'trade',\n 'trade_history',\n 'withdraw',\n 'withdraw_history',\n ],\n },\n 'ecapi': {\n 'post': [\n 'createInvoice',\n 'getInvoice',\n 'getInvoiceIdsByOrderNumber',\n 'cancelInvoice',\n ],\n },\n 'tlapi': {\n 'post': [\n 'get_positions',\n 'position_history',\n 'active_positions',\n 'create_position',\n 'change_position',\n 'cancel_position',\n ],\n },\n 'fapi': {\n 'get': [\n 'groups/{group_id}',\n 'last_price/{group_id}/{pair}',\n 'ticker/{group_id}/{pair}',\n 'trades/{group_id}/{pair}',\n 'depth/{group_id}/{pair}',\n ],\n },\n },\n 'options': {\n # zaif schedule defines several market-specific fees\n 'fees': {\n 'BTC/JPY': {'maker': 0, 'taker': 0},\n 'BCH/JPY': {'maker': 0, 'taker': 0.3 / 100},\n 'BCH/BTC': {'maker': 0, 'taker': 0.3 / 100},\n 'PEPECASH/JPY': {'maker': 0, 'taker': 0.01 / 100},\n 'PEPECASH/BT': {'maker': 0, 'taker': 0.01 / 100},\n },\n },\n 'exceptions': {\n 'exact': {\n 'unsupported currency_pair': BadRequest, # {\"error\": \"unsupported currency_pair\"}\n },\n 'broad': {\n },\n },\n })\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 2407, "n_words": 232, "vocab_size": 142, "complexity": 1, "nloc": 126, "token_counts": 443, "n_ast_nodes": 806, "n_identifiers": 7, "random_cut": "def describe(self):\n return self.deep_extend(super(zaif, self).describe(), {\n 'id': 'zaif',\n 'name': 'Zaif',\n 'countries': ['JP'],\n 'rateLimit': 2000,\n 'version': '1',\n 'has': {\n 'CORS': None,\n 'spot': True,\n 'margin': None, # has but unimplemented\n 'swap': False,\n 'future': False,\n 'option': False,\n 'cancelOrder': True,\n 'createMarketOrder': None,\n 'createOrder': True,\n 'fetchBalance': True,\n 'fetchClosedOrders': True,\n 'fetchFundingHistory': False,\n 'fetchFundingRate': False,\n 'fetchFundingRateHistory': False,\n 'fetchFundingRates': False,\n 'fetchIndexOHLCV': False,\n 'fetchMarkets': True,\n 'fetchMarkOHLCV': False,\n 'fetchOpenOrders': True,\n 'fetchOrderBook': True,\n 'fetchPremiumIndexOHLCV': False,\n 'fetchTicker': True,\n 'fetchTrades': True,\n 'withdraw': True,\n },\n 'urls': {\n 'logo': 'https://user-images.githubusercontent.com/1294454/27766927-39ca2ada-5eeb-11e7-972f-1b4199518ca6.jpg',\n 'api': 'https://api.zaif.jp',\n 'www': 'https://zaif.jp',\n 'doc': [\n 'https://techbureau-api-document.readthedocs.io/ja/latest/index.html',\n 'https://corp.zaif.jp/api-docs',\n 'https://corp.zaif.jp/api-docs/api_links',\n 'https://www.npmjs.com/package/zaif.jp',\n 'https://github.com/you21979/node-zaif',\n ],\n 'fees': 'https://zaif.jp/fee?lang=en',\n },\n 'fees': {\n 'trading': {\n 'percentage': True,\n 'taker': self.parse_number('0.001'),\n 'maker': self.parse_number('0'),\n },\n },\n 'api': {\n 'public': 
{\n 'get': [\n 'depth/{pair}',\n 'currencies/{pair}',\n 'currencies/all',\n 'currency_pairs/{pair}',\n 'currency_pairs/all',\n 'last_price/{pair}',\n 'ticker/{pair}',\n 'trades/{pair}',\n ],\n },\n 'private': {\n 'post': [\n 'active_orders',\n 'cancel_order',\n 'deposit_history',\n 'get_id_info',\n 'get_info',\n 'get_info2',\n 'get_personal_info',\n 'trade',\n 'trade_history',\n 'withdraw',\n 'withdraw_history',\n ],\n },\n 'ecapi': {\n 'post': [\n 'createInvoice',\n 'getInvoice',\n 'getInvoiceIdsByOrderNumber',\n 'cancelInvoice',\n ],\n },\n 'tlapi': {\n 'post': [\n 'get_positions',\n 'position_history',\n 'active_positions',\n 'create_position',\n 'change_position',\n 'cancel_position',\n ],\n },\n 'fapi': {\n 'get': [\n 'groups/{group_id}',\n 'last_price/{group_id}/{pair}',\n 'ticker/{group_id}/{pair}',\n 'trades/{group_id}/{pair}',\n 'depth/{group_id}/{pair}',\n ],\n },\n },\n 'options': {\n # zaif schedule defines several market-specific fees\n 'fees': {\n 'BTC/JPY': {'maker': 0, 'taker': 0},\n 'BCH/JPY': {'maker': 0, 'taker': 0.3 / 100},\n 'BCH/BTC': {'maker': 0, 'taker': 0.3 / 100},\n 'PEPECASH/JPY': {'maker':" }, { "id": 131486, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/test_failure_2.py", "file_name": "test_failure_2.py", "fun_name": "test_raylet_node_manager_server_failure", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_raylet_node_manager_server_failure(ray_start_cluster_head, log_pubsub):\n cluster = ray_start_cluster_head\n redis_port = int(cluster.address.split(\":\")[1])\n # Reuse redis port to make node manager grpc server fail to start.\n with pytest.raises(Exception):\n cluster.add_node(wait=False, node_manager_port=redis_port)\n\n # wait for max 10 seconds.", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 53, "n_words": 32, "vocab_size": 29, "complexity": 1, "nloc": 8, "token_counts": 71, "n_ast_nodes": 78, "n_identifiers": 14, "random_cut": "def test_raylet_node_manager_server_failure(ray_start_cluster_head, log_pubsub):\n cluster = ray_start_cluster_head\n redis_port = int(cluster.address.split(\":\")[1])\n # Reuse redis port to make node manager grpc server fail to start.\n with pytest.raises(Exception):\n cluster.add_node(w" }, { "id": 90853, "commit_id": "b9f5a910dc841b85f58d46266ec049ae5a7fd305", "repo": "sentry", "path": "src/sentry/runner/commands/repair.py", "file_name": "repair.py", "fun_name": "fix_group_counters", "commit_message": "ref(models): `ActivityType` (#34978)\n\n## Objective:\r\nWe want to separate enum logic from Model logic. This breaks a lot of circular dependencies.", "code": "def fix_group_counters():\n from django.db import connection\n\n click.echo(\"Correcting Group.num_comments counter\")\n cursor = connection.cursor()\n cursor.execute(\n ,\n [ActivityType.NOTE.value],\n )\n\n\n@click.command()\n@click.option(\n \"--with-docs/--without-docs\",\n default=False,\n help=\"Synchronize and repair embedded documentation. This \" \"is disabled by default.\",\n)\n@configuration", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@click.command()\n@click.option(\n \"--with-docs/--without-docs\",\n default=False,\n help=\"Synchronize and repair embedded documentation. 
This \" \"is disabled by default.\",\n)\n@configuration", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 66, "n_words": 33, "vocab_size": 32, "complexity": 1, "nloc": 13, "token_counts": 38, "n_ast_nodes": 109, "n_identifiers": 16, "random_cut": "def fix_group_counters():\n from django.db import connection\n\n click.echo(\"Correcting Group.num_comments counter\")\n cursor = connection.cursor()\n cursor.execute(\n ,\n [ActivityType.NOTE.value],\n )\n\n\n@click.command()\n@click.option(\n \"--with-do" }, { "id": 289512, "commit_id": "67d1dde69fbacf33f2c39ea14d89f2afa425ed18", "repo": "core", "path": "tests/test_config.py", "file_name": "test_config.py", "fun_name": "test_igration_and_updating_configuration", "commit_message": "Rename IMPERIAL_SYSTEM to US_CUSTOMARY_SYSTEM (#80253)\n\n* Rename IMPERIAL_SYSTEM\r\n\r\n* Deprecate is_metric property and adjust tests\r\n\r\n* Adjust unit_system config validation\r\n\r\n* Add yaml tests\r\n\r\n* Add tests for private name\r\n\r\n* Fix incorrect rebase\r\n\r\n* Adjust docstring\r\n\r\n* Add store migration\r\n\r\n* Update unit_system.py\r\n\r\n* Minimise test tweaks\r\n\r\n* Fix tests\r\n\r\n* Add conversion to migration\r\n\r\n* Rename new key and adjust tests\r\n\r\n* Adjust websocket_detect_config\r\n\r\n* Move original_unit_system tracking to subclass", "code": "async def test_igration_and_updating_configuration(hass, hass_storage):\n \n core_data = {\n \"data\": {\n \"elevation\": 10,\n \"latitude\": 55,\n \"location_name\": \"Home\",\n \"longitude\": 13,\n \"time_zone\": \"Europe/Copenhagen\",\n \"unit_system\": \"imperial\",\n \"external_url\": \"https://www.example.com\",\n \"internal_url\": \"http://example.local\",\n \"currency\": \"BTC\",\n },\n \"key\": \"core.config\",\n \"version\": 1,\n \"minor_version\": 1,\n }\n hass_storage[\"core.config\"] = dict(core_data)\n await config_util.async_process_ha_core_config(\n hass, {\"allowlist_external_dirs\": \"/etc\"}\n )\n await hass.config.async_update(latitude=50, currency=\"USD\")\n\n expected_new_core_data = copy.deepcopy(core_data)\n # From async_update above\n expected_new_core_data[\"data\"][\"latitude\"] = 50\n expected_new_core_data[\"data\"][\"currency\"] = \"USD\"\n # 1.1 -> 1.2 store migration with migrated unit system\n expected_new_core_data[\"data\"][\"unit_system_v2\"] = \"us_customary\"\n expected_new_core_data[\"minor_version\"] = 2\n assert hass_storage[\"core.config\"] == expected_new_core_data\n assert hass.config.latitude == 50\n assert hass.config.currency == \"USD\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 280, "n_words": 88, "vocab_size": 70, "complexity": 1, "nloc": 30, "token_counts": 166, "n_ast_nodes": 314, "n_identifiers": 14, "random_cut": "async def test_igration_and_updating_configuration(hass, hass_storage):\n \n core_data = {\n \"data\": {\n \"elevation\": 10,\n \"latitude\": 55,\n \"location_name\": \"Home\",\n \"longitude\": 13,\n \"time_zone\": \"Europe/Copenhagen\",\n \"unit_system\": \"imperial\",\n \"external_url\": \"https://www.example.com\",\n \"internal_url\": \"http://example.local\",\n \"currency\": \"BTC\",\n },\n \"key\": \"core.config\",\n \"version\": 1,\n \"minor_version\": 1,\n }\n hass_storage[\"core.config\"] = dict(core_data)\n await config_util.async_process_ha_core_config(\n hass, {\"allowlist_external_dirs\": \"/etc\"}\n )\n await hass.config.async_update(latitude=50, currency=\"USD\")\n\n expected_new_core_data = 
copy.deepcopy(core_data)\n # From async_update above\n expected_new_core_data[\"data\"][\"latitude\"] = 50\n expected_new_core_data[\"data\"][\"currency\"] = \"USD\"\n # 1.1 -> 1.2 store migration with migrated unit system\n expected_new_core_data[\"data\"][\"unit_system_v2\"] = \"us_customary\"\n expected_new_core_data[\"minor_version\"] = 2\n assert hass_storage[\"core.config\"] == expected_new_core_data\n assert hass.config.latitude == 50\n assert hass.config.currency == \"USD\"\n\n" }, { "id": 244636, "commit_id": "46430db4f965a2d7e2853ce52cad828344e84ec7", "repo": "mmdetection", "path": "mmdet/models/detectors/base.py", "file_name": "base.py", "fun_name": "with_shared_head", "commit_message": "Refactor interface of two-stage detector", "code": "def with_shared_head(self) -> bool:\n \n return hasattr(self, 'roi_head') and self.roi_head.with_shared_head\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 3, "token_counts": 21, "n_ast_nodes": 37, "n_identifiers": 5, "random_cut": "def with_shared_head(self) -> bool:\n \n return hasattr(self, 'roi_head') and self.roi_head.with_shared_head\n" }, { "id": 253443, "commit_id": "8c2428c9d355ca5fbc3dd90e9820ceb1cc795837", "repo": "mitmproxy", "path": "mitmproxy/proxy/layers/quic.py", "file_name": "quic.py", "fun_name": "receive_close", "commit_message": "[autofix.ci] apply automated fixes", "code": "def receive_close(self) -> layer.CommandGenerator[None]:\n assert self.quic\n # if `_close_event` is not set, the underlying connection has been closed\n # we turn this into a QUIC close event as well\n close_event = self.quic._close_event or quic_events.ConnectionTerminated(\n QuicErrorCode.NO_ERROR, None, \"Connection closed.\"\n )\n yield from self.event_to_child(\n QuicConnectionClosed(\n self.conn,\n close_event.error_code,\n close_event.frame_type,\n close_event.reason_phrase,\n )\n )\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 190, "n_words": 49, "vocab_size": 46, "complexity": 2, "nloc": 13, "token_counts": 62, "n_ast_nodes": 95, "n_identifiers": 17, "random_cut": "def receive_close(self) -> layer.CommandGenerator[None]:\n assert self.quic\n # if `_close_event` is not set, the underlying connection has been closed\n # we turn this into a QUIC close event as well\n close_event = self.quic._close_event or quic_events.ConnectionTerminated(\n QuicErrorCod" }, { "id": 282023, "commit_id": "683a8bdd83c1b931df111a5b2b8b19350930b73a", "repo": "OpenBBTerminal", "path": "tests/gamestonk_terminal/economy/test_economy_controller.py", "file_name": "test_economy_controller.py", "fun_name": "test_call_cls", "commit_message": "Tests : Economy + Conftest (#1260)\n\n* Updating tests : economy\r\n\r\n* Updating tests : removing breaklines\r\n\r\n* Updating tests : economy\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : economy", "code": "def test_call_cls(mocker):\n mocker.patch(\"os.system\")\n\n controller = economy_controller.EconomyController(queue=None)\n controller.call_cls([])\n\n assert controller.queue == []\n os.system.assert_called_once_with(\"cls||clear\")\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametrize(\n \"func, queue, expected_queue\",\n [\n (\n \"call_exit\",\n [],\n [\"quit\", \"quit\"],\n ),\n (\"call_exit\", [\"help\"], [\"quit\", \"quit\", 
\"help\"]),\n (\"call_home\", [], [\"quit\"]),\n (\"call_help\", [], []),\n (\"call_quit\", [], [\"quit\"]),\n (\"call_quit\", [\"help\"], [\"quit\", \"help\"]),\n (\n \"call_reset\",\n [],\n [\n \"quit\",\n \"reset\",\n \"economy\",\n ],\n ),\n (\n \"call_reset\",\n [\"help\"],\n [\n \"quit\",\n \"reset\",\n \"economy\",\n \"help\",\n ],\n ),\n ],\n)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr(record_mode=\"none\")\n@pytest.mark.parametrize(\n \"func, queue, expected_queue\",\n [\n (\n \"call_exit\",\n [],\n [\"quit\", \"quit\"],\n ),\n (\"call_exit\", [\"help\"], [\"quit\", \"quit\", \"help\"]),\n (\"call_home\", [], [\"quit\"]),\n (\"call_help\", [], []),\n (\"call_quit\", [], [\"quit\"]),\n (\"call_quit\", [\"help\"], [\"quit\", \"help\"]),\n (\n \"call_reset\",\n [],\n [\n \"quit\",\n \"reset\",\n \"economy\",\n ],\n ),\n (\n \"call_reset\",\n [\"help\"],\n [\n \"quit\",\n \"reset\",\n \"economy\",\n \"help\",\n ],\n ),\n ],\n)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 386, "n_words": 63, "vocab_size": 39, "complexity": 1, "nloc": 6, "token_counts": 43, "n_ast_nodes": 307, "n_identifiers": 16, "random_cut": "def test_call_cls(mocker):\n mocker.patch(\"os.system\")\n\n controller = economy_controller.EconomyController(queue=None)\n controller.call_cls([])\n\n assert controller.queue == []\n os.system.assert_called_once_with(\"cls||clear\")\n\n\n@pytest.mark.vcr(record_mode=\"none\")\n@pyt" }, { "id": 165699, "commit_id": "fa7e31b8b19eb03dceebd09d03798363daae07d9", "repo": "pandas", "path": "pandas/tests/io/xml/test_xml.py", "file_name": "test_xml.py", "fun_name": "test_url_path_error", "commit_message": "ENH: Add large file support for read_xml (#45724)\n\n* ENH: Add large file support for read_xml\r\n\r\n* Combine tests, slightly fix docs\r\n\r\n* Adjust pytest decorator on URL test; fix doc strings\r\n\r\n* Adjust tests for helper function\r\n\r\n* Add iterparse feature to some tests\r\n\r\n* Add IO docs link in docstring", "code": "def test_url_path_error(parser):\n url = \"https://www.w3schools.com/xml/books.xml\"\n with pytest.raises(\n ParserError, match=(\"iterparse is designed for large XML files\")\n ):\n read_xml(\n url,\n parser=parser,\n iterparse={\"row\": [\"shape\", \"degrees\", \"sides\", \"date\"]},\n )\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 87, "n_words": 25, "vocab_size": 25, "complexity": 1, "nloc": 10, "token_counts": 47, "n_ast_nodes": 84, "n_identifiers": 9, "random_cut": "def test_url_path_error(parser):\n url = \"https://www.w3schools.com/xml/books.xml\"\n with pytest.raises(\n ParserError, match=(\"iterparse is designed for large XML fi" }, { "id": 308240, "commit_id": "420285f7ef1e170f599cf22c031987e2ceefa353", "repo": "core", "path": "tests/components/forked_daapd/test_media_player.py", "file_name": "test_media_player.py", "fun_name": "test_master_state", "commit_message": "Support announce and enqueue in forked-daapd (#77744)", "code": "def test_master_state(hass, mock_api_object):\n \n state = hass.states.get(TEST_MASTER_ENTITY_NAME)\n assert state.state == STATE_PAUSED\n assert state.attributes[ATTR_FRIENDLY_NAME] == \"forked-daapd server\"\n assert state.attributes[ATTR_SUPPORTED_FEATURES] == SUPPORTED_FEATURES\n assert not state.attributes[ATTR_MEDIA_VOLUME_MUTED]\n assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.2\n assert 
state.attributes[ATTR_MEDIA_CONTENT_ID] == 12322\n assert state.attributes[ATTR_MEDIA_CONTENT_TYPE] == MediaType.MUSIC\n assert state.attributes[ATTR_MEDIA_DURATION] == 0.05\n assert state.attributes[ATTR_MEDIA_POSITION] == 0.005\n assert state.attributes[ATTR_MEDIA_TITLE] == \"No album\" # reversed for url\n assert state.attributes[ATTR_MEDIA_ARTIST] == \"Some artist\"\n assert state.attributes[ATTR_MEDIA_ALBUM_NAME] == \"Some song\" # reversed\n assert state.attributes[ATTR_MEDIA_ALBUM_ARTIST] == \"The xx\"\n assert state.attributes[ATTR_MEDIA_TRACK] == 1\n assert not state.attributes[ATTR_MEDIA_SHUFFLE]\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 128, "n_words": 75, "vocab_size": 45, "complexity": 1, "nloc": 17, "token_counts": 156, "n_ast_nodes": 235, "n_identifiers": 26, "random_cut": "def test_master_state(hass, mock_api_object):\n \n state = hass.states.get(TEST_MASTER_ENTITY_NAME)\n assert state.state == STATE_PAUSED\n assert state.attributes[ATTR_FRIENDLY_NAME]" }, { "id": 49756, "commit_id": "f4d6e64cdc132ae868699a0ba442f4ab1d304a14", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_cnclip_vitb16/resize_right/interp_methods.py", "file_name": "interp_methods.py", "fun_name": "lanczos2", "commit_message": "add disco_diffusion_cnclip_vitb16 module", "code": "def lanczos2(x):\n fw, to_dtype, eps = set_framework_dependencies(x)\n return (((fw.sin(pi * x) * fw.sin(pi * x / 2) + eps) / ((pi**2 * x**2 / 2) + eps)) * to_dtype(abs(x) < 2))\n\n\n@support_sz(6)", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "@support_sz(6)", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 36, "n_words": 32, "vocab_size": 24, "complexity": 1, "nloc": 3, "token_counts": 69, "n_ast_nodes": 116, "n_identifiers": 10, "random_cut": "def lanczos2(x):\n fw, to_dtype, eps = set_framework_dependencies(x)\n return (((fw.sin(pi * x) * fw.sin(pi * x / 2) + eps) / ((pi**2 * x**2 / 2" }, { "id": 74321, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/tests/test_page_model.py", "file_name": "test_page_model.py", "fun_name": "test_copy_page_copies_recursively_to_the_same_tree", "commit_message": "Reformat with black", "code": "def test_copy_page_copies_recursively_to_the_same_tree(self):\n events_index = EventIndex.objects.get(url_path=\"/home/events/\")\n old_christmas_event = (\n events_index.get_children().filter(slug=\"christmas\").first().specific\n )\n old_christmas_event.save_revision()\n\n with self.assertRaises(Exception) as exception:\n events_index.copy(\n recursive=True,\n update_attrs={\"title\": \"New events index\", \"slug\": \"new-events-index\"},\n to=events_index,\n )\n self.assertEqual(\n str(exception.exception),\n \"You cannot copy a tree branch recursively into itself\",\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 185, "n_words": 37, "vocab_size": 34, "complexity": 1, "nloc": 16, "token_counts": 93, "n_ast_nodes": 162, "n_identifiers": 23, "random_cut": "def test_copy_page_copies_recursively_to_the_same_tree(self):\n events_index = EventIndex.objects.get(url_path=\"/home/events/\")\n old_christmas_event = (\n events_index.get_children().filter(slug=\"christmas\").first().specific\n )\n old_christmas_event.save_revision()\n\n with self.assertRaises(Exception) as 
exception:\n events_index.copy(\n recursive=True,\n update_attrs={\"title\": \"New events index\", \"slug\": \"new-events-index\"},\n to=events_index,\n )\n self.assertEqual(\n str(exception.exception),\n \"You cannot copy a tree branch recursively into itself\",\n )\n" }, { "id": 189211, "commit_id": "3c7e82860c6bd219f67d4373c715efb805500074", "repo": "aws-cli", "path": "awscli/utils.py", "file_name": "utils.py", "fun_name": "_find_quote_char_in_part", "commit_message": "Improved readablity and simplified logic to find first quote character.\n\n* Updated \"_find_quote_char_in_part\" function, previously it was scanning input\nstring multiple times however proposed logic does the same thing in single\niteration.\n* Code branching is also reduced, only one if block is required.", "code": "def _find_quote_char_in_part(part):\n \n quote_char = None\n for ch in part:\n if ch in ('\"', \"'\"):\n quote_char = ch\n break\n return quote_char\n\n", "url": "https://github.com/aws/aws-cli.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 61, "n_words": 20, "vocab_size": 14, "complexity": 3, "nloc": 7, "token_counts": 29, "n_ast_nodes": 52, "n_identifiers": 4, "random_cut": "def _find_quote_char_in_part(part):\n \n quote_char = None\n for ch in part:\n if ch in ('\"', \"'\"):\n quote_char" }, { "id": 163800, "commit_id": "57d7768c205d30cc50ba9b42d60d24d1e32eb249", "repo": "pandas", "path": "pandas/core/arrays/numeric.py", "file_name": "numeric.py", "fun_name": "_arith_method", "commit_message": "BUG: NumericArray * td64_array (#45622)", "code": "def _arith_method(self, other, op):\n op_name = op.__name__\n omask = None\n\n if isinstance(other, BaseMaskedArray):\n other, omask = other._data, other._mask\n\n elif is_list_like(other):\n if not isinstance(other, ExtensionArray):\n other = np.asarray(other)\n if other.ndim > 1:\n raise NotImplementedError(\"can only perform ops with 1-d structures\")\n\n # We wrap the non-masked arithmetic logic used for numpy dtypes\n # in Series/Index arithmetic ops.\n other = ops.maybe_prepare_scalar_for_op(other, (len(self),))\n pd_op = ops.get_array_op(op)\n other = ensure_wrapped_if_datetimelike(other)\n\n mask = self._propagate_mask(omask, other)\n\n if other is libmissing.NA:\n result = np.ones_like(self._data)\n if \"truediv\" in op_name and self.dtype.kind != \"f\":\n # The actual data here doesn't matter since the mask\n # will be all-True, but since this is division, we want\n # to end up with floating dtype.\n result = result.astype(np.float64)\n else:\n # Make sure we do this before the \"pow\" mask checks\n # to get an expected exception message on shape mismatch.\n if self.dtype.kind in [\"i\", \"u\"] and op_name in [\"floordiv\", \"mod\"]:\n # ATM we don't match the behavior of non-masked types with\n # respect to floordiv-by-zero\n pd_op = op\n\n with np.errstate(all=\"ignore\"):\n result = pd_op(self._data, other)\n\n if op_name == \"pow\":\n # 1 ** x is 1.\n mask = np.where((self._data == 1) & ~self._mask, False, mask)\n # x ** 0 is 1.\n if omask is not None:\n mask = np.where((other == 0) & ~omask, False, mask)\n elif other is not libmissing.NA:\n mask = np.where(other == 0, False, mask)\n\n elif op_name == \"rpow\":\n # 1 ** x is 1.\n if omask is not None:\n mask = np.where((other == 1) & ~omask, False, mask)\n elif other is not libmissing.NA:\n mask = np.where(other == 1, False, mask)\n # x ** 0 is 1.\n mask = np.where((self._data == 0) & ~self._mask, False, mask)\n\n return 
self._maybe_mask_result(result, mask, other, op_name)\n\n _HANDLED_TYPES = (np.ndarray, numbers.Number)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 813, "n_words": 282, "vocab_size": 141, "complexity": 16, "nloc": 36, "token_counts": 360, "n_ast_nodes": 596, "n_identifiers": 41, "random_cut": "def _arith_method(self, other, op):\n op_name = op.__name__\n omask = None\n\n if isinstance(other, BaseMaskedArray):\n other, omask = other._data, other._mask\n\n elif is_list_like(other):\n if not isinstance(other, ExtensionArray):\n other = np.asarray(other)\n if other.ndim > 1:\n raise NotImplementedError(\"can only perform ops with 1-d structures\")\n\n # We wrap the non-masked arithmetic logic used for numpy dtypes\n # in Series/Index arithmetic ops.\n other = ops.maybe_prepare_scalar_for_op(other, (len(self),))\n pd_op = ops.get_array_op(op)\n other " }, { "id": 110632, "commit_id": "b4e9e3131cdd7f1ad33ea06e21e7d3e51762af91", "repo": "matplotlib", "path": "lib/matplotlib/backends/backend_gtk3.py", "file_name": "backend_gtk3.py", "fun_name": "button_press_event", "commit_message": "Separately track modifier keys for mouse events.\n\nWhether the event modifiers are directly available on enter/leave events\ndepends on the backend, but all are handled here (except possibly for\nmacos, which I haven't checked).", "code": "def button_press_event(self, widget, event):\n MouseEvent(\"button_press_event\", self,\n *self._mpl_coords(event), event.button,\n modifiers=self._mpl_modifiers(event.state),\n guiEvent=event)._process()\n return False # finish event propagation?\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 84, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 6, "token_counts": 48, "n_ast_nodes": 73, "n_identifiers": 12, "random_cut": "def button_press_event(self, widget, event):\n MouseEvent(\"button_press_event\", self,\n " }, { "id": 133157, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/inspect.py", "file_name": "inspect.py", "fun_name": "is_cython", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def is_cython(obj):\n \n\n # TODO(suo): We could split these into two functions, one for Cython\n # functions and another for Cython methods.\n # TODO(suo): There doesn't appear to be a Cython function 'type' we can\n # check against via isinstance. 
Please correct me if I'm wrong.", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 60, "n_words": 45, "vocab_size": 38, "complexity": 3, "nloc": 5, "token_counts": 29, "n_ast_nodes": 17, "n_identifiers": 2, "random_cut": "def is_cython(obj):\n \n\n # TODO(suo): We could split these into two functions, one for Cython\n # functions and another for Cyth" }, { "id": 47547, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/models/test_cleartasks.py", "file_name": "test_cleartasks.py", "fun_name": "test_clear_task_instances_without_task", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def test_clear_task_instances_without_task(self, dag_maker):\n with dag_maker(\n 'test_clear_task_instances_without_task',\n start_date=DEFAULT_DATE,\n end_date=DEFAULT_DATE + datetime.timedelta(days=10),\n ) as dag:\n task0 = EmptyOperator(task_id='task0')\n task1 = EmptyOperator(task_id='task1', retries=2)\n\n dr = dag_maker.create_dagrun(\n state=State.RUNNING,\n run_type=DagRunType.SCHEDULED,\n )\n\n ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)\n ti0.refresh_from_task(task0)\n ti1.refresh_from_task(task1)\n\n ti0.run()\n ti1.run()\n\n # Remove the task from dag.\n dag.task_dict = {}\n assert not dag.has_task(task0.task_id)\n assert not dag.has_task(task1.task_id)\n\n with create_session() as session:\n # we use order_by(task_id) here because for the test DAG structure of ours\n # this is equivalent to topological sort. It would not work in general case\n # but it works for our case because we specifically constructed test DAGS\n # in the way that those two sort methods are equivalent\n qry = session.query(TI).filter(TI.dag_id == dag.dag_id).order_by(TI.task_id).all()\n clear_task_instances(qry, session)\n\n # When dag is None, max_tries will be maximum of original max_tries or try_number.\n ti0.refresh_from_db()\n ti1.refresh_from_db()\n # Next try to run will be try 2\n assert ti0.try_number == 2\n assert ti0.max_tries == 1\n assert ti1.try_number == 2\n assert ti1.max_tries == 2\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 451, "n_words": 155, "vocab_size": 111, "complexity": 1, "nloc": 29, "token_counts": 216, "n_ast_nodes": 359, "n_identifiers": 46, "random_cut": "def test_clear_task_instances_without_task(self, dag_maker):\n with dag_maker(\n 'test_clear_task_instances_without_task',\n start_date=DEFAULT_DATE,\n end_date=DEFAULT_DATE + datetime.timedelta(days=10),\n ) as dag:\n task0 = EmptyOperator(task_id='task0')\n task1 = EmptyOperator(task_id='task1', retries=2)\n\n dr = dag_maker.create_dagrun(\n state=State.RUNNING,\n run_type=DagRunType.SCHEDULED,\n )\n\n ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)\n ti0.refresh_from_task(task0)\n ti1.refresh_from_task(task1)\n\n ti0.run()\n ti1.run()\n\n # Remove the task from dag.\n dag.task_dict = {}\n assert not dag.has_task(task0.task_id)\n assert not dag.has_task(task1.task_id)\n\n with create_session() as session:\n # we use order_by(task_id) here because for the test DAG structure of ours\n # this is equivalent to topological sort. 
It would not work in general case\n # but it works for our case because we specifically constructed test DAGS\n # in the way that those two sort methods are equivalent\n qry = session.query(TI).filter(TI.dag_id == dag.dag_id).order_by(TI.task_id).all()\n clear_task_instances(qry, session)\n\n # When dag is None" }, { "id": 242742, "commit_id": "ee85e387bab535e2339b9d3cd1ab87c61d23af15", "repo": "Pillow", "path": "src/PIL/ImageFile.py", "file_name": "ImageFile.py", "fun_name": "set_as_raw", "commit_message": "Remove redundant parentheses", "code": "def set_as_raw(self, data, rawmode=None):\n \n\n if not rawmode:\n rawmode = self.mode\n d = Image._getdecoder(self.mode, \"raw\", rawmode)\n d.setimage(self.im, self.state.extents())\n s = d.decode(data)\n\n if s[0] >= 0:\n raise ValueError(\"not enough image data\")\n if s[1] != 0:\n raise ValueError(\"cannot decode image data\")\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 120, "n_words": 38, "vocab_size": 30, "complexity": 4, "nloc": 10, "token_counts": 85, "n_ast_nodes": 139, "n_identifiers": 15, "random_cut": "def set_as_raw(self, data, rawmode=None):\n " }, { "id": 11965, "commit_id": "2ce767517532ebbf85ade4b84cfba0f7bb69c4f9", "repo": "jina", "path": "setup.py", "file_name": "setup.py", "fun_name": "register_ac", "commit_message": "refactor: remove jinad (#4550)", "code": "def register_ac():\n import os\n import re\n from pathlib import Path\n\n home = str(Path.home())\n resource_path = 'jina/resources/completions/jina.%s'\n regex = r'#\\sJINA_CLI_BEGIN(.*)#\\sJINA_CLI_END'\n _check = {'zsh': '.zshrc', 'bash': '.bashrc', 'fish': '.fish'}\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 47, "n_words": 27, "vocab_size": 22, "complexity": 3, "nloc": 14, "token_counts": 69, "n_ast_nodes": 87, "n_identifiers": 10, "random_cut": "def register_ac():\n import os\n import re\n from pathlib import Path\n\n home = str(Path.home())\n resource_path = 'jina/resources/completions/jina.%s'\n regex" }, { "id": 16851, "commit_id": "1cfeeec8240c4c7e93e632cd9e42be2a1be33b16", "repo": "ccxt", "path": "python/ccxt/async_support/ndax.py", "file_name": "ndax.py", "fun_name": "fetch_markets", "commit_message": "1.70.82\n\n[ci skip]", "code": "async def fetch_markets(self, params={}):\n omsId = self.safe_integer(self.options, 'omsId', 1)\n request = {\n 'omsId': omsId,\n }\n response = await self.publicGetGetInstruments(self.extend(request, params))\n #\n # [\n # {\n # \"OMSId\":1,\n # \"InstrumentId\":3,\n # \"Symbol\":\"LTCBTC\",\n # \"Product1\":3,\n # \"Product1Symbol\":\"LTC\",\n # \"Product2\":1,\n # \"Product2Symbol\":\"BTC\",\n # \"InstrumentType\":\"Standard\",\n # \"VenueInstrumentId\":3,\n # \"VenueId\":1,\n # \"SortIndex\":0,\n # \"SessionStatus\":\"Running\",\n # \"PreviousSessionStatus\":\"Stopped\",\n # \"SessionStatusDateTime\":\"2020-11-25T19:42:15.245Z\",\n # \"SelfTradePrevention\":true,\n # \"QuantityIncrement\":0.0000000100000000000000000000,\n # \"PriceIncrement\":0.0000000100000000000000000000,\n # \"MinimumQuantity\":0.0100000000000000000000000000,\n # \"MinimumPrice\":0.0000010000000000000000000000,\n # \"VenueSymbol\":\"LTCBTC\",\n # \"IsDisable\":false,\n # \"MasterDataId\":0,\n # \"PriceCollarThreshold\":0.0000000000000000000000000000,\n # \"PriceCollarPercent\":0.0000000000000000000000000000,\n # \"PriceCollarEnabled\":false,\n # 
\"PriceFloorLimit\":0.0000000000000000000000000000,\n # \"PriceFloorLimitEnabled\":false,\n # \"PriceCeilingLimit\":0.0000000000000000000000000000,\n # \"PriceCeilingLimitEnabled\":false,\n # \"CreateWithMarketRunning\":true,\n # \"AllowOnlyMarketMakerCounterParty\":false,\n # \"PriceCollarIndexDifference\":0.0000000000000000000000000000,\n # \"PriceCollarConvertToOtcEnabled\":false,\n # \"PriceCollarConvertToOtcClientUserId\":0,\n # \"PriceCollarConvertToOtcAccountId\":0,\n # \"PriceCollarConvertToOtcThreshold\":0.0000000000000000000000000000,\n # \"OtcConvertSizeThreshold\":0.0000000000000000000000000000,\n # \"OtcConvertSizeEnabled\":false,\n # \"OtcTradesPublic\":true,\n # \"PriceTier\":0\n # },\n # ]\n #\n result = []\n for i in range(0, len(response)):\n market = response[i]\n id = self.safe_string(market, 'InstrumentId')\n # lowercaseId = self.safe_string_lower(market, 'symbol')\n baseId = self.safe_string(market, 'Product1')\n quoteId = self.safe_string(market, 'Product2')\n base = self.safe_currency_code(self.safe_string(market, 'Product1Symbol'))\n quote = self.safe_currency_code(self.safe_string(market, 'Product2Symbol'))\n sessionStatus = self.safe_string(market, 'SessionStatus')\n isDisable = self.safe_value(market, 'IsDisable')\n sessionRunning = (sessionStatus == 'Running')\n result.append({\n 'id': id,\n 'symbol': base + '/' + quote,\n 'base': base,\n 'quote': quote,\n 'settle': None,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': None,\n 'type': 'spot',\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'active': (sessionRunning and not isDisable),\n 'contract': False,\n 'linear': None,\n 'inverse': None,\n 'contractSize': None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'amount': self.safe_number(market, 'QuantityIncrement'),\n 'price': self.safe_number(market, 'PriceIncrement'),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': self.safe_number(market, 'MinimumQuantity'),\n 'max': None,\n },\n 'price': {\n 'min': self.safe_number(market, 'MinimumPrice'),\n 'max': None,\n },\n 'cost': {\n 'min': None,\n 'max': None,\n },\n },\n 'info': market,\n })\n return result\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 2071, "n_words": 260, "vocab_size": 151, "complexity": 3, "nloc": 66, "token_counts": 370, "n_ast_nodes": 681, "n_identifiers": 28, "random_cut": "async def fetch_markets(self, params={}):\n omsId = self.safe_integer(self.options, 'omsId', 1)\n request = {\n 'omsId': omsId,\n }\n response = await self.publicGetGetInstruments(self.extend(request, params))\n #\n # [\n # {\n # \"OMSId\":1,\n # \"InstrumentId\":3,\n # \"Symbol\":\"LTCBTC\",\n # \"Product1\":3,\n # \"Product1Symbol\":\"LTC\",\n # \"Product2\":1,\n # \"Product2Symbol\":\"BTC\",\n # \"InstrumentType\":\"Standard\",\n # \"VenueInstrumentId\":3,\n # \"VenueId\":1,\n # \"SortIndex\":0,\n # \"SessionStatus\":\"Running\",\n # \"PreviousSessionStatus\":\"Stopped\",\n # \"SessionStatusDateTime\":\"2020-11-25T19:42:15.245Z\",\n # \"SelfTradePrevention\":true,\n # \"QuantityIncrement\":0.0000000100000000000000000000,\n # \"PriceIncrement\":0.0000000100000000000000000000,\n # \"MinimumQuantity\":0.0100000000000000000000000000,\n # \"MinimumPrice\":0.0000010000000000000000000000,\n # \"VenueSymbol\":\"LTCBTC\",\n # \"IsDisable\":false,\n # \"MasterDataId\":0,\n # 
\"PriceCollarThreshold\":0.0000000000000000000000000000,\n # \"PriceCollarPercent\":0.0000000000000000000000000000,\n # \"PriceCollarEnabled\":false,\n # \"PriceFloorLimit\":0.0000000000000000000000000000,\n # \"PriceFloorLimitEnabled\":false,\n # \"PriceCeilingLimit\":0.0000000000000000000000000000,\n # \"PriceCeilingLimitEnabled\":false,\n # \"CreateWithMarketRunning\":true,\n # \"AllowOnlyMarketMakerCounterParty\":false,\n # \"PriceCollarIndexDifference\":0.0000000000000000000000000000,\n # \"PriceCollarConvertToOtcEnabled\":false,\n # \"PriceCollarCon" }, { "id": 218385, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "getinnerframes", "commit_message": "add python 3.10.4 for windows", "code": "def getinnerframes(tb, context=1):\n \n framelist = []\n while tb:\n frameinfo = (tb.tb_frame,) + getframeinfo(tb, context)\n framelist.append(FrameInfo(*frameinfo))\n tb = tb.tb_next\n return framelist\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 53, "n_words": 20, "vocab_size": 17, "complexity": 2, "nloc": 7, "token_counts": 49, "n_ast_nodes": 80, "n_identifiers": 10, "random_cut": "def getinnerframes(tb, context=1):\n \n framelist = []\n while tb:\n frameinfo = (tb.tb_frame,) + g" }, { "id": 67091, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/india/e_invoice/utils.py", "file_name": "utils.py", "fun_name": "show_link_to_error_log", "commit_message": "style: format code with black", "code": "def show_link_to_error_log(invoice, einvoice):\n\terr_log = log_error(einvoice)\n\tlink_to_error_log = get_link_to_form(\"Error Log\", err_log.name, \"Error Log\")\n\tfrappe.throw(\n\t\t_(\n\t\t\t\"An error occurred while creating e-invoice for {}. 
Please check {} for more information.\"\n\t\t).format(invoice.name, link_to_error_log),\n\t\ttitle=_(\"E Invoice Creation Failed\"),\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 27, "n_words": 36, "vocab_size": 34, "complexity": 1, "nloc": 9, "token_counts": 51, "n_ast_nodes": 85, "n_identifiers": 13, "random_cut": "def show_link_to_error_log(invoice, einvoice):\n\terr_log = log_error(einvoice)\n\tlink_to_error_log = get_link_to_form(\"Error Log\", err_" }, { "id": 100128, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_user_notification_details.py", "file_name": "test_user_notification_details.py", "fun_name": "test_lookup_other_user", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_lookup_other_user(self):\n user_b = self.create_user(email=\"b@example.com\")\n self.get_error_response(user_b.id, status_code=403)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 20, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 27, "n_ast_nodes": 45, "n_identifiers": 8, "random_cut": "def test_lookup_other_user(self):\n user_b = s" }, { "id": 27171, "commit_id": "107cfb229e75f14efc75a20c3d1f421ccb50f244", "repo": "saleor", "path": "saleor/plugins/webhook/tests/subscription_webhooks/fixtures.py", "file_name": "fixtures.py", "fun_name": "subscription_page_created_webhook", "commit_message": "Improve subscription payload tests and structure (#9719)\n\n* Refactor and update tests for subscriptions\r\n\r\n* Refactor and update tests for subscriptions\r\n\r\n* fixes after rebasing\r\n\r\n* fix category payload genertor\r\n\r\n* small fixes\r\n\r\n* fixes after rebase\r\n\r\n* fix linters errors\r\n\r\n* add sorting to payload generators, fixes after review\r\n\r\n* fix linters\r\n\r\n* remove commented", "code": "def subscription_page_created_webhook(subscription_webhook):\n return subscription_webhook(\n subscription_queries.PAGE_CREATED, WebhookEventAsyncType.PAGE_CREATED\n )\n\n\n@pytest.fixture", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 19, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 16, "n_ast_nodes": 32, "n_identifiers": 7, "random_cut": "def subscription_page_created_webhook(subscription_webhook):\n return subscription_webhook(\n subscription_queries.PAGE_CREATED, WebhookEventAsyncType.PAGE_CREATED\n )\n\n\n@pytest.fi" }, { "id": 254738, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/backend/test/case/node/dropout.py", "file_name": "dropout.py", "fun_name": "export_training_default_ratio_mask", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type 
annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def export_training_default_ratio_mask() -> None:\n seed = np.int64(0)\n node = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n )\n\n x = np.random.randn(3, 4, 5).astype(np.float32)\n r = np.float32(0.5)\n t = np.bool_(True)\n y, z = dropout(x, r, training_mode=t, return_mask=True)\n expect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_default_mask')\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 142, "n_words": 43, "vocab_size": 37, "complexity": 1, "nloc": 13, "token_counts": 129, "n_ast_nodes": 197, "n_identifiers": 25, "random_cut": "def export_training_default_ratio_mask() -> None:\n seed = np.int64(0)\n node = onnx.helper.make_node(\n 'Dropout',\n inputs=['x', 'r', 't'],\n outputs=['y', 'z'],\n seed=seed\n )\n\n x = np.random.randn(3, 4, 5).astype(np.float32)\n r = np.float32(0.5)\n t = np.bool_(True)\n y, z = dropout(x, r, training_mode=t, return_mask=True)\n expect(node, inputs=[x, r, t]," }, { "id": 165707, "commit_id": "fa7e31b8b19eb03dceebd09d03798363daae07d9", "repo": "pandas", "path": "pandas/tests/io/xml/test_xml_dtypes.py", "file_name": "test_xml_dtypes.py", "fun_name": "test_dtype_nullable_int", "commit_message": "ENH: Add large file support for read_xml (#45724)\n\n* ENH: Add large file support for read_xml\r\n\r\n* Combine tests, slightly fix docs\r\n\r\n* Adjust pytest decorator on URL test; fix doc strings\r\n\r\n* Adjust tests for helper function\r\n\r\n* Add iterparse feature to some tests\r\n\r\n* Add IO docs link in docstring", "code": "def test_dtype_nullable_int(parser):\n df_result = read_xml(xml_types, dtype={\"sides\": \"Int64\"}, parser=parser)\n df_iter = read_xml_iterparse(\n xml_types,\n parser=parser,\n dtype={\"sides\": \"Int64\"},\n iterparse={\"row\": [\"shape\", \"degrees\", \"sides\"]},\n )\n\n df_expected = DataFrame(\n {\n \"shape\": [\"square\", \"circle\", \"triangle\"],\n \"degrees\": [360, 360, 180],\n \"sides\": Series([4.0, float(\"nan\"), 3.0]).astype(\"Int64\"),\n }\n )\n\n tm.assert_frame_equal(df_result, df_expected)\n tm.assert_frame_equal(df_iter, df_expected)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 137, "n_words": 42, "vocab_size": 36, "complexity": 1, "nloc": 17, "token_counts": 124, "n_ast_nodes": 202, "n_identifiers": 16, "random_cut": "def test_dtype_nullable_int(parser):\n df_result = read_xml(xml_types, dtype={\"sides\": \"Int64\"}, parser=parser)\n df_iter = read_xml_iterparse(\n xml_types,\n parser=parser,\n dtype={\"sides\": \"Int64\"},\n iterparse={\"row\": [\"shape\", \"degrees\", \"sides\"]},\n )\n\n df_expected = DataFrame(\n {\n \"shape\": [\"square\", \"circle\", \"triangle\"],\n \"degrees\": [360, 360, 180],\n \"sides\": Series([4.0, float(\"nan\"), 3.0]).astype(\"Int64\"),\n " }, { "id": 60294, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", 
"repo": "transferlearning", "path": "code/deep/BJMMD/caffe/python/caffe/test/test_coord_map.py", "file_name": "test_coord_map.py", "fun_name": "test_nd_conv", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def test_nd_conv(self):\n \n n = caffe.NetSpec()\n # define data with 3 spatial dimensions, otherwise the same net\n n.data = L.Input(shape=dict(dim=[2, 3, 100, 100, 100]))\n n.conv = L.Convolution(\n n.data, num_output=10, kernel_size=[3, 3, 3], stride=[1, 1, 1],\n pad=[0, 1, 2])\n n.pool = L.Pooling(\n n.conv, pool=P.Pooling.MAX, kernel_size=2, stride=2, pad=0)\n n.deconv = L.Deconvolution(\n n.pool, num_output=10, kernel_size=4, stride=2, pad=0)\n ax, a, b = coord_map_from_to(n.deconv, n.data)\n self.assertEquals(ax, 1)\n self.assertTrue(len(a) == len(b))\n self.assertTrue(np.all(a == 1))\n self.assertEquals(b[0] - 1, b[1])\n self.assertEquals(b[1] - 1, b[2])\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 210, "n_words": 75, "vocab_size": 60, "complexity": 1, "nloc": 16, "token_counts": 229, "n_ast_nodes": 333, "n_identifiers": 32, "random_cut": "def test_nd_conv(self):\n \n n = caffe.NetSpec()\n # define data with 3 spatial dimensions, otherwise the same net\n n.data = L.Input(shape=dict(dim=[2, 3, 100, 100, 100]))\n n.conv = L.Convolution(\n n.data, " }, { "id": 299623, "commit_id": "b8442d9340b569d10e0593bb0576bdcdb9ea55e3", "repo": "core", "path": "tests/components/logbook/test_init.py", "file_name": "test_init.py", "fun_name": "test_unsupported_attributes_in_cache_throws", "commit_message": "Add json decode caching to logbook (#71080)", "code": "def test_unsupported_attributes_in_cache_throws(hass):\n \n entity_attr_cache = logbook.EntityAttributeCache(hass)\n event = MockLazyEventPartialState(EVENT_STATE_CHANGED)\n with pytest.raises(ValueError):\n entity_attr_cache.get(\"sensor.xyz\", \"not_supported\", event)\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 32, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 38, "n_ast_nodes": 69, "n_identifiers": 12, "random_cut": "def test_unsupported_attributes_in_cache_throws(hass):\n \n entity_attr_cache = logbook.EntityAttributeCache(hass)\n event = MockLazyEventPartialState(EVENT_STATE_CHANGED)\n with pytest.raises(ValueError):\n ent" }, { "id": 209511, "commit_id": "08b1f9d67c8e716fd44036a027bdc90dcb9fcfdf", "repo": "scapy", "path": "scapy/contrib/http2.py", "file_name": "http2.py", "fun_name": "_parse_multi_byte", "commit_message": "E275 - Missing whitespace after keyword (#3711)\n\nCo-authored-by: Alexander Aring \r\nCo-authored-by: Anmol Sarma \r\nCo-authored-by: antoine.torre \r\nCo-authored-by: Antoine Vacher \r\nCo-authored-by: Arnaud Ebalard \r\nCo-authored-by: atlowl <86038305+atlowl@users.noreply.github.com>\r\nCo-authored-by: Brian Bienvenu \r\nCo-authored-by: Chris Packham \r\nCo-authored-by: CQ \r\nCo-authored-by: Daniel Collins \r\nCo-authored-by: Federico Maggi \r\nCo-authored-by: Florian Maury \r\nCo-authored-by: _Frky <3105926+Frky@users.noreply.github.com>\r\nCo-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com>\r\nCo-authored-by: gpotter2 \r\nCo-authored-by: Guillaume Valadon \r\nCo-authored-by: Hao Zheng \r\nCo-authored-by: Haresh Khandelwal \r\nCo-authored-by: Harri Hämäläinen \r\nCo-authored-by: hecke 
\r\nCo-authored-by: Jan Romann \r\nCo-authored-by: Jan Sebechlebsky \r\nCo-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com>\r\nCo-authored-by: jockque <38525640+jockque@users.noreply.github.com>\r\nCo-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com>\r\nCo-authored-by: Keith Scott \r\nCo-authored-by: Kfir Gollan \r\nCo-authored-by: Lars Munch \r\nCo-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com>\r\nCo-authored-by: Leonard Crestez \r\nCo-authored-by: Marcel Patzlaff \r\nCo-authored-by: Martijn Thé \r\nCo-authored-by: Martine Lenders \r\nCo-authored-by: Michael Farrell \r\nCo-authored-by: Michał Mirosław \r\nCo-authored-by: mkaliszan \r\nCo-authored-by: mtury \r\nCo-authored-by: Neale Ranns \r\nCo-authored-by: Octavian Toader \r\nCo-authored-by: Peter Eisenlohr \r\nCo-authored-by: Phil \r\nCo-authored-by: Pierre Lalet \r\nCo-authored-by: Pierre Lorinquer \r\nCo-authored-by: piersoh <42040737+piersoh@users.noreply.github.com>\r\nCo-authored-by: plorinquer \r\nCo-authored-by: pvinci \r\nCo-authored-by: Rahul Jadhav \r\nCo-authored-by: Robin Jarry \r\nCo-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com>\r\nCo-authored-by: rperez \r\nCo-authored-by: Sabrina Dubroca \r\nCo-authored-by: Sebastian Baar \r\nCo-authored-by: sebastien mainand \r\nCo-authored-by: smehner1 \r\nCo-authored-by: speakinghedge \r\nCo-authored-by: Steven Van Acker \r\nCo-authored-by: Thomas Faivre \r\nCo-authored-by: Tran Tien Dat \r\nCo-authored-by: Wael Mahlous \r\nCo-authored-by: waeva <74464394+waeva@users.noreply.github.com>\r\n\r\nCo-authored-by: Alexander Aring \r\nCo-authored-by: Anmol Sarma \r\nCo-authored-by: antoine.torre \r\nCo-authored-by: Antoine Vacher \r\nCo-authored-by: Arnaud Ebalard \r\nCo-authored-by: atlowl <86038305+atlowl@users.noreply.github.com>\r\nCo-authored-by: Brian Bienvenu \r\nCo-authored-by: Chris Packham \r\nCo-authored-by: CQ \r\nCo-authored-by: Daniel Collins \r\nCo-authored-by: Federico Maggi \r\nCo-authored-by: Florian Maury \r\nCo-authored-by: _Frky <3105926+Frky@users.noreply.github.com>\r\nCo-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com>\r\nCo-authored-by: gpotter2 \r\nCo-authored-by: Guillaume Valadon \r\nCo-authored-by: Hao Zheng \r\nCo-authored-by: Haresh Khandelwal \r\nCo-authored-by: Harri Hämäläinen \r\nCo-authored-by: hecke \r\nCo-authored-by: Jan Romann \r\nCo-authored-by: Jan Sebechlebsky \r\nCo-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com>\r\nCo-authored-by: jockque <38525640+jockque@users.noreply.github.com>\r\nCo-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com>\r\nCo-authored-by: Keith Scott \r\nCo-authored-by: Kfir Gollan \r\nCo-authored-by: Lars Munch \r\nCo-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com>\r\nCo-authored-by: Leonard Crestez \r\nCo-authored-by: Marcel Patzlaff \r\nCo-authored-by: Martijn Thé \r\nCo-authored-by: Martine Lenders \r\nCo-authored-by: Michael Farrell \r\nCo-authored-by: Michał Mirosław \r\nCo-authored-by: mkaliszan \r\nCo-authored-by: mtury \r\nCo-authored-by: Neale Ranns \r\nCo-authored-by: Octavian Toader \r\nCo-authored-by: Peter Eisenlohr \r\nCo-authored-by: Phil \r\nCo-authored-by: Pierre Lalet \r\nCo-authored-by: Pierre Lorinquer \r\nCo-authored-by: piersoh <42040737+piersoh@users.noreply.github.com>\r\nCo-authored-by: pvinci \r\nCo-authored-by: Rahul Jadhav \r\nCo-authored-by: Robin Jarry \r\nCo-authored-by: romain-perez 
<51962832+romain-perez@users.noreply.github.com>\r\nCo-authored-by: rperez \r\nCo-authored-by: Sabrina Dubroca \r\nCo-authored-by: Sebastian Baar \r\nCo-authored-by: sebastien mainand \r\nCo-authored-by: smehner1 \r\nCo-authored-by: Steven Van Acker \r\nCo-authored-by: Thomas Faivre \r\nCo-authored-by: Tran Tien Dat \r\nCo-authored-by: Wael Mahlous \r\nCo-authored-by: waeva <74464394+waeva@users.noreply.github.com>", "code": "def _parse_multi_byte(self, s):\n # type: (str) -> int\n \n\n assert len(s) >= 2\n\n tmp_len = len(s)\n\n value = 0\n i = 1\n byte = orb(s[i])\n # For CPU sake, stops at an arbitrary large number!\n max_value = 1 << 64\n # As long as the MSG is set, an another byte must be read\n while byte & 0x80:\n value += (byte ^ 0x80) << (7 * (i - 1))\n if value > max_value:\n raise error.Scapy_Exception(\n 'out-of-bound value: the string encodes a value that is too large (>2^{{64}}): {}'.format(value) # noqa: E501\n )\n i += 1\n assert i < tmp_len, 'EINVAL: x: out-of-bound read: the string ends before the AbstractUVarIntField!' # noqa: E501\n byte = orb(s[i])\n value += byte << (7 * (i - 1))\n value += self._max_value\n\n assert value >= 0\n return value\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 343, "n_words": 132, "vocab_size": 83, "complexity": 3, "nloc": 20, "token_counts": 125, "n_ast_nodes": 202, "n_identifiers": 14, "random_cut": "def _parse_multi_byte(self, s):\n # type: (str) -> int\n \n\n assert len(s) >= 2\n\n tmp_len = len(s)\n\n value = 0\n i = 1\n byte = orb(s[i])\n # For CPU sake, stops at an arbitrary large numb" }, { "id": 223770, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/iterators.py", "file_name": "iterators.py", "fun_name": "walk", "commit_message": "add python 3.10.4 for windows", "code": "def walk(self):\n \n yield self\n if self.is_multipart():\n for subpart in self.get_payload():\n yield from subpart.walk()\n\n\n\f\n# These two functions are imported into the Iterators.py interface module.", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 50, "n_words": 24, "vocab_size": 23, "complexity": 3, "nloc": 5, "token_counts": 31, "n_ast_nodes": 56, "n_identifiers": 5, "random_cut": "def walk(self):\n \n yield self\n if self.is_multipart():\n for subpart in self.get_payload():\n yield from subpart.walk()\n\n\n\f\n# These two functions are " }, { "id": 254930, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/backend/test/case/node/reduce_log_sum.py", "file_name": "reduce_log_sum.py", "fun_name": "export_keepdims", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix 
type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def export_keepdims() -> None:\n node = onnx.helper.make_node(\n 'ReduceLogSum',\n inputs=['data'],\n outputs=[\"reduced\"]\n )\n data = np.random.ranf([3, 4, 5]).astype(np.float32)\n reduced = np.log(np.sum(data, keepdims=True))\n expect(node, inputs=[data], outputs=[reduced],\n name='test_reduce_log_sum_default')\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 105, "n_words": 24, "vocab_size": 22, "complexity": 1, "nloc": 10, "token_counts": 88, "n_ast_nodes": 139, "n_identifiers": 19, "random_cut": "def export_keepdims() -> None:\n node = onnx.helper.make_node(\n 'ReduceLogSum',\n " }, { "id": 289917, "commit_id": "d50795af2b861e28e717f0479ad6e800b7030620", "repo": "core", "path": "homeassistant/components/upnp/coordinator.py", "file_name": "coordinator.py", "fun_name": "_async_update_data", "commit_message": "Move upnp derived sensors to library, be more robust about failing getting some data (#79955)", "code": "async def _async_update_data(self) -> Mapping[str, Any]:\n \n try:\n return await self.device.async_get_data()\n except UpnpCommunicationError as exception:\n LOGGER.debug(\n \"Caught exception when updating device: %s, exception: %s\",\n self.device,\n exception,\n )\n raise UpdateFailed(\n f\"Unable to communicate with IGD at: {self.device.device_url}\"\n ) from exception\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 174, "n_words": 38, "vocab_size": 35, "complexity": 2, "nloc": 13, "token_counts": 50, "n_ast_nodes": 93, "n_identifiers": 13, "random_cut": "async def _async_update_data(self) -> Mapping[str, Any]:\n \n try:\n return await self.device.async_get_data()\n except UpnpCommunicationError as exception:\n LOGGER.debug(\n \"Caught exception when updating device: %s, exception: %s\",\n self.device,\n exception,\n )\n raise UpdateFailed(\n f\"Unable to communicate with IGD at: {self.de" }, { "id": 320217, "commit_id": "057f6016cc92f6d21b04b9a16dc6f0b255c8b401", "repo": "paperless-ngx", "path": "src/documents/consumer.py", "file_name": "consumer.py", "fun_name": "run_pre_consume_script", "commit_message": "Adds further testing to cover scripts with non-zero exit codes", "code": "def run_pre_consume_script(self):\n if not settings.PRE_CONSUME_SCRIPT:\n return\n\n if not os.path.isfile(settings.PRE_CONSUME_SCRIPT):\n self._fail(\n MESSAGE_PRE_CONSUME_SCRIPT_NOT_FOUND,\n f\"Configured pre-consume script \"\n f\"{settings.PRE_CONSUME_SCRIPT} does not exist.\",\n )\n\n self.log(\"info\", f\"Executing pre-consume script {settings.PRE_CONSUME_SCRIPT}\")\n\n filepath_arg = os.path.normpath(self.path)\n\n script_env = os.environ.copy()\n script_env[\"DOCUMENT_SOURCE_PATH\"] = filepath_arg\n\n try:\n completed_proc = run(\n args=[\n settings.PRE_CONSUME_SCRIPT,\n filepath_arg,\n ],\n env=script_env,\n capture_output=True,\n )\n\n self._log_script_outputs(completed_proc)\n\n # Raises exception on non-zero output\n completed_proc.check_returncode()\n\n except Exception as e:\n self._fail(\n MESSAGE_PRE_CONSUME_SCRIPT_ERROR,\n f\"Error while executing pre-consume 
script: {e}\",\n exc_info=True,\n exception=e,\n )\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 436, "n_words": 68, "vocab_size": 55, "complexity": 4, "nloc": 31, "token_counts": 133, "n_ast_nodes": 231, "n_identifiers": 27, "random_cut": "def run_pre_consume_script(self):\n if not settings.PRE_CONSUME_SCRIPT:\n return\n\n if not os.path.isfile(settings.PRE_CONSUME_SCRIPT):\n self._fail(\n MESSAGE_PRE_CONSUME_SCRIPT_NOT_FOUND,\n f\"Configured pre-consume script \"\n f\"{settings.PRE_CONSUME_SCRIPT} does not exist.\",\n )\n\n self.log(\"info\", f\"Executing pre-consume script {settings.PRE_CONSUME_SCRIPT}\")\n\n filepath_arg = os.path.normpath(self.path)\n\n script_env = os.environ.copy()\n script_env[\"DOCUMENT_SOURCE_PATH\"] = filepath_arg\n\n try:\n completed_proc = run(\n args=[\n settings.PRE_CONSUME_SCRIPT,\n filepath_arg,\n ],\n env=script_env,\n capture_output=True,\n )\n\n self._log_script_outputs(completed_proc)\n\n # Raises exception on non-zero output\n completed_proc.check_returncode()\n\n except Exception as e:\n self._fail(\n MESSAGE_PRE_CONSUME_SCRIPT_ERROR,\n f\"Error while executing pre-consume script: {e}\",\n exc_info=True,\n exception=e,\n " }, { "id": 270468, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/distribute/mirrored_strategy_test.py", "file_name": "mirrored_strategy_test.py", "fun_name": "testTrainAndServeWithKPL", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def testTrainAndServeWithKPL(self, distribution):\n use_adapt = False\n test_utils_obj = kpl_test_utils.DistributeKplTestUtils()\n with distribution.scope():\n (\n feature_mapper,\n label_mapper,\n ) = test_utils_obj.define_kpls_for_training(use_adapt)\n model = test_utils_obj.define_model()\n optimizer = rmsprop.RMSprop(learning_rate=0.1)\n accuracy = keras.metrics.Accuracy()\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 131, "n_words": 26, "vocab_size": 21, "complexity": 3, "nloc": 31, "token_counts": 168, "n_ast_nodes": 104, "n_identifiers": 21, "random_cut": "def testTrainAndServeWithKPL(self, distribution):\n use_adapt = False\n test_utils_obj = kpl_test_utils.DistributeKplTestUtils(" }, { "id": 113550, "commit_id": "071dfb2dcff2f2a0b66a60f5faf51a97dc135328", "repo": "nni", "path": "setup_ts.py", "file_name": "setup_ts.py", "fun_name": "copy_nni_node", "commit_message": "Bump node.js to 18 (#5206)", "code": "def copy_nni_node(version):\n \n _print('Copying files')\n\n if sys.version_info >= (3, 8):\n shutil.copytree('ts/nni_manager/dist', 'nni_node', dirs_exist_ok=True)\n else:\n for item in os.listdir('ts/nni_manager/dist'):\n subsrc = os.path.join('ts/nni_manager/dist', item)\n subdst = os.path.join('nni_node', item)\n if os.path.isdir(subsrc):\n shutil.copytree(subsrc, subdst)\n else:\n shutil.copy2(subsrc, subdst)\n shutil.copyfile('ts/nni_manager/package-lock.json', 'nni_node/package-lock.lock')\n Path('nni_node/nni_manager.tsbuildinfo').unlink()\n\n package_json = json.load(open('ts/nni_manager/package.json'))\n if version:\n while len(version.split('.')) < 3: # node.js semver requires at least three parts\n version = version + '.0'\n package_json['version'] = version\n json.dump(package_json, open('nni_node/package.json', 'w'), indent=2)\n\n if sys.platform == 'win32':\n # On Windows, 
manually install node-gyp for sqlite3.\n _npm('ts/nni_manager', 'install', '--global', 'node-gyp')\n\n # reinstall without development dependencies\n prod_path = Path('nni_node').resolve()\n _yarn(str(prod_path), 'install', '--production')\n\n shutil.copytree('ts/webui/build', 'nni_node/static')\n\n if jupyter_lab_major_version == '2':\n shutil.copytree('ts/jupyter_extension/build', 'nni_node/jupyter-extension/build')\n shutil.copytree(os.path.join(sys.exec_prefix, 'share/jupyter/lab/extensions'), 'nni_node/jupyter-extension/extensions')\n elif version or Path('ts/jupyter_extension/dist').exists():\n shutil.copytree('ts/jupyter_extension/dist', 'nni_node/jupyter-extension')\n\n\n_yarn_env = dict(os.environ)\n# `Path('nni_node').resolve()` does not work on Windows if the directory not exists\n_yarn_env['PATH'] = str(Path().resolve() / 'nni_node') + path_env_seperator + os.environ['PATH']\n_yarn_path = Path().resolve() / 'toolchain/yarn/bin' / yarn_executable\n_npm_path = Path().resolve() / 'toolchain/node' / npm_executable\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 338, "n_words": 146, "vocab_size": 112, "complexity": 10, "nloc": 30, "token_counts": 266, "n_ast_nodes": 590, "n_identifiers": 45, "random_cut": "def copy_nni_node(version):\n \n _print('Copying files')\n\n if sys.version_info >= (3, 8):\n shutil.copytree('ts/nni_manager/dist', 'nni_node', dirs_exist_ok=True)\n else:\n for item in os.listdir('ts/nni_manager/dist'):\n subsrc = os.path.join('ts/nni_manager/dist', item)\n subdst = os.path.join('nni_node', item)\n if os.path.isdir(subsrc):\n shutil.copytree(subsrc, subdst)\n else:\n shutil.copy2(subsrc, subdst)\n shutil.copyfile('ts/nni_manager/package-lock.json', 'nni_node/package-lock.lock')\n Path('nni_node/nni_manager.tsbuildinfo').unlink()\n\n package_json = json.load(open('ts/nni_manager/package.json'))\n if version:\n while len(version.split('.')) < 3: # node.js semver requires at least three parts\n version = version + '.0'\n package_json['version'] = version\n json.dump(package_json, open('nni_node/package.json', 'w'), indent=2)\n\n if sys.platform == 'win32':\n # On Windows, manually install node-gyp for sqlite3.\n _npm('ts/nni_manager', 'install', '--global', 'node-gyp')\n\n # reinstall without development dependencies\n prod_path = Path('nni_node').resolve()\n _yarn(str(prod_path), 'install', '--production')\n\n shutil.copytree('ts/webui/build', 'nni_node/static')\n\n if jupyter_lab_major_version == '2':\n shutil.copytree('ts/jupyter_extension/build', 'nni_node/jupyter-extension/build')\n shutil.copytree(os.path.join(sys.exec_prefix, 'share/jupyter/lab/extensions'), 'nni_node/jupyter-extension/extensions')\n elif version or Path('ts/jupyter_extension/dist').exists():\n sh" }, { "id": 170454, "commit_id": "22e591f2d142b20ba294c40236954d377c7b22ed", "repo": "pandas", "path": "pandas/tests/io/excel/test_readers.py", "file_name": "test_readers.py", "fun_name": "test_use_nullable_dtypes", "commit_message": "ENH: Add use nullable dtypes to read_excel (#49091)", "code": "def test_use_nullable_dtypes(self, read_ext):\n # GH#36712\n if read_ext == \".xlsb\":\n pytest.skip(\"No engine for filetype: 'xlsb'\")\n\n df = DataFrame(\n {\n \"a\": Series([1, 3], dtype=\"Int64\"),\n \"b\": Series([2.5, 4.5], dtype=\"Float64\"),\n \"c\": Series([True, False], dtype=\"boolean\"),\n \"d\": Series([\"a\", \"b\"], dtype=\"string\"),\n \"e\": Series([pd.NA, 6], dtype=\"Int64\"),\n \"f\": 
Series([pd.NA, 7.5], dtype=\"Float64\"),\n \"g\": Series([pd.NA, True], dtype=\"boolean\"),\n \"h\": Series([pd.NA, \"a\"], dtype=\"string\"),\n \"i\": Series([pd.Timestamp(\"2019-12-31\")] * 2),\n \"j\": Series([pd.NA, pd.NA], dtype=\"Int64\"),\n }\n )\n with tm.ensure_clean(read_ext) as file_path:\n df.to_excel(file_path, \"test\", index=False)\n result = pd.read_excel(\n file_path, sheet_name=\"test\", use_nullable_dtypes=True\n )\n tm.assert_frame_equal(result, df)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 348, "n_words": 76, "vocab_size": 65, "complexity": 2, "nloc": 23, "token_counts": 240, "n_ast_nodes": 389, "n_identifiers": 22, "random_cut": "def test_use_nullable_dtypes(self, read_ext):\n # GH#36712\n if read_ext == \".xlsb\":\n pytest.skip(\"No engine for filetype: 'xlsb'\")\n\n df = DataFrame(\n {\n \"a\": Seri" }, { "id": 210477, "commit_id": "c612935d8d7431f3a730cf5e213159f6b20938d1", "repo": "PaddleDetection", "path": "ppdet/modeling/heads/pico_head.py", "file_name": "pico_head.py", "fun_name": "_generate_anchors", "commit_message": "Simplify picodet postprocess (#5650)", "code": "def _generate_anchors(self, feats=None):\n # just use in eval time\n anchor_points = []\n stride_tensor = []\n for i, stride in enumerate(self.fpn_stride):\n if feats is not None:\n _, _, h, w = feats[i].shape\n else:\n h = math.ceil(self.eval_size[0] / stride)\n w = math.ceil(self.eval_size[1] / stride)\n shift_x = paddle.arange(end=w) + self.cell_offset\n shift_y = paddle.arange(end=h) + self.cell_offset\n shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)\n anchor_point = paddle.cast(\n paddle.stack(\n [shift_x, shift_y], axis=-1), dtype='float32')\n anchor_points.append(anchor_point.reshape([-1, 2]))\n stride_tensor.append(\n paddle.full(\n [h * w, 1], stride, dtype='float32'))\n anchor_points = paddle.concat(anchor_points)\n stride_tensor = paddle.concat(stride_tensor)\n return anchor_points, stride_tensor\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 333, "n_words": 84, "vocab_size": 62, "complexity": 3, "nloc": 22, "token_counts": 206, "n_ast_nodes": 319, "n_identifiers": 32, "random_cut": "def _generate_anchors(self, feats=None):\n # just use in eval time\n anchor_points = []\n stride_tensor = []\n for i, stride in enumerate(self.fpn_stride):\n if feats is not None:\n _, _, h, w = feats[i].shape\n else:\n h = math.ceil(self.eval_size[0] / stride)\n w = math.ceil(self.eval_size[1] / stride)\n shift_x = paddle.arange(end=w) + self.cell_offset\n shift_y = paddle.arange(end=h) + self.cell_offset\n shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)\n anchor_point = paddle.cast(\n paddle.stack(\n [shift_x, shift_y], axis=-1), dtype='float32')\n anchor_points.append(anchor_point.reshape([-1, 2]))\n stride_tensor.append(\n paddle.full(\n [h * w, 1], stride, dtype='float32'))\n ancho" }, { "id": 257263, "commit_id": "f8e02310bf0dfbd1ab79a1c3c73434e0aeba4f4b", "repo": "haystack", "path": "test/test_pipeline.py", "file_name": "test_pipeline.py", "fun_name": "test_graph_validation_invalid_node", "commit_message": "Validate YAML files without loading the nodes (#2438)\n\n* Remove BasePipeline and make a module for RayPipeline\r\n\r\n* Can load pipelines from yaml, plenty of issues left\r\n\r\n* Extract graph validation logic into _add_node_to_pipeline_graph & refactor 
load_from_config and add_node to use it\r\n\r\n* Fix pipeline tests\r\n\r\n* Move some tests out of test_pipeline.py and create MockDenseRetriever\r\n\r\n* myoy and pylint (silencing too-many-public-methods)\r\n\r\n* Fix issue found in some yaml files and in schema files\r\n\r\n* Fix paths to YAML and fix some typos in Ray\r\n\r\n* Fix eval tests\r\n\r\n* Simplify MockDenseRetriever\r\n\r\n* Fix Ray test\r\n\r\n* Accidentally pushed merge coinflict, fixed\r\n\r\n* Typo in schemas\r\n\r\n* Typo in _json_schema.py\r\n\r\n* Slightly reduce noisyness of version validation warnings\r\n\r\n* Fix version logs tests\r\n\r\n* Fix version logs tests again\r\n\r\n* remove seemingly unused file\r\n\r\n* Add check and test to avoid adding the same node to the pipeline twice\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Revert config to pipeline_config\r\n\r\n* Remo0ve unused import\r\n\r\n* Complete reverting to pipeline_config\r\n\r\n* Some more stray config=\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Feedback\r\n\r\n* Move back other_nodes tests into pipeline tests temporarily\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fixing tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fixing ray and standard pipeline tests\r\n\r\n* Rename colliding load() methods in dense retrievers and faiss\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix mypy on ray.py as well\r\n\r\n* Add check for no root node\r\n\r\n* Fix tests to use load_from_directory and load_index\r\n\r\n* Try to workaround the disabled add_node of RayPipeline\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix Ray test\r\n\r\n* Fix FAISS tests\r\n\r\n* Relax class check in _add_node_to_pipeline_graph\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Try to fix mypy in ray.py\r\n\r\n* unused import\r\n\r\n* Try another fix for Ray\r\n\r\n* Fix connector tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix ray\r\n\r\n* Update Documentation & Code Style\r\n\r\n* use BaseComponent.load() in pipelines/base.py\r\n\r\n* another round of feedback\r\n\r\n* stray BaseComponent.load()\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix FAISS tests too\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>\r\nCo-authored-by: tstadel <60758086+tstadel@users.noreply.github.com>", "code": "def test_graph_validation_invalid_node():\n docstore = MockDocumentStore()\n retriever = DummyRetriever(document_store=docstore)\n pipeline = Pipeline()\n pipeline.add_node(name=\"DocStore\", component=docstore, inputs=[\"Query\"])\n\n with pytest.raises(PipelineConfigError, match=\"Cannot find node 'InvalidNode'\"):\n pipeline.add_node(name=\"Retriever\", component=retriever, inputs=[\"InvalidNode\"])\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 44, "n_words": 23, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 70, "n_ast_nodes": 121, "n_identifiers": 16, "random_cut": "def test_graph_validation_invalid_node():\n docstore = MockDocumentStore()\n retriever = DummyRetriever(document_store=docstore)\n pipeline = Pipeline()\n pipeline.add_node(name=\"DocStore\"" }, { "id": 7944, "commit_id": "0ab41a299cc690940b750a79b704d69544315702", "repo": "ludwig", "path": "tests/ludwig/features/test_image_feature.py", "file_name": "test_image_feature.py", "fun_name": "test_image_preproc_module_bad_num_channels", "commit_message": "Update missing value strategy to only 
allow bfill and ffill (#2457)\n\n* push changes\r\n\r\n* working missing value strategy\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Add type hints to backward compatibility transformations\r\n\r\n* Update test to test both missing value strategy updates\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def test_image_preproc_module_bad_num_channels():\n metadata = {\n \"preprocessing\": {\n \"missing_value_strategy\": BFILL,\n \"in_memory\": True,\n \"resize_method\": \"interpolate\",\n \"scaling\": \"pixel_normalization\",\n \"num_processes\": 1,\n \"infer_image_num_channels\": True,\n \"infer_image_dimensions\": True,\n \"infer_image_max_height\": 256,\n \"infer_image_max_width\": 256,\n \"infer_image_sample_size\": 100,\n \"height\": 12,\n \"width\": 12,\n \"num_channels\": 2,\n },\n \"reshape\": (2, 12, 12),\n }\n module = _ImagePreprocessing(metadata)\n\n with pytest.raises(ValueError):\n module(torch.rand(2, 3, 10, 10))\n\n\n@pytest.mark.parametrize(\"resize_method\", [INTERPOLATE, CROP_OR_PAD])\n@pytest.mark.parametrize([\"num_channels\", \"num_channels_expected\"], [(1, 3), (3, 1)])", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"resize_method\", [INTERPOLATE, CROP_OR_PAD])\n@pytest.mark.parametrize([\"num_channels\", \"num_channels_expected\"], [(1, 3), (3, 1)])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 237, "n_words": 57, "vocab_size": 50, "complexity": 1, "nloc": 22, "token_counts": 104, "n_ast_nodes": 243, "n_identifiers": 14, "random_cut": "def test_image_preproc_module_bad_num_channels():\n metadata = {\n \"preprocessing\": {\n \"missing_value_strategy\": BFILL,\n \"in_memory\": True,\n \"resize_method\": \"interpolate\",\n \"scaling\": \"pixel_normalization\",\n \"num_processes\": 1,\n \"infer_image_num_channels\": True,\n \"infer_image_dimensions\": True,\n \"infer_image_max_height\": 256,\n \"infer_image_max_width\": 256,\n \"infer_image_sample_size\": 100,\n \"height\": 12,\n \"width\": 12,\n \"num_channels\": 2,\n },\n \"reshape\": (2, 12, 12),\n }\n module = _ImagePreprocessing(metadata)\n\n with pytest.raises(ValueError):\n module(torch.rand(2, 3, 10, 10))\n\n\n@pytest.mark.parametrize(\"resize_method\", [INTERPOLATE," }, { "id": 220253, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ast.py", "file_name": "ast.py", "fun_name": "visit_arg", "commit_message": "add python 3.10.4 for windows", "code": "def visit_arg(self, node):\n self.write(node.arg)\n if node.annotation:\n self.write(\": \")\n self.traverse(node.annotation)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 5, "token_counts": 34, "n_ast_nodes": 57, "n_identifiers": 7, "random_cut": "def visit_arg(self, node):\n self.write(" }, { "id": 285020, "commit_id": "89297fadc4b5f9381b259f16314e44f9ba9be7bd", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/portfolio/test_portfolio_model.py", "file_name": "test_portfolio_model.py", "fun_name": "test_tracking_error", "commit_message": "Adding new portfolio metrics (#2029)\n\n* Making pa active + pa minor features\r\n\r\nMakes pa actice and adds country to the df. The groupby command also gets percents of holding allocation. 
It also fixes warnings and prepares for a later pr that I'm currently working on.\r\n\r\n* Fix linting\r\n\r\n* black linter\r\n\r\n* Fixes\r\n\r\nShould fix everything\r\n\r\n* Linting\r\n\r\n* Making pa controller to base class standard\r\n\r\n* Fix linting\r\n\r\n* first metrics\r\n\r\n* Adding calmar ratio\r\n\r\n* Adding metrics to controller\r\n\r\n* Linting\r\n\r\n* Linting\r\n\r\n* Test fixes\r\n\r\n* Test fixes\r\n\r\n* Adding tests\r\n\r\n* Linting\r\n\r\n* Restructuring added tests\r\n\r\n* Linting\r\n\r\n* Updating hugo website and fixing help\r\n\r\n* Fixing commands\r\n\r\n* Fix kelly criterion command\r\n\r\n* Fixing tests\r\n\r\n* Linting\r\n\r\n* More linting\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Jeroen Bouma ", "code": "def test_tracking_error(recorder):\n result_df, _ = portfolio_model.get_tracking_error(\n portfolio_returns, benchmark_returns\n )\n\n recorder.capture(result_df)\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 25, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 23, "n_ast_nodes": 37, "n_identifiers": 9, "random_cut": "def test_tracking_error(recorder):\n result_df, _ = portfolio_model.get_tracking_error(\n portfolio_return" }, { "id": 172322, "commit_id": "35a7f807ac9f02128333c1b5df0f03c897d13445", "repo": "pandas", "path": "pandas/tests/arithmetic/test_datetime64.py", "file_name": "test_datetime64.py", "fun_name": "test_dt64arr_addsub_object_dtype_2d", "commit_message": "API: dont do type inference on arithmetic results (#49714)\n\n* API: dont do type inference on arithmetic results\r\n\r\n* mypy fixup\r\n\r\n* use concat_compat\r\n\r\n* dont infer in TimedeltaArray\r\n\r\n* update addsub\r\n\r\n* avoid messing with box_expected", "code": "def test_dt64arr_addsub_object_dtype_2d():\n # block-wise DataFrame operations will require operating on 2D\n # DatetimeArray/TimedeltaArray, so check that specifically.\n dti = date_range(\"1994-02-13\", freq=\"2W\", periods=4)\n dta = dti._data.reshape((4, 1))\n\n other = np.array([[pd.offsets.Day(n)] for n in range(4)])\n assert other.shape == dta.shape\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = dta + other\n with tm.assert_produces_warning(PerformanceWarning):\n expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1)\n\n tm.assert_numpy_array_equal(result, expected)\n\n with tm.assert_produces_warning(PerformanceWarning):\n # Case where we expect to get a TimedeltaArray back\n result2 = dta - dta.astype(object)\n\n assert result2.shape == (4, 1)\n assert all(td.value == 0 for td in result2.ravel())\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 150, "n_words": 86, "vocab_size": 64, "complexity": 3, "nloc": 14, "token_counts": 165, "n_ast_nodes": 266, "n_identifiers": 30, "random_cut": "def test_dt64arr_addsub_object_dtype_2d():\n # block-wise DataFrame operations will require operating on 2D\n # DatetimeArray/TimedeltaArray, so check that specifically.\n dti = date_range(\"1994-02-13\", freq=\"2W\", periods=4)\n dta = dti._data.reshape((4, 1))\n\n other = np.array([[pd.offsets.Day(n)] for n in range(4)])\n assert other.shape == dta.shape\n\n with tm.assert_produces_warning(PerformanceWarning):" }, { "id": 267044, "commit_id": "4d69c09695c8f78b95edf51314999be3c19b62eb", "repo": "ansible", "path": 
"lib/ansible/galaxy/collection/concrete_artifact_manager.py", "file_name": "concrete_artifact_manager.py", "fun_name": "get_direct_collection_dependencies", "commit_message": "ansible-galaxy collection - ensure dependencies are a dict (#77561)\n\n* fix traceback when installing collection with dependencies set to None", "code": "def get_direct_collection_dependencies(self, collection):\n # type: (Candidate | Requirement) -> dict[str, str]\n \n collection_dependencies = self.get_direct_collection_meta(collection)['dependencies']\n if collection_dependencies is None:\n collection_dependencies = {}\n return collection_dependencies # type: ignore[return-value]\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 73, "n_words": 26, "vocab_size": 20, "complexity": 2, "nloc": 5, "token_counts": 30, "n_ast_nodes": 54, "n_identifiers": 5, "random_cut": "def get_direct_collection_dependencies(self, collection):\n # " }, { "id": 125005, "commit_id": "569fe0109629048d08e1d9e023f7769f10bd2244", "repo": "ray", "path": "rllib/offline/tests/test_dataset_reader.py", "file_name": "test_dataset_reader.py", "fun_name": "test_absolute_zip", "commit_message": "[RLlib] improved unittests for dataset_reader and fixed bugs (#26458)", "code": "def test_absolute_zip(self):\n \n\n # this should work regardless of where th current working directory is.\n with tempfile.TemporaryDirectory() as tmp_dir:\n cwdir = os.getcwd()\n os.chdir(tmp_dir)\n unzipped_paths = _unzip_if_needed(\n [str(Path(self.absolute_path) / \"enormous.zip\")], \"json\"\n )\n self.assertEqual(\n str(Path(unzipped_paths[0]).absolute()),\n str(Path(\"./\").absolute() / \"enormous.json\"),\n )\n\n assert all([Path(fpath).exists() for fpath in unzipped_paths])\n os.chdir(cwdir)\n\n # @TODO: unskip when this is fixed", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 207, "n_words": 50, "vocab_size": 45, "complexity": 2, "nloc": 13, "token_counts": 106, "n_ast_nodes": 186, "n_identifiers": 19, "random_cut": "def test_absolute_zip(self):\n \n\n # this should work regardless of where th current working directory is.\n with tempfile.TemporaryDirectory() as tmp_dir:\n cwdir = os.getcwd()\n os.chdir(tmp_dir)\n unzipped_paths = _unzip_if_needed(\n [str(Path(self.absolute_pat" }, { "id": 22861, "commit_id": "39c49e07066b2a53e176d555af6a7bf8aabb8a9c", "repo": "Python", "path": "VoiceAssistant/Project_Basic_struct/speechtotext.py", "file_name": "speechtotext.py", "fun_name": "stt", "commit_message": "VoiceAssistant\n\nThis is Voice Assistant coded using Python which can do the following: -\r\n 1. Speak Text entered by User.\r\n 2. Search anything on Google.\r\n 3. Search anything on Wikipedia.\r\n 4. Read an MS Word(docx) document.\r\n 5. Read a book(PDF).\r\n 6. 
Can be used as a Dictator.", "code": "def stt():\r\n with sr.Microphone() as source:\r\n # read the audio data from the default microphone\r\n audio_data = r.record(source, duration=5)\r\n print(\"Recognizing...\")\r\n # convert speech to text\r\n text = r.recognize_google(audio_data)\r\n print(text)", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 73, "n_words": 29, "vocab_size": 25, "complexity": 1, "nloc": 6, "token_counts": 41, "n_ast_nodes": 75, "n_identifiers": 11, "random_cut": "def stt():\r\n with sr.Microphone() as source:\r\n # read the audio data from the default microphone\r\n audio_data = r.record(source, duration=5)\r\n " }, { "id": 155185, "commit_id": "193505fdf0c984743397ba3df56262f30aee13a8", "repo": "modin", "path": "modin/core/execution/unidist/implementations/pandas_on_unidist/partitioning/partition.py", "file_name": "partition.py", "fun_name": "_apply_list_of_funcs", "commit_message": "FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059)\n\nSigned-off-by: Igoshev, Iaroslav ", "code": "def _apply_list_of_funcs(call_queue, partition): # pragma: no cover\n \n for func, f_args, f_kwargs in call_queue:\n func = deserialize(func)\n args = deserialize(f_args)\n kwargs = deserialize(f_kwargs)\n try:\n partition = func(partition, *args, **kwargs)\n # Sometimes Arrow forces us to make a copy of an object before we operate on it. We\n # don't want the error to propagate to the user, and we want to avoid copying unless\n # we absolutely have to.\n except ValueError:\n partition = func(partition.copy(), *args, **kwargs)\n\n return (\n partition,\n len(partition) if hasattr(partition, \"__len__\") else 0,\n len(partition.columns) if hasattr(partition, \"columns\") else 0,\n unidist.get_ip(),\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 211, "n_words": 92, "vocab_size": 71, "complexity": 5, "nloc": 15, "token_counts": 109, "n_ast_nodes": 174, "n_identifiers": 16, "random_cut": "def _apply_list_of_funcs(call_queue, partition): # pragma: no cover\n \n for func, f_args, f_kwargs in call_queue:\n func = deserialize(func)\n args = deserialize(f_args)\n kwargs = deserialize(f_kwargs)\n try:\n partition = func(partition, *args, " }, { "id": 74930, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/embeds/blocks.py", "file_name": "blocks.py", "fun_name": "get_prep_value", "commit_message": "Reformat with black", "code": "def get_prep_value(self, value):\n # serialisable value should be a URL string\n if value is None:\n return \"\"\n else:\n return value.url\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 62, "n_words": 20, "vocab_size": 18, "complexity": 2, "nloc": 5, "token_counts": 20, "n_ast_nodes": 35, "n_identifiers": 4, "random_cut": "def get_prep_value(self, value):\n # serialisable value should be a URL string\n if value is None:\n return \"\"\n else:\n return value.url\n" }, { "id": 182523, "commit_id": "ef99069cf4cc0782f207a0a53689567bf85110ee", "repo": "textual", "path": "src/textual/_compositor.py", "file_name": "_compositor.py", "fun_name": "_get_renders", "commit_message": "box model", "code": "def _get_renders(self) -> Iterable[tuple[Region, Region, Lines]]:\n \n # If a renderable throws an error while rendering, 
the user likely doesn't care about the traceback\n # up to this point.\n _rich_traceback_guard = True\n\n if self.map:\n widget_regions = sorted(\n [\n (widget, region, order, clip)\n for widget, (region, order, clip) in self.map.items()\n if widget.is_visual and widget.visible\n ],\n key=itemgetter(2),\n reverse=True,\n )\n else:\n widget_regions = []\n\n for widget, region, _order, clip in widget_regions:\n\n lines = widget._get_lines()\n\n if region in clip:\n yield region, clip, lines\n elif clip.overlaps(region):\n new_region = region.intersection(clip)\n delta_x = new_region.x - region.x\n delta_y = new_region.y - region.y\n splits = [delta_x, delta_x + new_region.width]\n lines = lines[delta_y : delta_y + new_region.height]\n divide = Segment.divide\n lines = [list(divide(line, splits))[1] for line in lines]\n yield region, clip, lines\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 488, "n_words": 121, "vocab_size": 85, "complexity": 9, "nloc": 32, "token_counts": 203, "n_ast_nodes": 306, "n_identifiers": 37, "random_cut": "def _get_renders(self) -> Iterable[tuple[Region, Region, Lines]]:\n \n # If a renderable throws an error while rendering, the user likely doesn't care about the traceback\n # up to this point.\n _rich_traceback_guard = True\n\n if self.map:\n widget_regions = sorted(\n [\n (widget, region, order, clip)\n for widget, (region, order, clip) in self.map.items()\n if widget.is_visual and widget.visible\n ],\n key=itemgetter(2),\n reverse=True,\n )\n else:\n widget_regions = []\n\n for widget, region, _order, clip in widget_regions:\n\n lines = widget._get_lines()\n\n if region in clip:\n yield region, clip, lines\n elif clip.overlaps(region):\n new_region = region.intersection(clip)\n delta_x = new_region.x - region.x\n delta_y = new_region.y - region.y\n splits = [delta_x, delta_x + new_region.width]\n lines = lines[delta_y : delta_y + new_region.h" }, { "id": 33785, "commit_id": "f5f430e5c80b85b57bb910435e45d84746210133", "repo": "transformers", "path": "tests/models/gpt_neox_japanese/test_tokenization_gpt_neox_japanese.py", "file_name": "test_tokenization_gpt_neox_japanese.py", "fun_name": "test_sequence_builders", "commit_message": "Add support for Japanese GPT-NeoX-based model by ABEJA, Inc. (#18814)\n\n* add gpt-neox-japanese model and tokenizer as new model\r\n\r\n* Correction to PR's comment for GPT NeoX Japanese\r\n- Fix to be able to use gpu\r\n- Add comment # Copied... 
at the top of RotaryEmbedding\r\n- Implement nn.Linear instead of original linear class\r\n- Add generation test under @slow\r\n\r\n* fix bias treatment for gpt-neox-japanese\r\n\r\n* Modidy gpt-neox-japanese following PR\r\n- add doc for bias_dropout_add\r\n- style change following a PR comment\r\n\r\n* add document for gpt-neox-japanese\r\n\r\n* remove unused import from gpt-neox-japanese\r\n\r\n* fix README for gpt-neox-japanese", "code": "def test_sequence_builders(self):\n tokenizer = self.tokenizer_class.from_pretrained(\"abeja/gpt-neox-japanese-2.7b\")\n\n ids_1 = tokenizer.encode(\"ありがとう。\", add_special_tokens=False)\n ids_2 = tokenizer.encode(\"どういたしまして。\", add_special_tokens=False)\n\n encoded_sentence = tokenizer.build_inputs_with_special_tokens(ids_1)\n encoded_pair = tokenizer.build_inputs_with_special_tokens(ids_1, ids_2)\n\n assert encoded_sentence == ids_1\n assert encoded_pair == ids_1 + ids_2\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 78, "n_words": 30, "vocab_size": 18, "complexity": 1, "nloc": 8, "token_counts": 67, "n_ast_nodes": 110, "n_identifiers": 12, "random_cut": "def test_sequence_builders(self):\n tokenizer = self.tokenizer_class.from_pretrained(\"abeja/gpt-neox-japanese-2.7b\")\n\n ids_1 = tokenizer.encode(\"ありがとう。\", add" }, { "id": 164623, "commit_id": "a1ce6a0eb07e5f969ab192b792083cb1c1f702d5", "repo": "pandas", "path": "pandas/tests/computation/test_eval.py", "file_name": "test_eval.py", "fun_name": "test_pow", "commit_message": "TST: Don't use autouse fixture in test_eval (#45832)", "code": "def test_pow(self, lhs, rhs, engine, parser):\n # odd failure on win32 platform, so skip\n ex = \"lhs ** rhs\"\n expected = _eval_single_bin(lhs, \"**\", rhs, engine)\n result = pd.eval(ex, engine=engine, parser=parser)\n\n if (\n is_scalar(lhs)\n and is_scalar(rhs)\n and isinstance(expected, (complex, np.complexfloating))\n and np.isnan(result)\n ):\n msg = \"(DataFrame.columns|numpy array) are different\"\n with pytest.raises(AssertionError, match=msg):\n tm.assert_numpy_array_equal(result, expected)\n else:\n tm.assert_almost_equal(result, expected)\n\n ex = \"(lhs ** rhs) ** rhs\"\n result = pd.eval(ex, engine=engine, parser=parser)\n\n middle = _eval_single_bin(lhs, \"**\", rhs, engine)\n expected = _eval_single_bin(middle, \"**\", rhs, engine)\n tm.assert_almost_equal(result, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 277, "n_words": 82, "vocab_size": 53, "complexity": 5, "nloc": 20, "token_counts": 161, "n_ast_nodes": 249, "n_identifiers": 27, "random_cut": "def test_pow(self, lhs, rhs, engine, parser):\n # odd failure on win32 platform, so skip\n ex = \"lhs ** rhs\"\n expected = _eval_single_bin(lhs, \"**\", rhs, engine)\n result = pd.eval(ex, engine=engine, parser=parser)\n\n if (\n is_scalar(lhs)\n and is_scalar(rhs)\n and isinstance(expected, (complex, np.complexfloating))\n and np.isnan(result)\n ):\n msg = \"(DataFrame.columns|numpy array) are different\"\n with pytest.raises(AssertionError, match=msg):\n tm.assert_numpy_array_equal(result, expected)\n else:\n tm.assert_almost_equal(result, expected)\n\n ex = \"(lhs ** rhs) ** rhs\"\n result = pd.eval(ex, engine=engine, parser=parser)\n\n middle = _eval_single_bin(lhs, \"**\", rhs, engine)\n " }, { "id": 182508, "commit_id": "f541c26587ba6e1eb07e5d7b6030082c729e9f2e", "repo": 
"textual", "path": "tests/test_region_group.py", "file_name": "test_region_group.py", "fun_name": "test_inline_ranges_fully_overlapping_regions", "commit_message": "Convert method inline_ranges to function, remove RegionGroup class", "code": "def test_inline_ranges_fully_overlapping_regions():\n regions = [Region(1, 1, 3, 3), Region(2, 2, 1, 1), Region(0, 2, 3, 1)]\n assert list(inline_ranges(regions)) == [\n InlineRange(1, 1, 3), InlineRange(2, 0, 3), InlineRange(3, 1, 3)\n ]\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 45, "n_words": 30, "vocab_size": 23, "complexity": 1, "nloc": 5, "token_counts": 77, "n_ast_nodes": 103, "n_identifiers": 6, "random_cut": "def test_inline_ranges_fully_overlapping_regions():\n regions" }, { "id": 95409, "commit_id": "2a7a0aac94e3b612cb49ca24c8882b7290c788f8", "repo": "sentry", "path": "src/sentry_plugins/slack/client.py", "file_name": "client.py", "fun_name": "request", "commit_message": "ref(slack plugin): Don't raise errors for unactionable things (#30998)\n\n* ref(slack plugin): Don't raise errors for unactionable things", "code": "def request(self, data):\n try:\n return self._request(\n path=self.webhook, method=\"post\", data=data, json=False, allow_text=True\n )\n except ApiError as e:\n # Ignore 404 and ignorable errors from slack webhooks\n if e.text and e.text in IGNORABLE_SLACK_ERRORS or e.code == 404:\n return\n raise e\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 136, "n_words": 38, "vocab_size": 35, "complexity": 5, "nloc": 9, "token_counts": 61, "n_ast_nodes": 95, "n_identifiers": 14, "random_cut": "def request(self, data):\n try:\n return self._request(\n path=self.webhook," }, { "id": 152693, "commit_id": "59a2b9e5afc27d2fda72069ca0635070535d18fe", "repo": "stable-diffusion-webui", "path": "modules/deepbooru.py", "file_name": "deepbooru.py", "fun_name": "get_deepbooru_tags", "commit_message": "deepdanbooru interrogator", "code": "def get_deepbooru_tags(pil_image, threshold=0.5):\n with ProcessPoolExecutor() as executor:\n f = executor.submit(_load_tf_and_return_tags, pil_image, threshold)\n ret = f.result() # will rethrow any exceptions\n return ret", "url": "https://github.com/AUTOMATIC1111/stable-diffusion-webui.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 42, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 5, "token_counts": 39, "n_ast_nodes": 62, "n_identifiers": 10, "random_cut": "def get_deepbooru_tags(pil_image, threshold=0.5):\n with ProcessPoolExecutor() as execut" }, { "id": 196160, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/permutations.py", "file_name": "permutations.py", "fun_name": "__add__", "commit_message": "Updated import locations", "code": "def __add__(self, other):\n \n rank = (self.rank() + other) % self.cardinality\n rv = self.unrank_lex(self.size, rank)\n rv._rank = rank\n return rv\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 54, "n_words": 19, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 42, "n_ast_nodes": 68, "n_identifiers": 9, "random_cut": "def __add__(self, other):\n \n rank = (self.rank() + other) % self.cardinality\n rv = self.unrank_lex(self.size, rank)\n " }, { 
"id": 164631, "commit_id": "c055dc4e6be9fc1b68d873a1ace286322dadd5e1", "repo": "pandas", "path": "pandas/tests/io/test_stata.py", "file_name": "test_stata.py", "fun_name": "test_chunked_categorical_partial", "commit_message": "TST: Don't use autouse fixture in test_stata (#45831)", "code": "def test_chunked_categorical_partial(datapath):\n dta_file = datapath(\"io\", \"data\", \"stata\", \"stata-dta-partially-labeled.dta\")\n values = [\"a\", \"b\", \"a\", \"b\", 3.0]\n with StataReader(dta_file, chunksize=2) as reader:\n with tm.assert_produces_warning(CategoricalConversionWarning):\n for i, block in enumerate(reader):\n assert list(block.cats) == values[2 * i : 2 * (i + 1)]\n if i < 2:\n idx = pd.Index([\"a\", \"b\"])\n else:\n idx = pd.Index([3.0], dtype=\"float64\")\n tm.assert_index_equal(block.cats.cat.categories, idx)\n with tm.assert_produces_warning(CategoricalConversionWarning):\n with StataReader(dta_file, chunksize=5) as reader:\n large_chunk = reader.__next__()\n direct = read_stata(dta_file)\n tm.assert_frame_equal(direct, large_chunk)\n\n\n@pytest.mark.parametrize(\"chunksize\", (-1, 0, \"apple\"))", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"chunksize\", (-1, 0, \"apple\"))", "n_ast_errors": 1, "ast_levels": 19, "n_whitespaces": 223, "n_words": 73, "vocab_size": 57, "complexity": 3, "nloc": 17, "token_counts": 174, "n_ast_nodes": 319, "n_identifiers": 30, "random_cut": "def test_chunked_categorical_partial(datapath):\n dta_file = datapath(\"io\", \"data\", \"stata\", \"stata-dta-partially-labeled.dta\")\n values = [\"a\", \"b\", \"a\", \"b\", 3.0]\n with StataReader(dta_file, chunksize=2) as reader:\n with tm.assert_produces_warning(CategoricalConversionWarning):\n for i, block in enumerate(reader):\n assert list(block.cats) == values[2 * i : 2 * (i + 1)]\n if i < 2:\n idx = pd.Index([\"a\", \"b\"])\n else:\n idx = pd.Index([3.0], dtype=\"float64\")\n tm.assert_index_equal(block.cats.cat.categories, idx)\n wit" }, { "id": 244407, "commit_id": "19441631117425b5521655b23cd4d885a7858478", "repo": "mmdetection", "path": "mmdet/datasets/samplers/batch_sampler.py", "file_name": "batch_sampler.py", "fun_name": "__iter__", "commit_message": "[Refactor] Refactor samplers.", "code": "def __iter__(self) -> Sequence[int]:\n for idx in self.sampler:\n data_info = self.sampler.dataset.get_data_info(idx)\n width, height = data_info['width'], data_info['height']\n bucket_id = 0 if width < height else 1\n bucket = self._aspect_ratio_buckets[bucket_id]\n bucket.append(idx)\n # yield a batch of indices in the same aspect ratio group\n if len(bucket) == self.batch_size:\n yield bucket[:]\n del bucket[:]\n\n # yield the rest data and reset the bucket\n left_data = self._aspect_ratio_buckets[0] + self._aspect_ratio_buckets[\n 1]\n self._aspect_ratio_buckets = [[] for _ in range(2)]\n while len(left_data) > 0:\n if len(left_data) <= self.batch_size:\n if not self.drop_last:\n yield left_data[:]\n left_data = []\n else:\n yield left_data[:self.batch_size]\n left_data = left_data[self.batch_size:]\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 346, "n_words": 93, "vocab_size": 66, "complexity": 8, "nloc": 21, "token_counts": 167, "n_ast_nodes": 269, "n_identifiers": 21, "random_cut": "def __iter__(self) -> Sequence[int]:\n for idx in self.sampler:\n data_info = self.sampler.dataset.get_data_info(idx)\n width, height = 
data_info['width'], data_info['height']\n bucket_id = 0 if width < height else 1\n bucket = self._aspect_ratio_buckets[bucket_id]\n bucket.append(idx)\n # yield a batch of indices in the same aspect ratio group\n if len(bucket) == self.batch_size:\n yield bucket[:]\n del bucket[:]\n\n # yield the rest data and reset the bucket\n left_data = self._aspect_ratio_buckets[0] + self._aspect_ratio_buckets[\n 1]\n se" }, { "id": 189767, "commit_id": "10a5f40aa5c51bd6822742b707acd2390ce2cc23", "repo": "manim", "path": "manim/mobject/opengl/opengl_vectorized_mobject.py", "file_name": "opengl_vectorized_mobject.py", "fun_name": "get_stroke_shader_wrapper", "commit_message": "Reuse shader wrappers and shader data (#2062)\n\n* reuse shader wrappers and shader data arrays\r\n\r\n* Update uniforms\r\n\r\nCo-authored-by: Laith Bahodi <70682032+hydrobeam@users.noreply.github.com>\r\nCo-authored-by: Darylgolden ", "code": "def get_stroke_shader_wrapper(self):\n self.update_stroke_shader_wrapper()\n return self.stroke_shader_wrapper\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 24, "n_identifiers": 4, "random_cut": "def get_stroke_shader_wrapper(self):\n self.update_stroke_shader_wrapper()\n return " }, { "id": 124140, "commit_id": "68b893369cada42be61d05843a9b7267b4f0b353", "repo": "ray", "path": "python/ray/data/tests/test_dataset.py", "file_name": "test_dataset.py", "fun_name": "test_groupby_arrow", "commit_message": "[dataset] Support push-based shuffle in groupby operations (#25910)\n\nAllows option for push-based shuffle in groupby operations, to improve scalability to larger Datasets.", "code": "def test_groupby_arrow(ray_start_regular_shared, use_push_based_shuffle):\n # Test empty dataset.\n agg_ds = (\n ray.data.range_table(10)\n .filter(lambda r: r[\"value\"] > 10)\n .groupby(\"value\")\n .count()\n )\n assert agg_ds.count() == 0\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 62, "n_words": 23, "vocab_size": 23, "complexity": 1, "nloc": 8, "token_counts": 49, "n_ast_nodes": 83, "n_identifiers": 11, "random_cut": "def test_groupby_arrow(ray_start_regular_shared, use_push_based_shuffle):\n # Test empty dataset.\n agg_ds = (\n ray.data.range_table(10)\n " }, { "id": 116380, "commit_id": "25201296fe9944420667cb5fed4d676b869f48ff", "repo": "mindsdb", "path": "mindsdb/api/http/namespaces/config.py", "file_name": "config.py", "fun_name": "put", "commit_message": "storage", "code": "def put(self, name):\n params = {}\n params.update((request.json or {}).get('params', {}))\n params.update(request.form or {})\n\n if len(params) == 0:\n abort(400, \"type of 'params' must be dict\")\n\n files = request.files\n temp_dir = None\n if files is not None and len(files) > 0:\n temp_dir = tempfile.mkdtemp(prefix='integration_files_')\n for key, file in files.items():\n temp_dir_path = Path(temp_dir)\n file_name = Path(file.filename)\n file_path = temp_dir_path.joinpath(file_name).resolve()\n if temp_dir_path not in file_path.parents:\n raise Exception(f'Can not save file at path: {file_path}')\n file.save(file_path)\n params[key] = file_path\n\n is_test = params.get('test', False)\n if is_test:\n del params['test']\n\n handler = request.integration_controller.create_tmp_handler(\n handler_type=params.get('type'),\n 
connection_data=params\n )\n status = handler.check_connection()\n if temp_dir is not None:\n shutil.rmtree(temp_dir)\n return status, 200\n\n integration = request.integration_controller.get(name, sensitive_info=False)\n if integration is not None:\n abort(400, f\"Integration with name '{name}' already exists\")\n\n try:\n engine = params['type']\n if engine is not None:\n del params['type']\n request.integration_controller.add(name, engine, params)\n\n if is_test is False and params.get('publish', False) is True:\n stream_controller = StreamController(request.company_id)\n if engine in stream_controller.known_dbs and params.get('publish', False) is True:\n stream_controller.setup(name)\n except Exception as e:\n log.error(str(e))\n if temp_dir is not None:\n shutil.rmtree(temp_dir)\n abort(500, f'Error during config update: {str(e)}')\n\n if temp_dir is not None:\n shutil.rmtree(temp_dir)\n return '', 200\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 712, "n_words": 177, "vocab_size": 105, "complexity": 19, "nloc": 49, "token_counts": 368, "n_ast_nodes": 616, "n_identifiers": 52, "random_cut": "def put(self, name):\n params = {}\n params.update((request.json or {}).get('params', {}))\n params.update(request.form or {})\n\n if len(params) == 0:\n abort(400, \"type of 'params' must be dict\")\n\n files = request.files\n temp_dir = None\n if files is not None and len(files) > 0:\n temp_dir = tempfile.mkdtemp(prefix='integration_files_')\n for key, file in files.items():\n temp_dir_path = Path(temp_dir)\n file_name = Path(file.filename)\n file_path = temp_dir_path.joinpath(file_name).resolve()\n if temp_dir_path not in file_path.parents:\n raise Exception(f'Can not save file at path: {file_path}')\n file.save(file_path)\n params[key] = file_path\n\n is_test = params.get('test', False)\n if is_test:\n del params['test']\n\n handler = request.integration_controller.create_tmp_handler(\n handler_type=params.get('type'),\n connection_data=params\n )\n status = handler.check_connection()\n if temp_dir is not None:\n shutil.rmtree(temp_dir)\n return status, 200\n\n integration = request.integration_controller.get(name, sensitive_info=False)\n if integration is not None:\n abort(400, f\"Integration with name '{name}' already exists\")\n\n try:\n engine = params['type']\n if engine is not None:\n del params['type']\n request.integration_controller.add(name, engine, params)\n\n if is_test is False and params.get('publish', False) is True:\n stream_controller = StreamController(request.company_id)\n if engine in stream_controller.known_dbs and params.get('publish', False) is True:\n stream_controller.setup(name)\n except Exception as e:\n lo" }, { "id": 54988, "commit_id": "37549d157007f6eef07ed8b1e2e14efb73134840", "repo": "prefect", "path": "tests/orion/api/test_task_runs.py", "file_name": "test_task_runs.py", "fun_name": "test_set_task_run_state", "commit_message": "Use status constants instead of hardcoded values\n\nCloses: PrefectHQ/orion#1673", "code": "async def test_set_task_run_state(self, task_run, client, session):\n response = await client.post(\n f\"/task_runs/{task_run.id}/set_state\",\n json=dict(state=dict(type=\"RUNNING\", name=\"Test State\")),\n )\n assert response.status_code == status.HTTP_201_CREATED\n\n api_response = OrchestrationResult.parse_obj(response.json())\n assert api_response.status == responses.SetStateStatus.ACCEPT\n\n task_run_id = task_run.id\n 
session.expire_all()\n run = await models.task_runs.read_task_run(\n session=session, task_run_id=task_run_id\n )\n assert run.state.type == states.StateType.RUNNING\n assert run.state.name == \"Test State\"\n assert run.run_count == 1\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 166, "n_words": 50, "vocab_size": 37, "complexity": 1, "nloc": 16, "token_counts": 123, "n_ast_nodes": 202, "n_identifiers": 32, "random_cut": "async def test_set_task_run_state(self, task_run, client, session):\n response = await client.post(\n f\"/task_runs/{task_run.id}/set_state\",\n json=dict(state=dict(type=\"RUNNING\", name=\"Test State\")),\n )\n assert response.status_code == status.HTTP_201_CREATED\n\n api_response = OrchestrationResult.parse_obj(response.json())\n assert api_response.status == respons" }, { "id": 270640, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/dtensor/test_util.py", "file_name": "test_util.py", "fun_name": "tearDown", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def tearDown(self):\n super().tearDown()\n # Make sure all async ops finish.\n context.async_wait()\n\n # TODO(hthu): Remove the reset once we fixed the CopyToMesh with\n # DefaultMesh placement issue.\n reset_dtensor()\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 68, "n_words": 27, "vocab_size": 24, "complexity": 1, "nloc": 4, "token_counts": 20, "n_ast_nodes": 40, "n_identifiers": 6, "random_cut": "def tearDown(self):\n super().tearDown()\n # " }, { "id": 247522, "commit_id": "32c828d0f760492711a98b11376e229d795fd1b3", "repo": "synapse", "path": "tests/rest/media/v1/test_media_storage.py", "file_name": "test_media_storage.py", "fun_name": "default_config", "commit_message": "Add type hints to `tests/rest`. 
(#12208)\n\nCo-authored-by: Patrick Cloke ", "code": "def default_config(self) -> Dict[str, Any]:\n config = default_config(\"test\")\n\n config.update(\n {\n \"spam_checker\": [\n {\n \"module\": TestSpamChecker.__module__ + \".TestSpamChecker\",\n \"config\": {},\n }\n ]\n }\n )\n\n return config\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 188, "n_words": 25, "vocab_size": 22, "complexity": 1, "nloc": 13, "token_counts": 46, "n_ast_nodes": 80, "n_identifiers": 9, "random_cut": "def default_config(self) -> Dict[str, Any]:\n config = default_config(\"test\")\n\n config.update(\n {\n \"spam_checker\": [\n {\n \"module\": TestSpamChecker.__module__ + \".TestSpamChecker\",\n \"config\": {},\n }\n ]\n }\n )\n\n return config\n" }, { "id": 276547, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/tests/model_subclassing_test_util.py", "file_name": "model_subclassing_test_util.py", "fun_name": "call", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def call(self, inputs):\n x = self.dense1(inputs)\n x = self.bn(x)\n x = self.test_net(x)\n return self.dense2(x)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 41, "n_words": 14, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 38, "n_ast_nodes": 61, "n_identifiers": 8, "random_cut": "def call(self, inputs):\n x = self.dense1(inputs)\n x = self.bn(x)\n x = self.test_net(x)\n return self.d" }, { "id": 54916, "commit_id": "37549d157007f6eef07ed8b1e2e14efb73134840", "repo": "prefect", "path": "tests/orion/api/test_deployments.py", "file_name": "test_deployments.py", "fun_name": "test_delete_deployment", "commit_message": "Use status constants instead of hardcoded values\n\nCloses: PrefectHQ/orion#1673", "code": "async def test_delete_deployment(self, session, client, deployment):\n # schedule both an autoscheduled and manually scheduled flow run\n # for this deployment id, these should be deleted when the deployment is deleted\n flow_run_1 = await models.flow_runs.create_flow_run(\n session=session,\n flow_run=schemas.core.FlowRun(\n flow_id=deployment.flow_id,\n deployment_id=deployment.id,\n flow_version=\"1.0\",\n auto_scheduled=False,\n state=schemas.states.Scheduled(\n scheduled_time=pendulum.now(\"UTC\"),\n message=\"Flow run scheduled\",\n ),\n ),\n )\n flow_run_2 = await models.flow_runs.create_flow_run(\n session=session,\n flow_run=schemas.core.FlowRun(\n flow_id=deployment.flow_id,\n deployment_id=deployment.id,\n flow_version=\"1.0\",\n auto_scheduled=True,\n state=schemas.states.Scheduled(\n scheduled_time=pendulum.now(\"UTC\"),\n message=\"Flow run scheduled\",\n ),\n ),\n )\n await session.commit()\n\n # delete the deployment\n response = await client.delete(f\"/deployments/{deployment.id}\")\n assert response.status_code == status.HTTP_204_NO_CONTENT\n\n # make sure it's deleted\n response = await client.get(f\"/deployments/{deployment.id}\")\n assert response.status_code == status.HTTP_404_NOT_FOUND\n\n # make sure scheduled flow runs are deleted\n n_runs = await models.flow_runs.count_flow_runs(\n session,\n flow_run_filter=schemas.filters.FlowRunFilter(\n id={\"any_\": [flow_run_1.id, flow_run_2.id]}\n ),\n )\n assert n_runs == 0\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 604, "n_words": 116, "vocab_size": 67, "complexity": 1, "nloc": 39, "token_counts": 228, "n_ast_nodes": 371, "n_identifiers": 39, "random_cut": "async def test_delete_deployment(self, session, client, deployment):\n # schedule both an autoscheduled and manually scheduled flow run\n # for this deployment id, these should be deleted when the deployment is deleted\n flow_run_1 = await models.flow_runs.create_flow_run(\n session=session,\n flow_run=schemas.core.FlowRun(\n flow_id=deployment.flow_id,\n deployment_id=deployment.id,\n flow_version=\"1.0\",\n auto_scheduled=False,\n state=schemas.states.Scheduled(\n scheduled_time=pendulum.now(\"UTC\"),\n message=\"Flow run scheduled\",\n ),\n ),\n )\n flow_run_2 = await models.flow_runs.create_flow_run(\n session=session,\n flow_run=schemas.core.FlowRun(\n flow_id=deployment.flow_id,\n deployment_id=deployment.id,\n flow_version=\"1.0\",\n auto_scheduled=True,\n state=schemas.states.Scheduled(\n scheduled_time=pendulum.now(\"UTC\"),\n message=\"Flow run scheduled\",\n ),\n ),\n )\n await session.commit()\n\n # delete the deployment\n response = await client.delete(f\"/deployments/{deployment.id}\")\n assert response.status_code == status.HTTP_204_NO_CONTENT\n\n # make sure it's deleted\n response = await client.get(f\"/deployments/{deployment.id}\")\n assert response.status_code == status.HTTP_404_NOT_FOUND\n\n # make sure scheduled flow runs are deleted\n n_runs = await models.flow_runs.count_flo" }, { "id": 72176, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_templatetags.py", "file_name": "test_templatetags.py", "fun_name": "test_timesince_last_update_before_today_shows_timeago", "commit_message": "Reformat with black", "code": "def test_timesince_last_update_before_today_shows_timeago(self):\n dt = timezone.now() - timedelta(weeks=1, days=2)\n\n timesince = timesince_last_update(dt, use_shorthand=False)\n self.assertEqual(timesince, \"1\\xa0week, 2\\xa0days ago\")\n\n timesince = timesince_last_update(dt)\n self.assertEqual(timesince, \"1\\xa0week ago\")\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 56, "n_words": 22, "vocab_size": 17, "complexity": 1, "nloc": 6, "token_counts": 55, "n_ast_nodes": 93, "n_identifiers": 12, "random_cut": "def test_timesince_last_update_before_today_shows_timeago(self):\n dt = timezone.now() - timedelta(weeks=1, days=2)\n\n timesince = timesince_last_update(dt, use_shorthand=False)\n self.assertEqual(timesince, \"1\\xa0week, 2\\xa0days ago\")\n\n timesince = timesince_last_update(dt)\n self.assertEqual(timesince, \"1\\xa0week ago\")\n\n" }, { "id": 110252, "commit_id": "9b6abd0b4933811e0a45c2535ab8fd107db65dd9", "repo": "matplotlib", "path": "lib/matplotlib/bezier.py", "file_name": "bezier.py", "fun_name": "split_de_casteljau", "commit_message": "DOC: improve grammar and consistency", "code": "def split_de_casteljau(beta, t):\n \n beta = np.asarray(beta)\n beta_list = [beta]\n while True:\n beta = _de_casteljau1(beta, t)\n beta_list.append(beta)\n if len(beta) == 1:\n break\n left_beta = [beta[0] for beta in beta_list]\n right_beta = [beta[-1] for beta in reversed(beta_list)]\n\n return left_beta, right_beta\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 91, "n_words": 38, "vocab_size": 
28, "complexity": 5, "nloc": 11, "token_counts": 79, "n_ast_nodes": 125, "n_identifiers": 12, "random_cut": "def split_de_casteljau(beta, t):\n \n beta = np.asarray(beta)\n beta_list = [beta]\n while True:\n beta = _de_casteljau1(beta, t)\n beta_list.append(beta)\n if len(bet" }, { "id": 110059, "commit_id": "73622a0173916bfcb4cb7b9b393929be025e18c9", "repo": "matplotlib", "path": "lib/matplotlib/patches.py", "file_name": "patches.py", "fun_name": "transmute", "commit_message": "Simplify some patches path definitions.\n\n- When a Path ends with a CLOSEPOLY, it is not necessary to put a LINETO\n to the closing position before it (in fact that can result in an\n incorrect line join at that position), and the xy position associated\n with the CLOSEPOLY can just be (0, 0), as it is irrelevant.\n- For defining the codes arrays, for short paths (such as the patch\n shapes here), one can just use list unpacking for shorter definitions.\n- Rename the _path and _fillable lists in ArrowStyle to plural names.\n- Rely on the default tolerance of\n split_bezier_intersecting_with_closedpath (which is 0.01) rather than\n re-specifying the same magic value everywhere.\n- Remove inapplicable comment re: make_compound_path_from_polys (which\n only applies to polygons all of with the same number of sides, which\n is not the case when clipping to a bbox).", "code": "def transmute(self, path, mutation_size, linewidth):\n\n if self._beginarrow_head or self._endarrow_head:\n head_length = self.head_length * mutation_size\n head_width = self.head_width * mutation_size\n head_dist = np.hypot(head_length, head_width)\n cos_t, sin_t = head_length / head_dist, head_width / head_dist\n\n scaleA = mutation_size if self.scaleA is None else self.scaleA\n scaleB = mutation_size if self.scaleB is None else self.scaleB\n\n # begin arrow\n x0, y0 = path.vertices[0]\n x1, y1 = path.vertices[1]\n\n # If there is no room for an arrow and a line, then skip the arrow\n has_begin_arrow = self._beginarrow_head and (x0, y0) != (x1, y1)\n verticesA, codesA, ddxA, ddyA = (\n self._get_arrow_wedge(x1, y1, x0, y0,\n head_dist, cos_t, sin_t, linewidth)\n if has_begin_arrow\n else ([], [], 0, 0)\n )\n\n # end arrow\n x2, y2 = path.vertices[-2]\n x3, y3 = path.vertices[-1]\n\n # If there is no room for an arrow and a line, then skip the arrow\n has_end_arrow = self._endarrow_head and (x2, y2) != (x3, y3)\n verticesB, codesB, ddxB, ddyB = (\n self._get_arrow_wedge(x2, y2, x3, y3,\n head_dist, cos_t, sin_t, linewidth)\n if has_end_arrow\n else ([], [], 0, 0)\n )\n\n # This simple code will not work if ddx, ddy is greater than the\n # separation between vertices.\n paths = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)],\n path.vertices[1:-1],\n [(x3 + ddxB, y3 + ddyB)]]),\n path.codes)]\n fills = [False]\n\n if has_begin_arrow:\n if self.fillbegin:\n paths.append(\n Path([*verticesA, (0, 0)], [*codesA, Path.CLOSEPOLY]))\n fills.append(True)\n else:\n paths.append(Path(verticesA, codesA))\n fills.append(False)\n elif self._beginarrow_bracket:\n x0, y0 = path.vertices[0]\n x1, y1 = path.vertices[1]\n verticesA, codesA = self._get_bracket(x0, y0, x1, y1,\n self.widthA * scaleA,\n self.lengthA * scaleA,\n self.angleA)\n\n paths.append(Path(verticesA, codesA))\n fills.append(False)\n\n if has_end_arrow:\n if self.fillend:\n fills.append(True)\n paths.append(\n Path([*verticesB, (0, 0)], [*codesB, Path.CLOSEPOLY]))\n else:\n fills.append(False)\n paths.append(Path(verticesB, codesB))\n elif self._endarrow_bracket:\n x0, y0 = path.vertices[-1]\n x1, 
y1 = path.vertices[-2]\n verticesB, codesB = self._get_bracket(x0, y0, x1, y1,\n self.widthB * scaleB,\n self.lengthB * scaleB,\n self.angleB)\n\n paths.append(Path(verticesB, codesB))\n fills.append(False)\n\n return paths, fills\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 1632, "n_words": 290, "vocab_size": 150, "complexity": 15, "nloc": 66, "token_counts": 575, "n_ast_nodes": 842, "n_identifiers": 54, "random_cut": "def transmute(self, path, mutation_size, linewidth):\n\n if self._beginarrow_head or self._endarrow_head:\n head_length = self.head_length * mutation_size\n head_width = self.head_width * mutation_size\n head_dist = np.hypot(head_length, head_width)\n cos_t, sin_t = head_length / head_dist, head_width / head_dist\n\n scaleA = mutation_size if self.scaleA is None else self.scaleA\n scaleB = mutation_size if self.scaleB is None else self.scaleB\n\n # begin arrow\n x0, y0 = path.vertices[" }, { "id": 150519, "commit_id": "6f5478cc029bc146e3980affa61dd7956c5cb416", "repo": "freqtrade", "path": "freqtrade/rpc/replicate/channel.py", "file_name": "channel.py", "fun_name": "send", "commit_message": "DataFrame transmission, strategy follower logic", "code": "async def send(self, data):\n \n # logger.info(f\"Serialized Send - {self._wrapped_ws._serialize(data)}\")\n await self._wrapped_ws.send(data)\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 32, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 32, "n_identifiers": 4, "random_cut": "async def send(self, data):\n \n # logger.info(f\"Serialized Send - {self._wrapped_ws._serialize(" }, { "id": 209485, "commit_id": "e6eaa484b8fa3d10051e82f5a784fe8dedbd5592", "repo": "scapy", "path": "scapy/contrib/automotive/scanner/executor.py", "file_name": "executor.py", "fun_name": "scan", "commit_message": "Add assert to GMLAN Scanner to enforce fast fail on to many open TestSockets\n\nFix bugs in TestSocket\nFix bugs in the AutomotiveScanner execution_time handling\nSimplify test code for UDS_Scanner and reuse ObjectPipes to avoid mass\ncreation", "code": "def scan(self, timeout=None):\n # type: (Optional[int]) -> None\n \n kill_time = time.time() + (timeout or 0xffffffff)\n log_interactive.debug(\"[i] Set kill_time to %s\" % time.ctime(kill_time))\n while kill_time > time.time():\n test_case_executed = False\n log_interactive.debug(\"[i] Scan paths %s\", self.state_paths)\n for p, test_case in product(\n self.state_paths, self.configuration.test_cases):\n log_interactive.info(\"[i] Scan path %s\", p)\n terminate = kill_time <= time.time()\n if terminate:\n log_interactive.debug(\n \"[-] Execution time exceeded. 
Terminating scan!\")\n break\n\n final_state = p[-1]\n if test_case.has_completed(final_state):\n log_interactive.debug(\"[+] State %s for %s completed\",\n repr(final_state), test_case)\n continue\n\n try:\n if not self.enter_state_path(p):\n log_interactive.error(\n \"[-] Error entering path %s\", p)\n continue\n log_interactive.info(\n \"[i] Execute %s for path %s\", str(test_case), p)\n self.execute_test_case(test_case, kill_time)\n test_case_executed = True\n except (OSError, ValueError, Scapy_Exception) as e:\n log_interactive.critical(\"[-] Exception: %s\", e)\n if self.configuration.debug:\n raise e\n if isinstance(e, OSError):\n log_interactive.critical(\n \"[-] OSError occurred, closing socket\")\n self.socket.close()\n if cast(SuperSocket, self.socket).closed and \\\n self.reconnect_handler is None:\n log_interactive.critical(\n \"Socket went down. Need to leave scan\")\n raise e\n finally:\n self.cleanup_state()\n\n if not test_case_executed:\n log_interactive.info(\n \"[i] Execute failure or scan completed. Exit scan!\")\n break\n\n self.cleanup_state()\n self.reset_target()\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1044, "n_words": 160, "vocab_size": 116, "complexity": 14, "nloc": 49, "token_counts": 280, "n_ast_nodes": 468, "n_identifiers": 38, "random_cut": "def scan(self, timeout=None):\n # type: (Optional[int]) -> None\n \n kill_time = time.time() + (timeout or 0xffffffff)\n log_interactive.debug(\"[i] Set kill_time to %s\" % time.ctime(kill_time))\n while kill_time > time.time():\n test_case_executed = False\n log_interactive.debug(\"[i] Scan paths %s\", self.state_paths)\n for p, test_case in product(\n self.state_paths, self.configuration.test_cases):\n log_interactive.info(\"[i] Scan path %s\", p)\n terminate = kill_time <= time.time()\n if terminate:\n log_interactive.debug(\n \"[-] Execution time exceeded. Terminating scan!\")\n break\n\n final_state = p[-1]\n if test_case.has_completed(final_state):\n log_interactive.debug(\"[+] State %s for %s completed\",\n repr(final_state), test_case)\n continue\n\n try:\n if not self.enter_state_path(p):\n log_interactive.error(\n \"[-] Error entering path %s\", p)\n continue\n log_interactive.info(\n \"[i] Execute %s for path %s\", str(test_case), p)\n self.execute_test_case(test_case, kill_time)\n test_case_executed = True\n except (OSError, ValueError, Scapy_Exception) as e:\n log_interactive.critical(\"[-] Exception: %s\", e)\n " }, { "id": 246682, "commit_id": "02d708568b476f2f7716000b35c0adfa4cbd31b3", "repo": "synapse", "path": "tests/api/test_auth.py", "file_name": "test_auth.py", "fun_name": "test_get_user_by_req_appservice_valid_token", "commit_message": "Replace assertEquals and friends with non-deprecated versions. 
(#12092)", "code": "def test_get_user_by_req_appservice_valid_token(self):\n app_service = Mock(\n token=\"foobar\", url=\"a_url\", sender=self.test_user, ip_range_whitelist=None\n )\n self.store.get_app_service_by_token = Mock(return_value=app_service)\n self.store.get_user_by_access_token = simple_async_mock(None)\n\n request = Mock(args={})\n request.getClientIP.return_value = \"127.0.0.1\"\n request.args[b\"access_token\"] = [self.test_token]\n request.requestHeaders.getRawHeaders = mock_getRawHeaders()\n requester = self.get_success(self.auth.get_user_by_req(request))\n self.assertEqual(requester.user.to_string(), self.test_user)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 113, "n_words": 33, "vocab_size": 26, "complexity": 1, "nloc": 12, "token_counts": 118, "n_ast_nodes": 191, "n_identifiers": 28, "random_cut": "def test_get_user_by_req_appservice_valid_token(self):\n app_service = Mock(\n token=\"foobar\", url=\"a_url\", sender=self.test_user, ip_range_whitelist=None\n )\n se" }, { "id": 141215, "commit_id": "78688a0903d01421b000eb37d11607571dd80dfa", "repo": "ray", "path": "python/ray/air/config.py", "file_name": "config.py", "fun_name": "fill_defaults", "commit_message": "Enable streaming ingest in AIR (#25428)\n\nThis adds the following options to DatasetConfig, which can be used to enable streaming ingest.\r\n\r\n```\r\n # Whether the dataset should be streamed into memory using pipelined reads.\r\n # When enabled, get_dataset_shard() returns DatasetPipeline instead of Dataset.\r\n # The amount of memory to use is controlled by `stream_window_size`.\r\n # False by default for all datasets.\r\n use_stream_api: Optional[bool] = None\r\n\r\n # Configure the streaming window size in bytes. A typical value is something like\r\n # 20% of object store memory. If set to -1, then an infinite window size will be\r\n # used (similar to bulk ingest). This only has an effect if use_stream_api is set.\r\n # Set to 1.0 GiB by default.\r\n stream_window_size: Optional[float] = None\r\n\r\n # Whether to enable global shuffle (per pipeline window in streaming mode). 
Note\r\n # that this is an expensive all-to-all operation, and most likely you want to use\r\n # local shuffle instead.\r\n # False by default for all datasets.\r\n global_shuffle: Optional[bool] = None\r\n```", "code": "def fill_defaults(self) -> \"DatasetConfig\":\n \n return DatasetConfig(\n fit=self.fit or False,\n split=self.split or False,\n required=self.required or False,\n use_stream_api=self.use_stream_api or False,\n stream_window_size=self.stream_window_size\n if self.stream_window_size is not None\n else 1024 * 1024 * 1024,\n global_shuffle=self.global_shuffle or False,\n transform=self.transform if self.transform is not None else True,\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 162, "n_words": 42, "vocab_size": 27, "complexity": 8, "nloc": 13, "token_counts": 86, "n_ast_nodes": 126, "n_identifiers": 10, "random_cut": "def fill_defaults(self) -> \"DatasetConfig\":\n \n return DatasetConfig(\n fit=self.fit or False,\n split=self.split or False,\n required=self.required or False,\n use_stream_api=self.use_stream_api or False,\n stream_window_size=self.stream_window_size\n if self.stream_window_size is not None\n " }, { "id": 281655, "commit_id": "9e671aeba98dacc69ecbbfec1f087aca3b139ee7", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/stocks/stocks_controller.py", "file_name": "stocks_controller.py", "fun_name": "call_sia", "commit_message": "Remember Contexts (#1187)\n\n* Refacotred classes\r\n\r\n* Handling for new instance desired\r\n\r\n* Added feature flag\r\n\r\n* Converted all menu calls", "code": "def call_sia(self, _):\n \n from gamestonk_terminal.stocks.sector_industry_analysis.sia_controller import (\n SectorIndustryAnalysisController,\n )\n\n self.queue = self.load_class(\n SectorIndustryAnalysisController, self.ticker, self.queue\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 73, "n_words": 16, "vocab_size": 13, "complexity": 1, "nloc": 7, "token_counts": 39, "n_ast_nodes": 57, "n_identifiers": 11, "random_cut": "def call_sia(self, _):\n \n from game" }, { "id": 179333, "commit_id": "cc0cff893f9d7d472788adc2510c123967b384fe", "repo": "gradio", "path": "test/test_inputs.py", "file_name": "test_inputs.py", "fun_name": "test_in_interface", "commit_message": "Format The Codebase\n- black formatting\n- isort formatting", "code": "def test_in_interface(self):\n iface = gr.Interface(lambda x: x[::-1], \"textbox\", \"textbox\")\n self.assertEqual(iface.process([\"Hello\"])[0], [\"olleH\"])\n iface = gr.Interface(\n lambda sentence: max([len(word) for word in sentence.split()]),\n gr.inputs.Textbox(),\n gr.outputs.Textbox(),\n interpretation=\"default\",\n )\n scores, alternative_outputs = iface.interpret(\n [\"Return the length of the longest word in this sentence\"]\n )\n self.assertEqual(\n scores,\n [\n [\n (\"Return\", 0.0),\n (\" \", 0),\n (\"the\", 0.0),\n (\" \", 0),\n (\"length\", 0.0),\n (\" \", 0),\n (\"of\", 0.0),\n (\" \", 0),\n (\"the\", 0.0),\n (\" \", 0),\n (\"longest\", 0.0),\n (\" \", 0),\n (\"word\", 0.0),\n (\" \", 0),\n (\"in\", 0.0),\n (\" \", 0),\n (\"this\", 0.0),\n (\" \", 0),\n (\"sentence\", 1.0),\n (\" \", 0),\n ]\n ],\n )\n self.assertEqual(\n alternative_outputs,\n [[[\"8\"], [\"8\"], [\"8\"], [\"8\"], [\"8\"], [\"8\"], [\"8\"], [\"8\"], [\"8\"], [\"7\"]]],\n )\n\n", "url": "https://github.com/gradio-app/gradio.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 699, "n_words": 110, "vocab_size": 55, "complexity": 2, "nloc": 43, "token_counts": 308, "n_ast_nodes": 459, "n_identifiers": 20, "random_cut": "def test_in_interface(self):\n iface = gr.Interface(lambda x: x[::-1], \"textbox\", \"textbox\")\n self.assertEqual(iface.process([\"Hello\"])[0], [\"olleH\"])\n iface = gr.Interface(\n lambda sentence: max([len(word) for word in sentence.split()]),\n gr.inputs.Textbox(),\n gr.outputs.Textbox(),\n interpretation=\"default\",\n )\n scores, alternative_outputs = iface.interpret(\n [\"Return the length of the longest word in this sentence\"]\n )\n self.assertEqual(\n scores,\n [\n [\n (\"Return\", 0.0),\n (\" \", 0),\n (\"the\", 0.0),\n (\" \", 0),\n (\"length\", 0.0),\n (\" \", 0),\n (\"of\", 0.0),\n (\" \", 0),\n (\"the\", 0.0),\n (\" \", 0),\n (\"longest\", 0.0),\n (\" \", 0),\n (\"word\", 0.0),\n (\" \", 0),\n (\"in\", 0.0),\n (\" \", 0),\n (\"this\", 0.0),\n (\" \", 0),\n (\"sentence\", 1.0),\n (\" \", 0),\n ]\n ],\n )\n self.assertEqual(\n alternative_outputs,\n [[[\"8\"], [\"8\"], [\"8\"], [\"8\"], [\"8\"], [\"8\"" }, { "id": 74903, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/documents/views/chooser.py", "file_name": "chooser.py", "fun_name": "render_to_response", "commit_message": "Reformat with black", "code": "def render_to_response(self):\n return render_modal_workflow(\n self.request,\n \"wagtaildocs/chooser/chooser.html\",\n None,\n self.get_context_data(),\n json_data={\n \"step\": \"chooser\",\n \"error_label\": _(\"Server Error\"),\n \"error_message\": _(\n \"Report this error to your website administrator with the following information:\"\n ),\n \"tag_autocomplete_url\": reverse(\"wagtailadmin_tag_autocomplete\"),\n },\n )\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 205, "n_words": 32, "vocab_size": 32, "complexity": 1, "nloc": 15, "token_counts": 53, "n_ast_nodes": 94, "n_identifiers": 8, "random_cut": "def render_to_response(self):\n return render_modal_workflow(\n self.request,\n \"wagtaildocs/chooser/chooser.html\",\n None,\n self.get_context_data(),\n json_data={\n \"step\": \"chooser\",\n \"error_label\": _(\"Server Error\"),\n \"error_messag" }, { "id": 67489, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/setup/doctype/item_group/item_group.py", "file_name": "item_group.py", "fun_name": "get_child_item_groups", "commit_message": "style: format code with black", "code": "def get_child_item_groups(item_group_name):\n\titem_group = frappe.get_cached_value(\"Item Group\", item_group_name, [\"lft\", \"rgt\"], as_dict=1)\n\n\tchild_item_groups = [\n\t\td.name\n\t\tfor d in frappe.get_all(\n\t\t\t\"Item Group\", filters={\"lft\": (\">=\", item_group.lft), \"rgt\": (\"<=\", item_group.rgt)}\n\t\t)\n\t]\n\n\treturn child_item_groups or {}\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 23, "n_words": 32, "vocab_size": 29, "complexity": 3, "nloc": 9, "token_counts": 70, "n_ast_nodes": 116, "n_identifiers": 13, "random_cut": "def get_child_item_groups(item_group_name):\n\titem_group = frappe.get_cached_value(\"Item Group\", item_group_name, [\"lft\", \"rgt\"], as_dict=1)\n\n\tchild_item_groups = [\n\t\td.name\n\t\tfor d in 
frappe.get_all(\n\t\t\t\"Item Group\", filters={\"lft\": (\">=\", item_group.lft), \"rgt\": (\"<=\", item_group.rgt)}\n\t\t)\n\t]\n\n\treturn child_item_groups " }, { "id": 113432, "commit_id": "9e2a069d0f38da64d4c945b2c951fa64d19b9d94", "repo": "nni", "path": "nni/nas/oneshot/pytorch/supermodule/base.py", "file_name": "base.py", "fun_name": "_save_module_to_state_dict", "commit_message": "One-shot sub state dict implementation (#5054)", "code": "def _save_module_to_state_dict(self, destination, prefix, keep_vars):\n \n for name, module in self._modules.items():\n if module is not None:\n sub_state_dict(module, destination=destination, prefix=prefix + name + '.', keep_vars=keep_vars)\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 63, "n_words": 23, "vocab_size": 21, "complexity": 3, "nloc": 4, "token_counts": 51, "n_ast_nodes": 78, "n_identifiers": 10, "random_cut": "def _save_module_to_state_dict(self, destination, prefix, keep_vars):\n \n for name, module in self._modules.items():\n if module is not None:\n sub_state_dict(module, destination=destination, prefix=prefix + name + '.', keep_vars=keep_vars)\n" }, { "id": 43727, "commit_id": "88ea1575079c0e94e1f62df38d6d592b8c827bbd", "repo": "airflow", "path": "tests/providers/amazon/aws/sensors/test_redshift_cluster.py", "file_name": "test_redshift_cluster.py", "fun_name": "test_poke_cluster_not_found", "commit_message": "Standardize AWS Redshift naming (#20374)\n\n* Standardize AWS Redshift naming", "code": "def test_poke_cluster_not_found(self):\n self._create_cluster()\n op = RedshiftClusterSensor(\n task_id='test_cluster_sensor',\n poke_interval=1,\n timeout=5,\n aws_conn_id='aws_default',\n cluster_identifier='test_cluster_not_found',\n target_status='cluster_not_found',\n )\n\n assert op.poke({})\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 108, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 11, "token_counts": 47, "n_ast_nodes": 78, "n_identifiers": 12, "random_cut": "def test_poke_cluster_not_found(self):\n self._create_cluster()\n op = RedshiftClusterSensor(\n task_id='test_cluster_sensor',\n poke_interval=1,\n timeout=5,\n aws_conn_id='aws_default'," }, { "id": 223352, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/tests/test_upload.py", "file_name": "test_upload.py", "fun_name": "_urlopen", "commit_message": "add python 3.10.4 for windows", "code": "def _urlopen(self, url):\n self.last_open = FakeOpen(url, msg=self.next_msg, code=self.next_code)\n return self.last_open\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 23, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 31, "n_ast_nodes": 46, "n_identifiers": 9, "random_cut": "def _urlopen(self, url):\n self.last_open = Fak" }, { "id": 97398, "commit_id": "266dbc5a8172612679c6549a9ca240cfba9dab3d", "repo": "sentry", "path": "tests/acceptance/page_objects/issue_list.py", "file_name": "issue_list.py", "fun_name": "delete_issues", "commit_message": "feat(ui): Remove issues from issue stream (#31910)\n\n* Revert \"Revert \"feat(ui): Remove issues from issue stream\" (#31908)\"\r\n\r\nThis reverts commit 4bd69628c4ec1b5a4cd980a4fa8cd1291072d47f.\r\n\r\n* fix(acceptance): Fix delete_issues test 
(#31909)\r\n\r\n* add checks for issue-list-removal-action flag and check if realtimeActive to use feature\r\n\r\n* update tests\r\n\r\nCo-authored-by: Vu Luong ", "code": "def delete_issues(self):\n self.browser.click('[aria-label=\"More issue actions\"]')\n self.browser.wait_until('[data-test-id=\"delete\"]')\n self.browser.click('[data-test-id=\"delete\"]')\n self.browser.click('[data-test-id=\"confirm-button\"]')\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 35, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 5, "token_counts": 37, "n_ast_nodes": 69, "n_identifiers": 5, "random_cut": "def delete_issues(self):\n self.browse" }, { "id": 117328, "commit_id": "9ce5a21dd6359fd7e8ebf78051ce9e97bd195ec9", "repo": "mindsdb", "path": "mindsdb/api/mysql/mysql_proxy/data_types/mysql_datum.py", "file_name": "mysql_datum.py", "fun_name": "setFromBuff", "commit_message": "ML handler supbrocess (#3377)\n\n* log -> logger\r\n\r\ndividing components:\r\n\r\napp initialize\r\n parse args\r\n set env.MINDSDB_CONFIG_PATH\r\n\r\nconfig\r\n requiers env.MINDSDB_CONFIG_PATH\r\n sets\r\n env.MINDSDB_DB_CON\r\n Config() - makes initialization\r\n\r\nlog\r\n uses config\r\n initialize_log - makes initialization\r\n\r\ndatabase\r\n uses env.MINDSDB_DB_CON\r\n have init() method\r\n\r\nfile storage\r\n uses config\r\n\r\n* partial sync for model storage interfaces\r\n\r\n* ml handler in subprocess interface\r\n\r\n* fix delete model\r\n\r\n* fix: model with error in success status\r\n\r\n* fix: remove hf predictor\r\n\r\n* fix pg handler\r\n\r\n* MLHandlerPersistWrapper keeps wrapper process opened\r\n\r\n* predictor with error keeps 'success' status\r\n\r\n#3362\r\n\r\n* lock for sending tasks to subprocess one by one\r\n\r\n* check target of predictor before run learn in subproccess\r\n\r\n* fix check target\r\n\r\n* fix: json_ai override and problem definition generation\r\n\r\n* fix None case\r\n\r\n* folder for ml handler tests\r\n\r\n* fix: add timeseries_settings key to learn_args\r\n\r\n* fixes in lw_handler\r\n\r\n* fix: del join_learn_process\r\n\r\n* tests for LW handler\r\n\r\n* finish unit test for LW\r\n\r\n* changes in tests:\r\n- set-project -> to base class\r\n- return of ml handler is dataframe\r\n- draft for project structure test\r\n\r\n* merge from staging\r\n\r\n* create_validation method to check learn params before send to subprocess\r\nfixes of HF\r\nfixed version of transformers in HF requirements\r\n\r\nCo-authored-by: Patricio Cerda Mardini ", "code": "def setFromBuff(self, buff):\n start = 0\n\n if self.var_len == 'lenenc':\n start = 1\n ln_enc = buff[0]\n if int(ln_enc) <= ONE_BYTE_ENC[0]:\n start = 0\n end = 1\n elif int(ln_enc) == TWO_BYTE_ENC[0]:\n end = 3\n elif int(ln_enc) == THREE_BYTE_ENC[0]:\n end = 4\n elif ln_enc:\n end = 9\n\n num_str = buff[start:end]\n if end > 9:\n logger.error('Cant decode integer greater than 8 bytes')\n return buff[end - 1:]\n\n for j in range(8 - (end - start)):\n num_str += b'\\0'\n\n if self.var_type == 'int':\n self.value = struct.unpack('i', num_str)\n return buff[end:]\n\n if self.var_type in ['byte', 'string']:\n length = struct.unpack('Q', num_str)[0]\n self.value = buff[end:(length + end)]\n return buff[(length + end):]\n\n if self.var_len == 'EOF':\n length = len(buff)\n self.var_len = str(length)\n self.value = buff\n return ''\n else:\n length = self.var_len\n\n if self.type == 'string':\n for j, x in enumerate(buff):\n if int(x) 
== 0:\n length = j + 1\n break\n\n length = int(length)\n if self.var_type in ['byte', 'string']:\n end = length\n self.value = buff[:end]\n else: # if its an integer\n end = length\n num_str = buff[:end]\n if end > 8:\n logger.error('cant decode integer greater than 8 bytes')\n return buff[end:]\n for j in range(8 - end):\n num_str += b'\\0'\n self.value = struct.unpack('Q', num_str)[0]\n if str(self.var_len) == 'NUL':\n self.value = self.value[:-1]\n return buff[end:]\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 839, "n_words": 201, "vocab_size": 88, "complexity": 18, "nloc": 55, "token_counts": 376, "n_ast_nodes": 618, "n_identifiers": 26, "random_cut": "def setFromBuff(self, buff):\n start = 0\n\n if self.var_len == 'lenenc':\n start = 1\n ln_enc = buff[0]\n if int(ln_enc) <= ONE_BYTE_ENC[0]:\n start = 0\n end = 1\n elif int(ln_enc) == TWO_BYTE_ENC[0]:\n end = 3\n elif int(ln_enc) == THREE_BYTE_ENC[0]:\n end = 4\n elif ln_enc:\n end = 9\n\n num_str = buff[start:end]\n if end > 9:\n logger.error('Cant decode integer greater than 8 bytes')\n return buff[end - 1:]\n\n for j in range(8 - (end - start)):\n num_str += b'\\0'\n\n if self.var_type == 'int':\n self.value = struct.unpack('i', num_str)\n return buff[end:]\n\n if self.var_type in ['byte', 'string']:\n length = struct.unpack('Q', num_str)[0]\n self.value = buff[end:(length + end)]\n return buff[(length + end):]\n\n if self.var_len == 'EOF':\n length = len(buff)\n self.var_len = str(length)\n self.value = buff\n return ''\n else:\n length = self.var_len\n\n if self.type == 'string':\n for j, x in enumerate(buff):\n if int(x) == 0:\n length = j + 1\n break\n\n length = int(length)\n if self.var_type in ['byte', 'string']:\n end = length\n " }, { "id": 72951, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/api/v2/views.py", "file_name": "views.py", "fun_name": "get_field_serializer_overrides", "commit_message": "Reformat with black", "code": "def get_field_serializer_overrides(cls, model):\n return {\n field.name: field.serializer\n for field in cls.get_body_fields(model) + cls.get_meta_fields(model)\n if field.serializer is not None\n }\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 65, "n_words": 19, "vocab_size": 18, "complexity": 3, "nloc": 6, "token_counts": 40, "n_ast_nodes": 60, "n_identifiers": 8, "random_cut": "def get_field_serializer_overrides(cls, model):\n return {\n field.name: field.serializer\n for " }, { "id": 134174, "commit_id": "305905f469b0c8c351670b276b54a0d3d77bc242", "repo": "ray", "path": "rllib/algorithms/impala/impala.py", "file_name": "impala.py", "fun_name": "training_step", "commit_message": "[RLlib] IMPALA: Move learner thread health-check into better place. 
(#29541)", "code": "def training_step(self) -> ResultDict:\n # First, check, whether our learner thread is still healthy.\n if not self._learner_thread.is_alive():\n raise RuntimeError(\"The learner thread died while training!\")\n\n # Get references to sampled SampleBatches from our workers.\n unprocessed_sample_batches_refs = self.get_samples_from_workers()\n # Tag workers that actually produced ready sample batches this iteration.\n # Those workers will have to get updated at the end of the iteration.\n self.workers_that_need_updates |= unprocessed_sample_batches_refs.keys()\n\n # Send the collected batches (still object refs) to our aggregation workers.\n if self.config[\"num_aggregation_workers\"] > 0:\n batches = self.process_experiences_tree_aggregation(\n unprocessed_sample_batches_refs\n )\n # Resolve collected batches here on local process (using the mixin buffer).\n else:\n batches = self.process_experiences_directly(unprocessed_sample_batches_refs)\n\n # Increase sampling counters now that we have the actual SampleBatches on\n # the local process (and can measure their sizes).\n for batch in batches:\n self._counters[NUM_ENV_STEPS_SAMPLED] += batch.count\n self._counters[NUM_AGENT_STEPS_SAMPLED] += batch.agent_steps()\n\n # Concatenate single batches into batches of size `train_batch_size`.\n self.concatenate_batches_and_pre_queue(batches)\n # Move train batches (of size `train_batch_size`) onto learner queue.\n self.place_processed_samples_on_learner_queue()\n # Extract most recent train results from learner thread.\n train_results = self.process_trained_results()\n\n # Sync worker weights.\n with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]:\n self.update_workers_if_necessary()\n\n return train_results\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 426, "n_words": 174, "vocab_size": 122, "complexity": 4, "nloc": 20, "token_counts": 126, "n_ast_nodes": 223, "n_identifiers": 27, "random_cut": "def training_step(self) -> ResultDict:\n # First, check, whether our learner thread is still healthy.\n if not self._learner_thread.is_alive():\n raise RuntimeError(\"The learner thread died while training!\")\n\n # Get references to sampled SampleBatches from our workers.\n unprocessed_sample_batches_refs = self.get_samples_from_workers()\n # Tag workers that actually produced ready sample batches this iteration.\n # Those workers will have to get updated at the end of the iteration.\n self.workers_that_need_updates |= unprocessed_sample_batches_refs.keys()\n\n # Send the collected batches (still object refs) to our aggregation workers.\n if self.config[\"num_aggregation_workers\"] > 0:\n batches = self.process_experiences_tree_aggregation(\n unprocessed_sample_batches_refs\n )\n # Re" }, { "id": 84347, "commit_id": "52be020d0cbc5b8042ea89d7653c3b916badd433", "repo": "zulip", "path": "zerver/tests/test_custom_profile_data.py", "file_name": "test_custom_profile_data.py", "fun_name": "test_delete_internals", "commit_message": "custom_profile: Apply ProfileDataElementUpdateDict.\n\nWe explicitly annotate variables or parameters with\n`ProfileDataElementUpdateDict` as necessary.\n\nSigned-off-by: Zixuan James Li ", "code": "def test_delete_internals(self) -> None:\n user_profile = self.example_user(\"iago\")\n realm = user_profile.realm\n field = CustomProfileField.objects.get(name=\"Phone number\", realm=realm)\n data: List[ProfileDataElementUpdateDict] = [\n {\"id\": field.id, \"value\": 
\"123456\"},\n ]\n do_update_user_custom_profile_data_if_changed(user_profile, data)\n\n self.assertTrue(self.custom_field_exists_in_realm(field.id))\n self.assertEqual(user_profile.customprofilefieldvalue_set.count(), self.original_count)\n\n do_remove_realm_custom_profile_field(realm, field)\n\n self.assertFalse(self.custom_field_exists_in_realm(field.id))\n self.assertEqual(user_profile.customprofilefieldvalue_set.count(), self.original_count - 1)\n\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 123, "n_words": 36, "vocab_size": 32, "complexity": 1, "nloc": 13, "token_counts": 129, "n_ast_nodes": 210, "n_identifiers": 23, "random_cut": "def test_delete_internals(self) -> None:\n user_profile = self.example_user(\"iago\")\n realm = user_profile.realm\n field = CustomProfileField.objects.get(name=\"Phone number\", realm=realm)\n " }, { "id": 295432, "commit_id": "17403f930f625dd70bbd8ab44565bbc467db886a", "repo": "core", "path": "tests/components/media_player/test_async_helpers.py", "file_name": "test_async_helpers.py", "fun_name": "supported_features", "commit_message": "Add EntityFeature enum to Media Player (#69119)", "code": "def supported_features(self):\n \n return (\n mp.const.MediaPlayerEntityFeature.VOLUME_SET\n | mp.const.MediaPlayerEntityFeature.VOLUME_STEP\n | mp.const.MediaPlayerEntityFeature.PLAY\n | mp.const.MediaPlayerEntityFeature.PAUSE\n | mp.const.MediaPlayerEntityFeature.TURN_OFF\n | mp.const.MediaPlayerEntityFeature.TURN_ON\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 103, "n_words": 16, "vocab_size": 12, "complexity": 1, "nloc": 9, "token_counts": 56, "n_ast_nodes": 88, "n_identifiers": 11, "random_cut": "def supported_features(self):\n \n return (\n mp.const.MediaPlayerEntityFeature.VOLUME_SET\n | mp.const.MediaPlayerEntityFeature.VOLUME_STEP\n | mp.const.MediaPlayerEntityFeatu" }, { "id": 318060, "commit_id": "2b1e1365fdb3bfe72feb515fcf2e02331caa4088", "repo": "core", "path": "homeassistant/components/device_tracker/legacy.py", "file_name": "legacy.py", "fun_name": "async_update", "commit_message": "Add StrEnum for device_tracker `SourceType` (#75892)\n\nAdd StrEnum for device_tracker SourceType", "code": "async def async_update(self) -> None:\n \n if not self.last_seen:\n return\n if self.location_name:\n self._state = self.location_name\n elif self.gps is not None and self.source_type == SourceType.GPS:\n zone_state = zone.async_active_zone(\n self.hass, self.gps[0], self.gps[1], self.gps_accuracy\n )\n if zone_state is None:\n self._state = STATE_NOT_HOME\n elif zone_state.entity_id == zone.ENTITY_ID_HOME:\n self._state = STATE_HOME\n else:\n self._state = zone_state.name\n elif self.stale():\n self.mark_stale()\n else:\n self._state = STATE_HOME\n self.last_update_home = True\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 271, "n_words": 59, "vocab_size": 38, "complexity": 8, "nloc": 24, "token_counts": 128, "n_ast_nodes": 205, "n_identifiers": 22, "random_cut": "async def async_update(self) -> None:\n \n if not self.last_seen:\n return\n if self.location_name:\n self._state = self.location_name\n elif self.gps is not None and self.source_type == SourceType.GPS:\n zone_state = zone.async_active_zone(\n self.hass, self.gps[0], self.gps[1], 
self.gps_accuracy\n )\n if zone_state is None:\n self._state = STATE_NOT_HOME\n elif zone_state.entity_id == zone.ENTITY_ID_HOME:\n self._state = STATE_HOME\n else:\n self._state = zone" }, { "id": 195518, "commit_id": "0f15b897302e7a4a257e6b2edf3c1b811c95081c", "repo": "ParlAI", "path": "projects/image_chat/interactive.py", "file_name": "interactive.py", "fun_name": "interactive_running", "commit_message": "[image_chat] remove decode methods for handling HTTP requests, add torchvision requirements (#4867)\n\n* [image_chat] Support both bytes and string\r\n\r\n* update cache key\r\n\r\n* Update config.yml\r\n\r\nCo-authored-by: Kurt Shuster ", "code": "def interactive_running(self, data):\n \n reply = {}\n if type(data[\"personality\"][0]) is bytes:\n reply[\"text\"] = data[\"personality\"][0].decode(\"utf-8\")\n else:\n reply[\"text\"] = data[\"personality\"][0]\n if type(data[\"text\"][0]) is bytes:\n text = data[\"text\"][0].decode(\"utf-8\")\n else:\n text = data[\"text\"][0]\n if text:\n reply[\"text\"] = \"\\n\".join(SHARED[\"dialog_history\"] + [text, reply[\"text\"]])\n SHARED[\"dialog_history\"].append(text)\n if SHARED[\"image_feats\"] is None:\n if type(data[\"image\"][0]) is bytes:\n img_data = data[\"image\"][0].decode(\"utf-8\")\n _, encoded = img_data.split(\",\", 1)\n encoded = encoded[2:-1]\n else:\n img_data = data[\"image\"][0]\n _, encoded = img_data.split(\",\", 1)\n image = Image.open(io.BytesIO(b64decode(encoded))).convert(\"RGB\")\n SHARED[\"image_feats\"] = SHARED[\"image_loader\"].extract(image)\n\n reply[\"image\"] = SHARED[\"image_feats\"]\n SHARED[\"agent\"].observe(reply)\n model_res = SHARED[\"agent\"].act()\n return model_res\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 349, "n_words": 80, "vocab_size": 43, "complexity": 6, "nloc": 27, "token_counts": 276, "n_ast_nodes": 481, "n_identifiers": 26, "random_cut": "def interactive_running(self, data):\n \n reply = {}\n if type(data[\"personality\"][0]) is bytes:\n reply[\"text\"] = data[\"personality\"][0].decode(\"utf-8\")\n else:\n reply[\"text\"] = data[\"personality\"][0]\n if type(data[\"text\"][0]) is bytes:\n text = data[\"text\"][0].decode(\"utf-8\")\n else:\n text = data[\"text\"][0]\n if text:\n reply[\"text\"] " }, { "id": 149834, "commit_id": "1fae6c9ef794a014c3e8f1a692bda8b66b46b960", "repo": "freqtrade", "path": "freqtrade/freqai/data_kitchen.py", "file_name": "data_kitchen.py", "fun_name": "load_data", "commit_message": "keep model accessible in memory to avoid loading objects from disk during live/dry", "code": "def load_data(self) -> Any:\n \n\n with open(self.model_path / str(self.model_filename + \"_metadata.json\"), \"r\") as fp:\n self.data = json.load(fp)\n self.training_features_list = self.data[\"training_features_list\"]\n\n self.data_dictionary[\"train_features\"] = pd.read_pickle(\n self.model_path / str(self.model_filename + \"_trained_df.pkl\")\n )\n\n self.model_path = Path(self.data[\"model_path\"])\n self.model_filename = self.data[\"model_filename\"]\n\n # try to access model in memory instead of loading object from disk to save time\n if self.live and self.model_filename in self.model_dictionary:\n model = self.model_dictionary[self.model_filename]\n else:\n model = load(self.model_path / str(self.model_filename + \"_model.joblib\"))\n\n assert model, (\n f\"Unable to load model, ensure model exists at \"\n f\"{self.model_path} \"\n )\n\n if 
self.config[\"freqai\"][\"feature_parameters\"][\"principal_component_analysis\"]:\n self.pca = pk.load(\n open(self.model_path / str(self.model_filename + \"_pca_object.pkl\"), \"rb\")\n )\n\n return model\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 337, "n_words": 96, "vocab_size": 65, "complexity": 4, "nloc": 27, "token_counts": 189, "n_ast_nodes": 331, "n_identifiers": 22, "random_cut": "def load_data(self) -> Any:\n \n\n with open(self.model_path / str(self.model_filename + \"_metadata.json\"), \"r\") as fp:\n self.data = json.load(fp)\n self.training_features_list = self.data[\"training_features_list\"]\n\n self.data_dictionary[\"train_features\"] = pd.read_pickle(\n self.model_path / str(self.model_filename + \"_trained_df.pkl\")\n )\n\n self.model_path = Path(self.data[\"model_path\"])\n self.model_filename = self.data[\"model_filename\"]\n\n # try to access model in memory instead of loading object from disk to save time\n if self.live and" }, { "id": 73791, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/models/__init__.py", "file_name": "__init__.py", "fun_name": "__str__", "commit_message": "Reformat with black", "code": "def __str__(self):\n return \"Group %d ('%s') has permission '%s' on page %d ('%s')\" % (\n self.group.id,\n self.group,\n self.permission_type,\n self.page.id,\n self.page,\n )\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 89, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 8, "token_counts": 34, "n_ast_nodes": 50, "n_identifiers": 6, "random_cut": "def __str__(self):\n return \"Group %d ('%s') has permission '%s' on page %d ('%s')\" % (\n self.group.id,\n self.group,\n self.permission_type,\n self.page.id,\n self.page,\n )\n\n" }, { "id": 167427, "commit_id": "734db4f1fde2566a02b3c7ff661a479b0a71633c", "repo": "pandas", "path": "pandas/io/json/_json.py", "file_name": "_json.py", "fun_name": "write", "commit_message": "TYP: Return annotations for io/{formats,json} (#47516)\n\n* TYP: Return annotations for io/{formats,json}\r\n\r\n* flake8\r\n\r\n* explicitly check whether width is None", "code": "def write(self) -> str:\n iso_dates = self.date_format == \"iso\"\n return dumps(\n self.obj_to_write,\n orient=self.orient,\n double_precision=self.double_precision,\n ensure_ascii=self.ensure_ascii,\n date_unit=self.date_unit,\n iso_dates=iso_dates,\n default_handler=self.default_handler,\n indent=self.indent,\n )\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 128, "n_words": 20, "vocab_size": 20, "complexity": 1, "nloc": 12, "token_counts": 62, "n_ast_nodes": 90, "n_identifiers": 13, "random_cut": "def write(self) -> str:\n iso_dates = self.date_format == \"iso\"\n return dumps(\n self.obj_to_write,\n orient=self.orient,\n double_precision=self.double_precision,\n ensure_ascii=self.ensure_a" }, { "id": 77412, "commit_id": "52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c", "repo": "wagtail", "path": "wagtail/admin/tests/pages/test_edit_page.py", "file_name": "test_edit_page.py", "fun_name": "test_edit_post_publish_scheduled_unpublished_page", "commit_message": "Replace `PageRevision` with generic `Revision` model (#8441)", "code": "def test_edit_post_publish_scheduled_unpublished_page(self):\n # Unpublish the 
page\n self.child_page.live = False\n self.child_page.save()\n\n go_live_at = timezone.now() + datetime.timedelta(days=1)\n expire_at = timezone.now() + datetime.timedelta(days=2)\n post_data = {\n \"title\": \"I've been edited!\",\n \"content\": \"Some content\",\n \"slug\": \"hello-world\",\n \"action-publish\": \"Publish\",\n \"go_live_at\": submittable_timestamp(go_live_at),\n \"expire_at\": submittable_timestamp(expire_at),\n }\n response = self.client.post(\n reverse(\"wagtailadmin_pages:edit\", args=(self.child_page.id,)), post_data\n )\n\n # Should be redirected to explorer page\n self.assertEqual(response.status_code, 302)\n\n child_page_new = SimplePage.objects.get(id=self.child_page.id)\n\n # The page should not be live anymore\n self.assertFalse(child_page_new.live)\n\n # Instead a revision with approved_go_live_at should now exist\n self.assertTrue(\n Revision.page_revisions.filter(object_id=child_page_new.id)\n .exclude(approved_go_live_at__isnull=True)\n .exists()\n )\n\n # The page SHOULD have the \"has_unpublished_changes\" flag set,\n # because the changes are not visible as a live page yet\n self.assertTrue(\n child_page_new.has_unpublished_changes,\n \"A page scheduled for future publishing should have has_unpublished_changes=True\",\n )\n\n self.assertEqual(child_page_new.status_string, \"scheduled\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 401, "n_words": 116, "vocab_size": 85, "complexity": 1, "nloc": 29, "token_counts": 194, "n_ast_nodes": 330, "n_identifiers": 37, "random_cut": "def test_edit_post_publish_scheduled_unpublished_page(self):\n # Unpublish the page\n self.child_page.live = False\n self.child_page.save()\n\n go_" }, { "id": 89252, "commit_id": "d07b2f35a620fd8cec5f17a7a1605024b8b5afff", "repo": "sentry", "path": "tests/sentry/api/serializers/test_organization.py", "file_name": "test_organization.py", "fun_name": "test_simple", "commit_message": "feat(discover-homepage): Default feature flag to True (#41969)\n\nThis feature has been GA'd for a while now. 
Defaulting the feature flag\r\nto True for self hosted releases", "code": "def test_simple(self):\n user = self.create_user()\n organization = self.create_organization(owner=user)\n\n result = serialize(organization, user)\n\n assert result[\"id\"] == str(organization.id)\n assert result[\"features\"] == {\n \"advanced-search\",\n \"change-alerts\",\n \"crash-rate-alerts\",\n \"custom-event-title\",\n \"custom-symbol-sources\",\n \"data-forwarding\",\n \"dashboards-basic\",\n \"dashboards-edit\",\n \"dashboards-top-level-filter\",\n \"discover-basic\",\n \"discover-query\",\n \"discover-query-builder-as-landing-page\",\n \"event-attachments\",\n \"integrations-alert-rule\",\n \"integrations-chat-unfurl\",\n \"integrations-deployment\",\n \"integrations-event-hooks\",\n \"integrations-incident-management\",\n \"integrations-issue-basic\",\n \"integrations-issue-sync\",\n \"integrations-ticket-rules\",\n \"invite-members\",\n \"invite-members-rate-limits\",\n \"minute-resolution-sessions\",\n \"open-membership\",\n \"relay\",\n \"shared-issues\",\n \"sso-basic\",\n \"sso-saml2\",\n \"symbol-sources\",\n \"team-insights\",\n \"discover-frontend-use-events-endpoint\",\n \"performance-frontend-use-events-endpoint\",\n \"performance-issues-ingest\",\n }\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 470, "n_words": 55, "vocab_size": 51, "complexity": 1, "nloc": 41, "token_counts": 118, "n_ast_nodes": 219, "n_identifiers": 11, "random_cut": "def test_simple(self):\n user = self.create_user()\n organization = self.create_organization(owner=user)\n\n result = serialize(organization, user)\n\n assert result[\"id\"] == str(organization.id)\n assert result[\"features\"] == {\n \"advanced-search\",\n \"change-alerts\",\n \"crash-rate-alerts\",\n \"custom-event-title\",\n \"custom-symbol-sources\",\n \"data-forwarding\",\n \"dashboards-basic\",\n \"dashboards-edit\",\n \"dashboards-top-level-filter\",\n \"discover-basic\",\n \"discover-query\",\n \"discover-query-builder-as-landing-page\",\n \"event-attachments\",\n \"integrations-alert-rule\",\n \"integrations-chat-unfurl\",\n \"integrations-deployment\",\n \"integrations-event-hooks\",\n \"integrations-incident-management\",\n \"integrations-issue-basic\",\n \"integrations-issue-sync\",\n \"integrations-ticket-rules\",\n \"invite-m" }, { "id": 178389, "commit_id": "2c20b90946a8aa5ad4ee39ad365ff1b83f182770", "repo": "Nuitka", "path": "nuitka/freezer/IncludedEntryPoints.py", "file_name": "IncludedEntryPoints.py", "fun_name": "addShlibEntryPoint", "commit_message": "UI: In case of PermissionError, allow uses to retry\n\n* Esp. 
on Windows it happens a lot that running programs cannot be\n updated by Nuitka, this avoids the cryptic error somewhere ranomly.", "code": "def addShlibEntryPoint(module):\n target_filename = os.path.join(\n getStandaloneDirectoryPath(), module.getFullName().asPath()\n )\n target_filename += getSharedLibrarySuffix(preferred=False)\n\n target_dir = os.path.dirname(target_filename)\n\n if not os.path.isdir(target_dir):\n makePath(target_dir)\n\n copyFile(module.getFilename(), target_filename)\n\n standalone_entry_points.append(\n makeExtensionModuleEntryPoint(\n source_path=module.getFilename(),\n dest_path=target_filename,\n package_name=module.getFullName().getPackageName(),\n )\n )\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 111, "n_words": 27, "vocab_size": 23, "complexity": 2, "nloc": 16, "token_counts": 102, "n_ast_nodes": 165, "n_identifiers": 24, "random_cut": "def addShlibEntryPoint(module):\n target_filename = os.path.join(\n getStandaloneDirectoryPath(), module.getFullName().asPath()\n " }, { "id": 253223, "commit_id": "0bbb0215c16bbeaf3b048c023ed0ee55f57b0de8", "repo": "mitmproxy", "path": "mitmproxy/utils/debug.py", "file_name": "debug.py", "fun_name": "dump_system_info", "commit_message": "more mypy (#5724)\n\nCo-authored-by: requires.io \r\nCo-authored-by: Maximilian Hils ", "code": "def dump_system_info():\n mitmproxy_version = version.get_dev_version()\n openssl_version = SSL.SSLeay_version(SSL.SSLEAY_VERSION)\n if isinstance(openssl_version, bytes):\n openssl_version = openssl_version.decode()\n\n data = [\n f\"Mitmproxy: {mitmproxy_version}\",\n f\"Python: {platform.python_version()}\",\n f\"OpenSSL: {openssl_version}\",\n f\"Platform: {platform.platform()}\",\n ]\n return \"\\n\".join(data)\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 86, "n_words": 28, "vocab_size": 24, "complexity": 2, "nloc": 12, "token_counts": 59, "n_ast_nodes": 127, "n_identifiers": 15, "random_cut": "def dump_system_info():\n mitmproxy_version = version.get_dev_version()\n openssl_version = SSL.SSLeay_version(SSL.SSLEAY_VERSION)\n if isinstance(openssl_version, bytes):\n openssl_version = openssl_version.decode()\n\n data = [\n f\"Mitmproxy: {mitmproxy_version}\",\n f\"Python: {platform.python_version()}\",\n f\"OpenSSL: {openssl_version}\",\n f\"Platform: {platform.platform()}\",\n ]\n retu" }, { "id": 69262, "commit_id": "77fdc37cb75d465a7a5297fc89bba31b8193ebeb", "repo": "erpnext", "path": "erpnext/selling/doctype/sales_order/sales_order.py", "file_name": "sales_order.py", "fun_name": "make_purchase_order_for_default_supplier", "commit_message": "fix: use default supplier currency if default supplier is enabled", "code": "def make_purchase_order_for_default_supplier(source_name, selected_items=None, target_doc=None):\n\t\n\n\tfrom erpnext.setup.utils import get_exchange_rate\n\n\tif not selected_items:\n\t\treturn\n\n\tif isinstance(selected_items, str):\n\t\tselected_items = json.loads(selected_items)\n\n\tdef set_missing_values(source, target):\n\t\ttarget.supplier = supplier\n\t\ttarget.currency = frappe.db.get_value(\n\t\t\t\"Supplier\", filters={\"name\": supplier}, fieldname=[\"default_currency\"]\n\t\t)\n\t\tcompany_currency = frappe.db.get_value(\n\t\t\t\"Company\", filters={\"name\": target.company}, fieldname=[\"default_currency\"]\n\t\t)\n\n\t\ttarget.conversion_rate = 
get_exchange_rate(target.currency, company_currency, args=\"for_buying\")\n\n\t\ttarget.apply_discount_on = \"\"\n\t\ttarget.additional_discount_percentage = 0.0\n\t\ttarget.discount_amount = 0.0\n\t\ttarget.inter_company_order_reference = \"\"\n\t\ttarget.shipping_rule = \"\"\n\n\t\tdefault_price_list = frappe.get_value(\"Supplier\", supplier, \"default_price_list\")\n\t\tif default_price_list:\n\t\t\ttarget.buying_price_list = default_price_list\n\n\t\tif any(item.delivered_by_supplier == 1 for item in source.items):\n\t\t\tif source.shipping_address_name:\n\t\t\t\ttarget.shipping_address = source.shipping_address_name\n\t\t\t\ttarget.shipping_address_display = source.shipping_address\n\t\t\telse:\n\t\t\t\ttarget.shipping_address = source.customer_address\n\t\t\t\ttarget.shipping_address_display = source.address_display\n\n\t\t\ttarget.customer_contact_person = source.contact_person\n\t\t\ttarget.customer_contact_display = source.contact_display\n\t\t\ttarget.customer_contact_mobile = source.contact_mobile\n\t\t\ttarget.customer_contact_email = source.contact_email\n\n\t\telse:\n\t\t\ttarget.customer = \"\"\n\t\t\ttarget.customer_name = \"\"\n\n\t\ttarget.run_method(\"set_missing_values\")\n\t\ttarget.run_method(\"calculate_taxes_and_totals\")\n\n\tdef update_item(source, target, source_parent):\n\t\ttarget.schedule_date = source.delivery_date\n\t\ttarget.qty = flt(source.qty) - (flt(source.ordered_qty) / flt(source.conversion_factor))\n\t\ttarget.stock_qty = flt(source.stock_qty) - flt(source.ordered_qty)\n\t\ttarget.project = source_parent.project\n\n\tsuppliers = [item.get(\"supplier\") for item in selected_items if item.get(\"supplier\")]\n\tsuppliers = list(dict.fromkeys(suppliers)) # remove duplicates while preserving order\n\n\titems_to_map = [item.get(\"item_code\") for item in selected_items if item.get(\"item_code\")]\n\titems_to_map = list(set(items_to_map))\n\n\tif not suppliers:\n\t\tfrappe.throw(\n\t\t\t_(\"Please set a Supplier against the Items to be considered in the Purchase Order.\")\n\t\t)\n\n\tpurchase_orders = []\n\tfor supplier in suppliers:\n\t\tdoc = get_mapped_doc(\n\t\t\t\"Sales Order\",\n\t\t\tsource_name,\n\t\t\t{\n\t\t\t\t\"Sales Order\": {\n\t\t\t\t\t\"doctype\": \"Purchase Order\",\n\t\t\t\t\t\"field_no_map\": [\n\t\t\t\t\t\t\"address_display\",\n\t\t\t\t\t\t\"contact_display\",\n\t\t\t\t\t\t\"contact_mobile\",\n\t\t\t\t\t\t\"contact_email\",\n\t\t\t\t\t\t\"contact_person\",\n\t\t\t\t\t\t\"taxes_and_charges\",\n\t\t\t\t\t\t\"shipping_address\",\n\t\t\t\t\t\t\"terms\",\n\t\t\t\t\t],\n\t\t\t\t\t\"validation\": {\"docstatus\": [\"=\", 1]},\n\t\t\t\t},\n\t\t\t\t\"Sales Order Item\": {\n\t\t\t\t\t\"doctype\": \"Purchase Order Item\",\n\t\t\t\t\t\"field_map\": [\n\t\t\t\t\t\t[\"name\", \"sales_order_item\"],\n\t\t\t\t\t\t[\"parent\", \"sales_order\"],\n\t\t\t\t\t\t[\"stock_uom\", \"stock_uom\"],\n\t\t\t\t\t\t[\"uom\", \"uom\"],\n\t\t\t\t\t\t[\"conversion_factor\", \"conversion_factor\"],\n\t\t\t\t\t\t[\"delivery_date\", \"schedule_date\"],\n\t\t\t\t\t],\n\t\t\t\t\t\"field_no_map\": [\n\t\t\t\t\t\t\"rate\",\n\t\t\t\t\t\t\"price_list_rate\",\n\t\t\t\t\t\t\"item_tax_template\",\n\t\t\t\t\t\t\"discount_percentage\",\n\t\t\t\t\t\t\"discount_amount\",\n\t\t\t\t\t\t\"pricing_rules\",\n\t\t\t\t\t],\n\t\t\t\t\t\"postprocess\": update_item,\n\t\t\t\t\t\"condition\": lambda doc: doc.ordered_qty < doc.stock_qty\n\t\t\t\t\tand doc.supplier == supplier\n\t\t\t\t\tand doc.item_code in 
items_to_map,\n\t\t\t\t},\n\t\t\t},\n\t\t\ttarget_doc,\n\t\t\tset_missing_values,\n\t\t)\n\n\t\tdoc.insert()\n\t\tfrappe.db.commit()\n\t\tpurchase_orders.append(doc)\n\n\treturn purchase_orders\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 19, "n_whitespaces": 177, "n_words": 280, "vocab_size": 186, "complexity": 11, "nloc": 67, "token_counts": 305, "n_ast_nodes": 1020, "n_identifiers": 80, "random_cut": "def make_purchase_order_for_default_supplier(source_name, selected_items=None, target_doc=None):\n\t\n\n\tfrom erpnext.setup.utils import get_exchange_rate\n\n\tif not selected_items:\n\t\treturn\n\n\tif isinstance(selected_items, str):\n\t\tselected_items = json.loads(selected_items)\n\n\tdef set_missing_values(source, target):\n\t\ttarget.supplier = supplier\n\t\ttarget.currency = frappe.db.get_value(\n\t\t\t\"Supplier\", filters={\"name\": supplier}, fieldname=[\"default_currency\"]\n\t\t)\n\t\tcompany_currency = frappe.db.get_value(\n\t\t\t\"Company\", filters={\"name\": target.company}, fieldname=[\"default_currency\"]\n\t\t)\n\n\t\ttarget.conversion_rate = get_exchange_rate(target.currency, company_currency, args=\"for_buying\")\n\n\t\ttarget.apply_discount_on = \"\"\n\t\ttarget.additional_discount_percentage = 0.0\n\t\ttarget.discount_amount = 0.0\n\t\ttarget.inter_company_order_reference = \"\"\n\t\ttarget.shipping_rule = \"\"\n\n\t\tdefault_price_list = frappe.get_value(\"Supplier\", supplier, \"default_price_list\")\n\t\tif default_price_list:\n\t\t\ttarget.buying_price_list = default_price_list\n\n\t\tif any(item.delivered_by_supplier == 1 for item in source.items):\n\t\t\tif source.shipping_address_name:\n\t\t\t\ttarget.shipping_address = source.shipping_address_name\n\t\t\t\ttarget.shipping_address_display = source.shipping_address\n\t\t\telse:\n\t\t\t\ttarget.shipping_address = source.customer_address\n\t\t\t\ttarget.shipping_address_display = source.address_display\n\n\t\t\ttarget.customer_contact_person = source.contact_person\n\t\t\ttarget.customer_contact_display = source." 
}, { "id": 69635, "commit_id": "3eb1ed19a19a0e26e9814d70267530769bf8b274", "repo": "erpnext", "path": "erpnext/accounts/report/tds_payable_monthly/tds_payable_monthly.py", "file_name": "tds_payable_monthly.py", "fun_name": "get_tds_docs", "commit_message": "fix: Tax withholding net total for PI in reports", "code": "def get_tds_docs(filters):\n\ttds_documents = []\n\tpurchase_invoices = []\n\tpayment_entries = []\n\tjournal_entries = []\n\ttax_category_map = frappe._dict()\n\tinvoice_net_total_map = frappe._dict()\n\tor_filters = frappe._dict()\n\tjournal_entry_party_map = frappe._dict()\n\tbank_accounts = frappe.get_all(\"Account\", {\"is_group\": 0, \"account_type\": \"Bank\"}, pluck=\"name\")\n\n\ttds_accounts = frappe.get_all(\n\t\t\"Tax Withholding Account\", {\"company\": filters.get(\"company\")}, pluck=\"account\"\n\t)\n\n\tquery_filters = {\n\t\t\"account\": (\"in\", tds_accounts),\n\t\t\"posting_date\": (\"between\", [filters.get(\"from_date\"), filters.get(\"to_date\")]),\n\t\t\"is_cancelled\": 0,\n\t\t\"against\": (\"not in\", bank_accounts),\n\t}\n\n\tif filters.get(\"supplier\"):\n\t\tdel query_filters[\"account\"]\n\t\tdel query_filters[\"against\"]\n\t\tor_filters = {\"against\": filters.get(\"supplier\"), \"party\": filters.get(\"supplier\")}\n\n\ttds_docs = frappe.get_all(\n\t\t\"GL Entry\",\n\t\tfilters=query_filters,\n\t\tor_filters=or_filters,\n\t\tfields=[\"voucher_no\", \"voucher_type\", \"against\", \"party\"],\n\t)\n\n\tfor d in tds_docs:\n\t\tif d.voucher_type == \"Purchase Invoice\":\n\t\t\tpurchase_invoices.append(d.voucher_no)\n\t\telif d.voucher_type == \"Payment Entry\":\n\t\t\tpayment_entries.append(d.voucher_no)\n\t\telif d.voucher_type == \"Journal Entry\":\n\t\t\tjournal_entries.append(d.voucher_no)\n\n\t\ttds_documents.append(d.voucher_no)\n\n\tif purchase_invoices:\n\t\tget_doc_info(purchase_invoices, \"Purchase Invoice\", tax_category_map, invoice_net_total_map)\n\n\tif payment_entries:\n\t\tget_doc_info(payment_entries, \"Payment Entry\", tax_category_map)\n\n\tif journal_entries:\n\t\tjournal_entry_party_map = get_journal_entry_party_map(journal_entries)\n\t\tget_doc_info(journal_entries, \"Journal Entry\", tax_category_map)\n\n\treturn (\n\t\ttds_documents,\n\t\ttds_accounts,\n\t\ttax_category_map,\n\t\tjournal_entry_party_map,\n\t\tinvoice_net_total_map,\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 87, "n_words": 138, "vocab_size": 95, "complexity": 9, "nloc": 51, "token_counts": 320, "n_ast_nodes": 549, "n_identifiers": 26, "random_cut": "def get_tds_docs(filters):\n\ttds_documents = []\n\tpurchase_invoices = []\n\t" }, { "id": 26349, "commit_id": "41b87559118f560c223f83d405efe9b406701d17", "repo": "saleor", "path": "saleor/graphql/order/tests/test_order_invoices.py", "file_name": "test_order_invoices.py", "fun_name": "test_order_query_invoices_customer_user_by_token", "commit_message": "Migrate order id from int to UUID (#9324)\n\n* Add migration to change order id from int to UUID (#9281)\r\n\r\n* Change order token to uuid\r\n\r\n* Migrate order id to uuid\r\n\r\n* Fix failing tests\r\n\r\n* Apply code review suggestions\r\n\r\n* Fix payment migration dependencies\r\n\r\n* Fix typo in order migration name\r\n\r\n* Handle old order ids for order queries\r\n\r\n* Hanlde old order ids for order mutations\r\n\r\n* Add order relation to GiftCardEvent model\r\n\r\n* Deprecate order token related queries and fields (#9295)\r\n\r\n* Deprecate 
order.token field\r\n\r\n* Update description of orderByToken query\r\n\r\n* Update prepare_order_search_document_value method\r\n\r\n* Update changelog\r\n\r\n* Update schema file", "code": "def test_order_query_invoices_customer_user_by_token(api_client, fulfilled_order):\n query = \n response = api_client.post_graphql(query, {\"token\": fulfilled_order.id})\n assert_no_permission(response)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 20, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 14, "token_counts": 30, "n_ast_nodes": 51, "n_identifiers": 8, "random_cut": "def test_order_query_invoices_customer_user_by_token(api_client, fulfilled_order):\n query = \n response = api_client" }, { "id": 101989, "commit_id": "2e8ef5e3c8f2df0f1cca9b342baa8aaa6f620650", "repo": "faceswap", "path": "lib/training/preview_tk.py", "file_name": "preview_tk.py", "fun_name": "_add_save_button", "commit_message": "GUI - Preview updates\n - Training preview. Embed preview pop-out window\n - Bugfix - convert/extract previews", "code": "def _add_save_button(self) -> None:\n \n logger.debug(\"Adding save button\")\n button = tk.Button(self._frame,\n text=\"Save\",\n cursor=\"hand2\",\n command=lambda: self.save_var.set(True))\n button.pack(side=tk.LEFT)\n logger.debug(\"Added save burron: '%s'\", button)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 133, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 9, "token_counts": 63, "n_ast_nodes": 107, "n_identifiers": 16, "random_cut": "def _add_save_button(self) -> None:\n \n logger.debug(\"Adding save button\")\n button = tk.Button(self._frame,\n text=\"Save\",\n cursor=\"hand2\",\n command=lambda: self.save_var.set(True))\n button.pack(side=tk.LEFT)\n logger.debug(\"Added save burron: '%s'\", button)\n" }, { "id": 256244, "commit_id": "a59bca366174d9c692fa19750c24d65f47660ef7", "repo": "haystack", "path": "haystack/modeling/model/prediction_head.py", "file_name": "prediction_head.py", "fun_name": "resize_input", "commit_message": "Apply black formatting (#2115)\n\n* Testing black on ui/\r\n\r\n* Applying black on docstores\r\n\r\n* Add latest docstring and tutorial changes\r\n\r\n* Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too\r\n\r\n* Remove comments\r\n\r\n* Relax constraints on pydoc-markdown\r\n\r\n* Split temporary black from the docs. 
Pydoc-markdown was obsolete and needs a separate PR to upgrade\r\n\r\n* Fix a couple of bugs\r\n\r\n* Add a type: ignore that was missing somehow\r\n\r\n* Give path to black\r\n\r\n* Apply Black\r\n\r\n* Apply Black\r\n\r\n* Relocate a couple of type: ignore\r\n\r\n* Update documentation\r\n\r\n* Make Linux CI run after applying Black\r\n\r\n* Triggering Black\r\n\r\n* Apply Black\r\n\r\n* Remove dependency, does not work well\r\n\r\n* Remove manually double trailing commas\r\n\r\n* Update documentation\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def resize_input(self, input_dim):\n \n if \"feed_forward\" not in dir(self):\n return\n else:\n old_dims = self.feed_forward.layer_dims\n if input_dim == old_dims[0]:\n return\n new_dims = [input_dim] + old_dims[1:]\n logger.info(\n f\"Resizing input dimensions of {type(self).__name__} ({self.task_name}) \"\n f\"from {old_dims} to {new_dims} to match language model\"\n )\n self.feed_forward = FeedForwardBlock(new_dims)\n self.layer_dims[0] = input_dim\n self.feed_forward.layer_dims[0] = input_dim\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 214, "n_words": 49, "vocab_size": 40, "complexity": 3, "nloc": 15, "token_counts": 82, "n_ast_nodes": 162, "n_identifiers": 14, "random_cut": "def resize_input(self, input_dim):\n \n if \"feed_forward\" not in dir(self):\n return\n else:\n old_dims = self.feed_forward.layer_dims\n if input_dim == old_dims[0]:\n return\n new_dims = [input_dim] + old_dims[1:]\n logger.info(\n f\"Resizing input dimensions of {type(self).__name__} ({self.task_name}) \"\n " }, { "id": 304127, "commit_id": "65eb1584f765dcc2ec502bd8a9fa8d2f23d47cfd", "repo": "core", "path": "homeassistant/components/apple_tv/media_player.py", "file_name": "media_player.py", "fun_name": "async_media_stop", "commit_message": "Improve entity type hints [a] (#76986)", "code": "async def async_media_stop(self) -> None:\n \n if self.atv:\n await self.atv.remote_control.stop()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 34, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 4, "token_counts": 23, "n_ast_nodes": 42, "n_identifiers": 5, "random_cut": "async def async_media_stop(self) -> None:\n \n if self.atv:\n await self.atv.remote_control.stop()\n" }, { "id": 71349, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/api/test_pages.py", "file_name": "test_pages.py", "fun_name": "get_response", "commit_message": "Reformat with black", "code": "def get_response(self, page_id, **params):\n return self.client.get(\n reverse(\"wagtailadmin_api:pages:detail\", args=(page_id,)), params\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 34, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 31, "n_ast_nodes": 47, "n_identifiers": 8, "random_cut": "def get_response(self, page_id, **params):\n return self.client.get(\n reverse(\"wagtailadmin_a" }, { "id": 252741, "commit_id": "f6ac5006982fd18dfe9f9a67c3534300c7ba8192", "repo": "mitmproxy", "path": "test/mitmproxy/contentviews/test_http3.py", "file_name": "test_http3.py", "fun_name": "test_render_priority", "commit_message": "add HTTP/3 content view", "code": 
"def test_render_priority():\n v = http3.ViewHttp3()\n assert not v.render_priority(b\"random stuff\")\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 14, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 35, "n_identifiers": 5, "random_cut": "def test_render_priority():\n v = http3.ViewHttp3()\n assert not v.render_priority(b\"rando" }, { "id": 300624, "commit_id": "4885331509eeffe50f42d76b234996467b06170f", "repo": "core", "path": "tests/helpers/test_event.py", "file_name": "test_event.py", "fun_name": "test_track_template_result_none", "commit_message": "Fail template functions when no default specified (#71687)", "code": "async def test_track_template_result_none(hass):\n \n specific_runs = []\n wildcard_runs = []\n wildercard_runs = []\n\n template_condition = Template(\"{{state_attr('sensor.test', 'battery')}}\", hass)\n template_condition_var = Template(\n \"{{(state_attr('sensor.test', 'battery')|int(default=0)) + test }}\", hass\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 55, "n_words": 27, "vocab_size": 21, "complexity": 1, "nloc": 34, "token_counts": 205, "n_ast_nodes": 63, "n_identifiers": 8, "random_cut": "async def test_track_template_result_none(hass):\n \n specific_runs = []\n wildcard_runs = []\n wildercard_runs = []\n\n template_condition =" }, { "id": 176268, "commit_id": "17fa9942568bfca34d4a68f8d93c538014f69389", "repo": "networkx", "path": "networkx/algorithms/clique.py", "file_name": "clique.py", "fun_name": "find_cliques_recursive", "commit_message": "Fix functions appearing in variables `__all__` but not in docs for NX2.7 (#5289)\n\n* Adjust functions appearing in `__all__` but not in docs\r\n\r\n* clean up coloring: merge two modules make interchange private\r\n\r\n* fix duplicate name. Probably should be changed\r\n\r\n* fix \"see also\" doc of recursive_simple_cycles\r\n\r\n* Rm internal uses of deprecated .\r\n\r\n* Fixup warnings filters regex.\r\n\r\n* clean up a bit more, make Node & AdjList private classes\r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Mridul Seth ", "code": "def find_cliques_recursive(G):\n \n if len(G) == 0:\n return iter([])\n\n adj = {u: {v for v in G[u] if v != u} for u in G}\n Q = []\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 46, "n_words": 27, "vocab_size": 22, "complexity": 5, "nloc": 7, "token_counts": 63, "n_ast_nodes": 77, "n_identifiers": 8, "random_cut": "def find_cliques_recursive(G):\n \n if len(G) == 0:\n return iter([])\n\n adj = {u: {v for v in G[u] if v != u}" }, { "id": 249846, "commit_id": "6d47b7e32589e816eb766446cc1ff19ea73fc7c1", "repo": "synapse", "path": "synapse/server.py", "file_name": "server.py", "fun_name": "get_device_handler", "commit_message": "Add a type hint for `get_device_handler()` and fix incorrect types. (#14055)\n\nThis was the last untyped handler from the HomeServer object. 
Since\r\nit was being treated as Any (and thus unchecked) it was being used\r\nincorrectly in a few places.", "code": "def get_device_handler(self) -> DeviceWorkerHandler:\n if self.config.worker.worker_app:\n return DeviceWorkerHandler(self)\n else:\n return DeviceHandler(self)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 46, "n_words": 11, "vocab_size": 10, "complexity": 2, "nloc": 5, "token_counts": 28, "n_ast_nodes": 46, "n_identifiers": 7, "random_cut": "def get_device_handler(self) -> DeviceWorkerHandler:\n if self.config.worker.worker_app:\n return DeviceWorkerHandler(self)\n else:\n return DeviceHandler(self)\n" }, { "id": 213106, "commit_id": "a5db070f446b7cfebdaa6ad2e3dcf78f6105a272", "repo": "serverless-application-model", "path": "tests/utils/test_py27hash_fix.py", "file_name": "test_py27hash_fix.py", "fun_name": "test_resources_api", "commit_message": "fix: Py27hash fix (#2182)\n\n* Add third party py27hash code\r\n\r\n* Add Py27UniStr and unit tests\r\n\r\n* Add py27hash_fix utils and tests\r\n\r\n* Add to_py27_compatible_template and tests\r\n\r\n* Apply py27hash fix to wherever it is needed\r\n\r\n* Apply py27hash fix, all tests pass except api_with_any_method_in_swagger\r\n\r\n* apply py27hash fix in openapi + run black\r\n\r\n* remove py27 testing\r\n\r\n* remove other py27 references\r\n\r\n* black fixes\r\n\r\n* fixes/typos\r\n\r\n* remove py27 from tox.ini\r\n\r\n* refactoring\r\n\r\n* third party notice\r\n\r\n* black\r\n\r\n* Fix py27hash fix to deal with null events\r\n\r\n* Fix Py27UniStr repr for unicode literals\r\n\r\n* black reformat\r\n\r\n* Update _template_has_api_resource to check data type more defensively\r\n\r\n* Apply py27Dict in _get_authorizers\r\n\r\n* Apply Py27Dict to authorizers and gateway responses which will go into swagger\r\n\r\n* Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class\r\n\r\n* Rename _convert_to_py27_dict to _convert_to_py27_type\r\n\r\n* Apply Py27UniStr to path param name\r\n\r\n* Handle HttpApi resource under to_py27_compatible_template\r\n\r\n* Fix InvalidDocumentException to not sort different exceptions\r\n\r\n* black reformat\r\n\r\n* Remove unnecessary test files\r\n\r\nCo-authored-by: Wing Fung Lau <4760060+hawflau@users.noreply.github.com>", "code": "def test_resources_api(self):\n template = {\n \"Resources\": {\n \"Api\": {\"Type\": \"AWS::Serverless::Api\", \"Properties\": {\"Name\": \"MyApi\"}},\n \"HttpApi\": {\"Type\": \"AWS::Serverless::HttpApi\"},\n \"Function\": {\n \"Type\": \"AWS::Serverless::Function\",\n \"Properties\": {\n \"FunctionName\": {\"Ref\": \"MyFunctionName\"},\n \"Events\": {\n \"ApiEvent\": {\"Type\": \"Api\", \"Properties\": {\"Path\": \"/user\", \"Method\": \"GET\"}},\n \"SecondApiEvent\": {\"Type\": \"Api\", \"Properties\": {\"Path\": \"/admin\", \"Method\": \"GET\"}},\n },\n },\n },\n \"StateMachine\": {\n \"Type\": \"AWS::Serverless::StateMachine\",\n \"Condition\": \"ShouldAddStateMachine\",\n \"Properties\": {\n \"Event\": {\n \"ApiEvent\": {\"Type\": \"Api\", \"Properties\": {\"Path\": \"/state-machine\", \"Method\": \"GET\"}}\n }\n },\n },\n }\n }\n to_py27_compatible_template(template)\n self.assertIsInstance(template[\"Resources\"], Py27Dict)\n self.assertNotIsInstance(template[\"Resources\"][\"Api\"], Py27Dict)\n self.assertIsInstance(template[\"Resources\"][\"Api\"][\"Properties\"], Py27Dict)\n 
self.assertIsInstance(template[\"Resources\"][\"Api\"][\"Properties\"][\"Name\"], Py27UniStr)\n", "url": "https://github.com/aws/serverless-application-model.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 567, "n_words": 78, "vocab_size": 45, "complexity": 1, "nloc": 31, "token_counts": 211, "n_ast_nodes": 415, "n_identifiers": 8, "random_cut": "def test_resources_api(self):\n template = {\n \"Resources\": {\n \"Api\": {\"Type\": \"AWS::Serverless::Api\", \"Properties\": {\"Name\": \"MyApi\"}},\n \"HttpApi\": {\"Type\": \"AWS::Serverless::HttpApi\"},\n \"Function\": {\n \"Type\": \"AWS::Serverless::Function\",\n \"Properties\": {\n \"FunctionName\": {\"Ref\": \"MyFunctionName\"},\n \"Events\": {\n \"ApiEvent\": {\"Type\": \"Api\", \"Properties\": {\"Path\": \"/user\", \"Method\": \"GET\"}},\n \"SecondApiEvent\": {\"Type\": \"Api\", \"Properties\": {\"Path\": \"/admin\", \"Method\": \"GET\"}},\n },\n },\n },\n \"StateMachine\": {\n \"Type\": \"AWS::Serverless::StateMachine\",\n \"Condition\": \"ShouldAddStateMachine\",\n \"Properties\": {\n \"Event\": {\n \"ApiEvent\": {\"Type\": \"Api\", \"Properties\": {\"Path\": \"/state-machine\", \"Method\": \"GET\"}}\n }\n },\n },\n }\n }\n to_py27_compatible_template(template)\n self.assertIsInstance(template[\"Resources\"], Py27Dict)\n self.as" }, { "id": 72997, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/forms/tests/test_forms.py", "file_name": "test_forms.py", "fun_name": "setUp", "commit_message": "Reformat with black", "code": "def setUp(self):\n # Create a form page\n home_page = Page.objects.get(url_path=\"/home/\")\n\n self.form_page = home_page.add_child(\n instance=FormPage(\n title=\"Contact us\",\n slug=\"contact-us\",\n to_address=\"to@email.com\",\n from_address=\"from@email.com\",\n subject=\"The subject\",\n )\n )\n\n FormField.objects.create(\n page=self.form_page,\n sort_order=1,\n label=\"Your name\",\n field_type=\"singleline\",\n required=True,\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=2,\n label=\"Your message\",\n field_type=\"multiline\",\n required=True,\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=2,\n label=\"Your birthday\",\n field_type=\"date\",\n required=True,\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=2,\n label=\"Your birthtime :)\",\n field_type=\"datetime\",\n required=True,\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=1,\n label=\"Your email\",\n field_type=\"email\",\n required=True,\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=2,\n label=\"Your homepage\",\n field_type=\"url\",\n required=True,\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=2,\n label=\"Your favourite number\",\n field_type=\"number\",\n required=True,\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=2,\n label=\"Your favourite text editors\",\n field_type=\"multiselect\",\n required=True,\n choices=\"vim,nano,emacs\",\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=2,\n label=\"Your favourite Python IDEs\",\n field_type=\"dropdown\",\n required=True,\n choices=\"PyCharm,vim,nano\",\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=2,\n label=\"Ὕour favourite Ρython ÏÐÈ\", # unicode example\n help_text=\"Choose one\",\n field_type=\"radio\",\n required=True,\n choices=\"PyCharm,vim,nano\",\n )\n FormField.objects.create(\n 
page=self.form_page,\n sort_order=3,\n label=\"Your choices\",\n field_type=\"checkboxes\",\n required=False,\n choices=\"foo,bar,baz\",\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=3,\n label=\"I agree to the Terms of Use\",\n field_type=\"checkbox\",\n required=True,\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=1,\n label=\"A Hidden Field\",\n field_type=\"hidden\",\n required=False,\n )\n\n # Create a form builder\n self.fb = FormBuilder(self.form_page.get_form_fields())\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 1249, "n_words": 158, "vocab_size": 79, "complexity": 1, "nloc": 108, "token_counts": 462, "n_ast_nodes": 721, "n_identifiers": 28, "random_cut": "def setUp(self):\n # Create a form page\n home_page = Page.objects.get(url_path=\"/home/\")\n\n self.form_page = home_page.add_child(\n instance=FormPage(\n title=\"Contact us\",\n slug=\"contact-us\",\n to_address=\"to@email.com\",\n from_address=\"from@email.com\",\n subject=\"The subject\",\n )\n )\n\n FormField.objects.create(\n page=self.form_page,\n sort_order=1,\n label=\"Your name\",\n field_type=\"singleline\",\n required=True,\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=2,\n label=\"Your message\",\n field_type=\"multiline\",\n required=True,\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=2,\n label=\"Your birthday\",\n field_type=\"date\",\n required=True,\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=2,\n label=\"Your birthtime :)\",\n field_type=\"datetime\",\n required=True,\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=1,\n label=\"Your email\",\n field_type=\"email\",\n required=True,\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=2,\n label=\"Your homepage\",\n field_type=\"url\",\n required=True,\n )\n FormField.objects.create(\n page=self.form_page,\n sort_order=2,\n label=\"Your favourite number\",\n field_type=\"number\",\n require" }, { "id": 63399, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "__setstate__", "commit_message": "upd; format", "code": "def __setstate__(self, state):\n self.__toklist = state[0]\n self.__tokdict, par, inAccumNames, self.__name = state[1]\n self.__accumNames = {}\n self.__accumNames.update(inAccumNames)\n if par is not None:\n self.__parent = wkref(par)\n else:\n self.__parent = None\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 91, "n_words": 28, "vocab_size": 23, "complexity": 2, "nloc": 9, "token_counts": 66, "n_ast_nodes": 102, "n_identifiers": 12, "random_cut": "def __setstate__(self, state):\n self.__toklist = state[0]\n self.__tokdict, par, inAccumNames, self.__name = state[1]\n self.__accumNames = {}\n self.__accumNames.update(inAccumNames)\n if par is not None:\n " }, { "id": 102559, "commit_id": "54fe2741a1b16e36f714fa167f8f692886fd6640", "repo": "pytorch", "path": "test/fx_acc/test_acc_tracer.py", "file_name": "test_acc_tracer.py", "fun_name": "test_trunc_div", "commit_message": "[fx2trt] break down div (#71172)\n\nSummary:\nPull Request resolved: https://github.com/pytorch/pytorch/pull/71172\n\nBreak down div to smaller ops to make those div ops look like 
all other elementwise ops.\n\nUse operator div ops instead of torch div if possible to avoid converting literal numbers to torch tensor (like in the following).\n```\na = 1\nb = 2\n\n// `c` would be 0.5\nc = a / b\n\n// `c` would be torch.tensor([0.5])\nc = torch.div(a, b)\n```\n\nThe problem we saw on shufflenet is that there's size op followed by a div op which results in int64 tensors in acc traced graph (acc tracer turns operator.div to acc_ops.div which uses torch.div). And trt splitter splits out the reshape op that consumes the div op because we have a rule to split out ops that takes in int64 tensors as inputs.\n\nTest Plan: Unit tests.\n\nReviewed By: wushirong\n\nDifferential Revision: D33482231\n\nfbshipit-source-id: 508a171520c4e5b4188cfc5c30c1370ba9db1c55", "code": "def test_trunc_div(self):\n self._make_acc_op_function_test(acc_ops.trunc_div, lambda x: torch.div(x, 2, rounding_mode=\"trunc\"))\n self._make_acc_op_function_test(acc_ops.trunc_div, lambda x: torch.floor_divide(x, 2))\n", "url": "https://github.com/pytorch/pytorch.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 26, "n_words": 13, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 49, "n_ast_nodes": 76, "n_identifiers": 10, "random_cut": "def test_trunc_div(self):\n self._make_acc_op_function_test(acc_ops.trunc_div, lambda x: torch.div(x, 2, rounding_mode=\"tr" }, { "id": 311659, "commit_id": "076faaa4a4f231eb5b7b7c72fa20c239c7cc391c", "repo": "core", "path": "homeassistant/components/intent_script/__init__.py", "file_name": "__init__.py", "fun_name": "async_handle", "commit_message": "Add support to reprompt user (#65256)", "code": "async def async_handle(self, intent_obj):\n \n speech = self.config.get(CONF_SPEECH)\n reprompt = self.config.get(CONF_REPROMPT)\n card = self.config.get(CONF_CARD)\n action = self.config.get(CONF_ACTION)\n is_async_action = self.config.get(CONF_ASYNC_ACTION)\n slots = {key: value[\"value\"] for key, value in intent_obj.slots.items()}\n\n if action is not None:\n if is_async_action:\n intent_obj.hass.async_create_task(\n action.async_run(slots, intent_obj.context)\n )\n else:\n await action.async_run(slots, intent_obj.context)\n\n response = intent_obj.create_response()\n\n if speech is not None:\n response.async_set_speech(\n speech[CONF_TEXT].async_render(slots, parse_result=False),\n speech[CONF_TYPE],\n )\n\n if reprompt is not None and reprompt[CONF_TEXT].template:\n response.async_set_reprompt(\n reprompt[CONF_TEXT].async_render(slots, parse_result=False),\n reprompt[CONF_TYPE],\n )\n\n if card is not None:\n response.async_set_card(\n card[CONF_TITLE].async_render(slots, parse_result=False),\n card[CONF_CONTENT].async_render(slots, parse_result=False),\n card[CONF_TYPE],\n )\n\n return response\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 430, "n_words": 82, "vocab_size": 51, "complexity": 8, "nloc": 32, "token_counts": 239, "n_ast_nodes": 365, "n_identifiers": 35, "random_cut": "async def async_handle(self, intent_obj):\n \n speech = self.config.get(CONF_SPEECH)\n reprompt = self.config.get(CONF_REPROMPT)\n card = self.config.get(CONF_CARD)\n action = self.config.get(CONF_ACTION)\n is_async_action = self.config.get(CONF_ASYNC_ACTION)\n slots = {key: value[\"value\"] for key, value in intent_obj.slots.items()}\n\n if action is not None:\n if is_async_action:\n intent_obj.hass.async_create_task(\n 
action.async_run(slots, intent_obj.context)\n )\n else:\n await action.async_run(slots, intent_obj.context)\n\n response = intent_obj.create_response()\n\n if speech is not None:\n response.async_set_speech(\n speech[CONF_TEXT].async_render(slots, parse_result=False),\n speech[CONF_TYPE],\n )\n\n if " }, { "id": 191024, "commit_id": "301124c5b377fa56b940d298900dbc5816dbc24e", "repo": "thumbor", "path": "tests/storages/test_mixed_storage.py", "file_name": "test_mixed_storage.py", "fun_name": "test_mixed_storage_put_includes_path", "commit_message": "Reformat to 80 chars and mypy.ini", "code": "async def test_mixed_storage_put_includes_path(self):\n await self.storage.put(\"path1\", \"contents\")\n await self.storage.put_crypto(\"path1\")\n await self.storage.put_detector_data(\"path1\", \"detector\")\n\n expect(self.storage.file_storage.storage[\"path1\"][\"path\"]).to_equal(\n \"path1\"\n )\n expect(\n self.storage.file_storage.storage[\"path1\"][\"contents\"]\n ).to_equal(\"contents\")\n\n contents = await self.storage.get(\"path1\")\n expect(contents).to_equal(\"contents\")\n expect(self.storage.file_storage.storage[\"path1\"]).not_to_include(\n \"crypto\"\n )\n expect(self.storage.crypto_storage.storage[\"path1\"]).not_to_include(\n \"contents\"\n )\n expect(\n self.storage.crypto_storage.storage[\"path1\"][\"crypto\"]\n ).to_equal(\"security-key\")\n\n contents = await self.storage.get_crypto(\"path1\")\n expect(contents).to_equal(\"security-key\")\n\n contents = await self.storage.get_detector_data(\"path1\")\n expect(contents).to_equal(\"detector\")\n\n", "url": "https://github.com/thumbor/thumbor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 228, "n_words": 41, "vocab_size": 29, "complexity": 1, "nloc": 25, "token_counts": 195, "n_ast_nodes": 358, "n_identifiers": 15, "random_cut": "async def test_mixed_storage_put_includes_path(self):\n await self.storage.put(\"path1\", \"contents\")\n await self.storage.put_crypto(\"path1\")\n await self.storage.put_detector_data(\"path1\", \"detector\")\n\n exp" }, { "id": 299724, "commit_id": "0926470ef0f7bf7fd11da09a9d101ea17a4b4c00", "repo": "core", "path": "tests/components/media_player/test_browse_media.py", "file_name": "test_browse_media.py", "fun_name": "test_process_play_media_url", "commit_message": "Skip signing URL that we know requires no auth (#71208)", "code": "async def test_process_play_media_url(hass, mock_sign_path):\n \n await async_process_ha_core_config(\n hass,\n {\"internal_url\": \"http://example.local:8123\"},\n )\n hass.config.api = Mock(use_ssl=False, port=8123, local_ip=\"192.168.123.123\")\n\n # Not changing a url that is not a hass url\n assert (\n async_process_play_media_url(hass, \"https://not-hass.com/path\")\n == \"https://not-hass.com/path\"\n )\n # Not changing a url that is not http/https\n assert (\n async_process_play_media_url(hass, \"file:///tmp/test.mp3\")\n == \"file:///tmp/test.mp3\"\n )\n\n # Testing signing hass URLs\n assert (\n async_process_play_media_url(hass, \"/path\")\n == \"http://example.local:8123/path?authSig=bla\"\n )\n assert (\n async_process_play_media_url(hass, \"http://example.local:8123/path\")\n == \"http://example.local:8123/path?authSig=bla\"\n )\n assert (\n async_process_play_media_url(hass, \"http://192.168.123.123:8123/path\")\n == \"http://192.168.123.123:8123/path?authSig=bla\"\n )\n with pytest.raises(HomeAssistantError), patch(\n \"homeassistant.components.media_player.browse_media.get_url\",\n 
side_effect=NoURLAvailableError,\n ):\n async_process_play_media_url(hass, \"/path\")\n\n # Test skip signing URLs that have a query param\n assert (\n async_process_play_media_url(hass, \"/path?hello=world\")\n == \"http://example.local:8123/path?hello=world\"\n )\n assert (\n async_process_play_media_url(\n hass, \"http://192.168.123.123:8123/path?hello=world\"\n )\n == \"http://192.168.123.123:8123/path?hello=world\"\n )\n\n # Test skip signing URLs if they are known to require no auth\n assert (\n async_process_play_media_url(hass, \"/api/tts_proxy/bla\")\n == \"http://example.local:8123/api/tts_proxy/bla\"\n )\n assert (\n async_process_play_media_url(\n hass, \"http://example.local:8123/api/tts_proxy/bla\"\n )\n == \"http://example.local:8123/api/tts_proxy/bla\"\n )\n\n with pytest.raises(ValueError):\n async_process_play_media_url(hass, \"hello\")\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 436, "n_words": 142, "vocab_size": 68, "complexity": 1, "nloc": 53, "token_counts": 177, "n_ast_nodes": 322, "n_identifiers": 18, "random_cut": "async def test_process_play_media_url(hass, mock_sign_path):\n \n await async_process_ha_core_config(\n hass,\n {\"internal_url\": \"http://example.local:8123\"},\n )\n hass.config.api = Mock(use_ssl=False, port=8123, local_ip=\"192.168.123.123\")\n\n # Not changing a url that is not a hass url\n assert (\n async_process_play_media_url(hass, \"https://not-hass.com/path\")\n == \"https://not-hass.com/path\"\n )\n # Not changing a url that is not http/https\n assert (\n async_process_play_media_url(hass, \"file:///tmp/test.mp3\")\n == \"file:///tmp/test.mp3\"\n )\n\n # Testing signing hass URLs\n assert (\n async_process_play_media_url(hass, \"/path\")\n == \"http://example.local:8123/path?authSig=bla\"\n )\n assert (\n async_process_play_media_url(hass, \"http://example.local:8123/path\")\n == \"http://example.local:8123/path?authSig=bla\"\n )\n assert (\n async_process_play_media_url(hass, \"http://192.168.123.123:8123/path\")\n == \"http://192.168.123.123:8123/path?authSig=bla\"\n )\n with pytest.raises(HomeAssistantError), patch(\n \"homeassistant.components.media_player.browse_media.get_url\",\n side_effect=NoURLAvailableError,\n ):\n async_process_play_media_url(hass, \"/path\")\n\n # Test skip signing URLs that have a query param\n assert (\n async_process_play_media_url(hass, \"/path?hello=world\")\n == \"http://example.local:8123/path?hello=world\"\n )\n assert (\n async_process_play_media_url(\n hass, \"http://192.168.123.123:8123/path?hello=world\"\n )\n == \"http://192.168.123.123:8123/path?hel" }, { "id": 273832, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/rnn/cudnn_test.py", "file_name": "cudnn_test.py", "fun_name": "test_trainability", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_trainability(self):\n input_size = 10\n units = 2\n for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:\n layer = layer_class(units)\n layer.build((None, None, input_size))\n self.assertEqual(len(layer.weights), 3)\n self.assertEqual(len(layer.trainable_weights), 3)\n self.assertEqual(len(layer.non_trainable_weights), 0)\n layer.trainable = False\n self.assertEqual(len(layer.weights), 3)\n self.assertEqual(len(layer.non_trainable_weights), 3)\n self.assertEqual(len(layer.trainable_weights), 0)\n layer.trainable = True\n 
self.assertEqual(len(layer.weights), 3)\n self.assertEqual(len(layer.trainable_weights), 3)\n self.assertEqual(len(layer.non_trainable_weights), 0)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 206, "n_words": 43, "vocab_size": 25, "complexity": 2, "nloc": 17, "token_counts": 173, "n_ast_nodes": 266, "n_identifiers": 17, "random_cut": "def test_trainability(self):\n input_size = 10\n units = 2\n for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]:\n layer = layer_class(units)\n layer.build((None, None, input_size))\n self.assertEqual(len(layer.weights), 3)\n self.assertEqual(len(layer.trainable_weights), 3)\n self.assertEqu" }, { "id": 60007, "commit_id": "b326ebbcca00b79c82ef92ad4d823044dab40e5f", "repo": "prefect", "path": "tests/infrastructure/test_process.py", "file_name": "test_process.py", "fun_name": "test_process_kill_sends_sigterm_then_sigkill", "commit_message": "Monitor process after kill and return early when possible (#7746)", "code": "async def test_process_kill_sends_sigterm_then_sigkill(monkeypatch):\n os_kill = MagicMock()\n monkeypatch.setattr(\"os.kill\", os_kill)\n\n infrastructure_pid = f\"{socket.gethostname()}:12345\"\n grace_seconds = 2\n\n process = Process(command=[\"noop\"])\n await process.kill(\n infrastructure_pid=infrastructure_pid, grace_seconds=grace_seconds\n )\n\n os_kill.assert_has_calls(\n [\n call(12345, signal.SIGTERM),\n call(12345, 0),\n call(12345, signal.SIGKILL),\n ]\n )\n\n\n@pytest.mark.skipif(\n sys.platform == \"win32\",\n reason=\"SIGTERM/SIGKILL are only used in non-Windows environments\",\n)", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "@pytest.mark.skipif(\n sys.platform == \"win32\",\n reason=\"SIGTERM/SIGKILL are only used in non-Windows environments\",\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 128, "n_words": 44, "vocab_size": 37, "complexity": 1, "nloc": 16, "token_counts": 80, "n_ast_nodes": 170, "n_identifiers": 24, "random_cut": "async def test_process_kill_sends_sigterm_then_sigkill(monkeypatch):\n os_kill = MagicMock()\n monkeypatch.setattr(\"os.kill\", os_kill)\n\n infrastructure_pid = f\"{socket.gethostname()}:12345\"\n grace_seconds = 2\n\n process = Process(command=[\"noop\"])\n await process.kill(\n infrastructure_pid=infrastructure_pid, grace_seconds=grace_seconds\n )\n\n os_kill.assert_has_calls(\n [\n" }, { "id": 262801, "commit_id": "3aad9af18641aa2181dd86cececc2aeb8a0dba06", "repo": "pyinstaller", "path": "tests/unit/test_normalize_icon_type.py", "file_name": "test_normalize_icon_type.py", "fun_name": "test_normalize_icon_pillow", "commit_message": "Icon translation using PIL (#6697)\n\nConvert icons into the correct platform dependent format using PIL/Pillow if installed.", "code": "def test_normalize_icon_pillow(tmp_path):\n data_dir = str(Path(PyInstaller.__file__).with_name(\"bootloader\") / \"images\")\n workpath = str(tmp_path)\n\n pytest.importorskip(\"PIL\", reason=\"Needs PIL / Pillow for this test\")\n\n # Alternative image - output is a different file with the correct suffix\n\n icon = os.path.join(data_dir, 'github_logo.png')\n ret = normalize_icon_type(icon, (\"ico\",), \"ico\", workpath)\n\n _, ret_filetype = os.path.splitext(ret)\n if ret_filetype != \".ico\":\n pytest.fail(\"icon validation didn't convert to the right format\", False)\n\n # Some random non-image file: Raises an image conversion error\n\n icon = 
os.path.join(data_dir, 'pyi_icon.notanicon')\n with open(icon, \"w\") as f:\n f.write(\"this is in fact, not an icon\")\n\n with pytest.raises(ValueError):\n normalize_icon_type(icon, (\"ico\",), \"ico\", workpath)\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 146, "n_words": 90, "vocab_size": 70, "complexity": 2, "nloc": 14, "token_counts": 140, "n_ast_nodes": 248, "n_identifiers": 27, "random_cut": "def test_normalize_icon_pillow(tmp_path):\n data_dir = str(Path(PyInstaller.__file__).with_name(\"bootloader\") / \"images\")\n workpath = str(tmp_path)\n\n pytest.importorskip(\"PIL\", reason=\"Needs PIL / Pillow for this test\")\n\n # Alternative image - output is a different file with the correct suffix\n\n icon = os.path.join(data_dir, 'github_logo.png')\n ret = normalize_icon_type(icon, (\"ico\",), \"ico\", workpath)\n\n _, ret_filetype = os.path.splitext(ret)\n if ret_filetype != \".ico\":\n pytest.fail(\"icon validation didn't convert to the right format\", False)\n\n # Some random non-image file: Raises an image conversion error\n\n icon = " }, { "id": 71326, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/api/test_pages.py", "file_name": "test_pages.py", "fun_name": "test_meta_parent_id_doesnt_show_root_page", "commit_message": "Reformat with black", "code": "def test_meta_parent_id_doesnt_show_root_page(self):\n # Root page is visible in the admin API\n response = self.get_response(2)\n content = json.loads(response.content.decode(\"UTF-8\"))\n\n self.assertIsNotNone(content[\"meta\"][\"parent\"])\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 45, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 40, "n_ast_nodes": 71, "n_identifiers": 9, "random_cut": "def test_meta_parent_id_doesnt_show_root_page(self):\n # Root page is visible in the admin API\n response = self.get_response(2)\n content = json.loads(response.content.decode(\"UTF-8\"))\n\n self.assertIsNotN" }, { "id": 60024, "commit_id": "40201d8b8c49047cb897b8b6664635dcfffe9413", "repo": "prefect", "path": "src/prefect/orion/orchestration/core_policy.py", "file_name": "core_policy.py", "fun_name": "priority", "commit_message": "Fix scheduled time copy for flows (#7770)", "code": "def priority():\n return [\n HandleFlowTerminalStateTransitions,\n PreventRedundantTransitions,\n CopyScheduledTime,\n WaitForScheduledTime,\n RetryFailedFlows,\n ]\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 78, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 8, "token_counts": 17, "n_ast_nodes": 23, "n_identifiers": 6, "random_cut": "def priority():\n return [\n HandleFlowTerminalStateTransitions,\n PreventRedundantTransitions,\n CopyScheduledTime,\n WaitForScheduledTime,\n RetryFailedFl" }, { "id": 109922, "commit_id": "df6f95703b60348e01603f98a439b133da2938a0", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/art3d.py", "file_name": "art3d.py", "fun_name": "set_3d_properties", "commit_message": "Improve mpl_toolkit documentation", "code": "def set_3d_properties(self, zs=0, zdir='z'):\n \n xs = self.get_xdata()\n ys = self.get_ydata()\n zs = cbook._to_unmasked_float_array(zs).ravel()\n zs = np.broadcast_to(zs, len(xs))\n self._verts3d = 
juggle_axes(xs, ys, zs, zdir)\n self.stale = True\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 75, "n_words": 26, "vocab_size": 20, "complexity": 1, "nloc": 7, "token_counts": 72, "n_ast_nodes": 116, "n_identifiers": 17, "random_cut": "def set_3d_properties(self, zs=0, zdir='z'):\n \n xs = self.get_xdata()\n ys = self.get_ydata()\n zs = cbook._to_unmasked_float_array(zs).ravel()\n zs = np.broadcast_to(zs, len(xs))\n self._ve" }, { "id": 27490, "commit_id": "08aa724176be00d7aaf654f14e9ae99dd4327f97", "repo": "saleor", "path": "saleor/core/utils/random_data.py", "file_name": "random_data.py", "fun_name": "create_product_variant_channel_listings", "commit_message": "Update sample products set (#9796)\n\n* Update products data set\r\n\r\n* Fix image filenames\r\n\r\n* Replave `default_variant` with `default: true`\r\n\r\n* Fix fake user creation and attribute-product assignment\r\n\r\n* Drop preorders creation\r\n\r\n* Optimize images\r\n\r\nShout out to https://github.com/ImageOptim/ImageOptim team\r\n\r\n* Load menus from JSON\r\n\r\n* Reduce the number of pregenerated sales\r\n\r\n* Fix one of the images\r\n\r\n* Apply code review changes\r\n\r\n* Fix attr values when loading data from json dump\r\n\r\n* Don't test user existence twice\r\n\r\n* Add some product descriptions\r\n\r\n* Fix failing tests\r\n\r\n* Simplify channel query\r\n\r\n* Reduce files sizes\r\n\r\n* Use relative imports\r\n\r\nCo-authored-by: Patryk Zawadzki \r\nCo-authored-by: Krzysztof Wolski \r\nCo-authored-by: IKarbowiak ", "code": "def create_product_variant_channel_listings(product_variant_channel_listings_data):\n channel_USD = Channel.objects.get(slug=settings.DEFAULT_CHANNEL_SLUG)\n channel_PLN = Channel.objects.get(slug=\"channel-pln\")\n for variant_channel_listing in product_variant_channel_listings_data:\n pk = variant_channel_listing[\"pk\"]\n defaults = dict(variant_channel_listing[\"fields\"])\n\n defaults[\"variant_id\"] = defaults.pop(\"variant\")\n channel = defaults.pop(\"channel\")\n defaults[\"channel_id\"] = channel_USD.pk if channel == 1 else channel_PLN.pk\n ProductVariantChannelListing.objects.update_or_create(pk=pk, defaults=defaults)\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 85, "n_words": 35, "vocab_size": 28, "complexity": 3, "nloc": 10, "token_counts": 100, "n_ast_nodes": 168, "n_identifiers": 18, "random_cut": "def create_product_variant_channel_listings(product_variant_channel_listings_data):\n channel_USD = Channel.objects.get(slug=settings.DEFAULT_CHANNEL_SLUG)\n channel_PLN = Channel.objects.get(slug=\"channel-pln\")\n for variant_channel_listing in product_variant_channel_listings_data:\n pk = variant_channel_listing[\"pk\"]\n defaults = dict(variant_channel_listing[\"fields\"])\n\n" }, { "id": 149113, "commit_id": "20fc9459f23979f57d7925175dee376cd69acef0", "repo": "freqtrade", "path": "tests/strategy/test_strategy_loading.py", "file_name": "test_strategy_loading.py", "fun_name": "test_strategy_can_short", "commit_message": "Add test for can_short strategy attribute", "code": "def test_strategy_can_short(caplog, default_conf):\n caplog.set_level(logging.INFO)\n default_conf.update({\n 'strategy': CURRENT_TEST_STRATEGY,\n })\n strat = StrategyResolver.load_strategy(default_conf)\n assert isinstance(strat, IStrategy)\n default_conf['strategy'] = 'StrategyTestV3Futures'\n with 
pytest.raises(ImportError, match=\"\"):\n StrategyResolver.load_strategy(default_conf)\n\n default_conf['trading_mode'] = 'futures'\n strat = StrategyResolver.load_strategy(default_conf)\n assert isinstance(strat, IStrategy)\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 73, "n_words": 30, "vocab_size": 21, "complexity": 1, "nloc": 13, "token_counts": 86, "n_ast_nodes": 147, "n_identifiers": 17, "random_cut": "def test_strategy_can_short(caplog, default_conf):\n caplog.set_level(logging.INFO)\n default_conf.update({\n 'strategy': CURRENT_TEST_STRATEGY,\n })\n strat = StrategyResolver.load_strategy(default_conf)\n assert isinstance(strat, IStrategy)\n default_conf['strategy'] = 'StrategyTestV3Futures'\n with pytest.raises(ImportError, match=\"\"):\n StrategyResolver.load_strategy(default_conf)\n\n default_conf['trading_mode'] = 'futures'\n strat = StrategyResolver.load_strategy(default_conf)\n as" }, { "id": 220794, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/subprocess.py", "file_name": "subprocess.py", "fun_name": "_feed_stdin", "commit_message": "add python 3.10.4 for windows", "code": "async def _feed_stdin(self, input):\n debug = self._loop.get_debug()\n self.stdin.write(input)\n if debug:\n logger.debug(\n '%r communicate: feed stdin (%s bytes)', self, len(input))\n try:\n await self.stdin.drain()\n except (BrokenPipeError, ConnectionResetError) as exc:\n # communicate() ignores BrokenPipeError and ConnectionResetError\n if debug:\n logger.debug('%r communicate: stdin got %r', self, exc)\n\n if debug:\n logger.debug('%r communicate: close stdin', self)\n self.stdin.close()\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 183, "n_words": 50, "vocab_size": 41, "complexity": 5, "nloc": 14, "token_counts": 90, "n_ast_nodes": 151, "n_identifiers": 15, "random_cut": "async def _feed_stdin(self, input):\n debug = self._loop.get_debug()\n self.stdin.write(input)\n if debug:\n logger.debug(\n '%r communicate: feed stdin" }, { "id": 92712, "commit_id": "ef5a739249de199b25d2cba7a2ee52820d9f34de", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events_v2.py", "file_name": "test_organization_events_v2.py", "fun_name": "test_performance_view_feature", "commit_message": "tests(discover): Improve stability of eventsv2 tests (#36641)\n\nSame motivation as #36619, this aims to improve the stability of the eventsv2\r\ntests by moving the event timestamps further in the past.", "code": "def test_performance_view_feature(self):\n self.store_event(\n data={\"event_id\": \"a\" * 32, \"timestamp\": self.ten_mins_ago, \"fingerprint\": [\"group1\"]},\n project_id=self.project.id,\n )\n\n query = {\"field\": [\"id\", \"project.id\"], \"project\": [self.project.id]}\n response = self.do_request(query)\n assert response.status_code == 200\n assert len(response.data[\"data\"]) == 1\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 94, "n_words": 31, "vocab_size": 28, "complexity": 1, "nloc": 9, "token_counts": 87, "n_ast_nodes": 149, "n_identifiers": 13, "random_cut": "def test_performance_view_feature(self):\n self.store_event(\n data={\"event_id\": \"a\" * 32, \"timestamp\": self.ten_mins_ago, \"fingerprint\": [\"group1\"]},\n 
project_id=self.project.id,\n )\n\n query = {\"field\": [\"id\", \"project.id\"], \"project\": [self.project.id]}\n response = self.do_request(query)\n assert response.status_code == 200\n assert len(response." }, { "id": 25221, "commit_id": "1f9400dd7374ce9cc47981372e324ff412e53ba3", "repo": "PaddleOCR", "path": "ppocr/postprocess/drrg_postprocess.py", "file_name": "drrg_postprocess.py", "fun_name": "__call__", "commit_message": "add drrg", "code": "def __call__(self, preds, shape_list):\n \n edges, scores, text_comps = preds\n if edges is not None:\n if isinstance(edges, paddle.Tensor):\n edges = edges.numpy()\n if isinstance(scores, paddle.Tensor):\n scores = scores.numpy()\n if isinstance(text_comps, paddle.Tensor):\n text_comps = text_comps.numpy()\n assert len(edges) == len(scores)\n assert text_comps.ndim == 2\n assert text_comps.shape[1] == 9\n\n vertices, score_dict = graph_propagation(edges, scores, text_comps)\n clusters = connected_components(vertices, score_dict, self.link_thr)\n pred_labels = clusters2labels(clusters, text_comps.shape[0])\n text_comps, pred_labels = remove_single(text_comps, pred_labels)\n boundaries = comps2boundaries(text_comps, pred_labels)\n else:\n boundaries = []\n\n boundaries, scores = self.resize_boundary(\n boundaries, (1 / shape_list[0, 2:]).tolist()[::-1])\n boxes_batch = [dict(points=boundaries, scores=scores)]\n return boxes_batch\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 324, "n_words": 87, "vocab_size": 58, "complexity": 5, "nloc": 23, "token_counts": 207, "n_ast_nodes": 318, "n_identifiers": 30, "random_cut": "def __call__(self, preds, shape_list):\n \n edges, scores, text_comps = preds\n if edges is not None:\n if isinstance(edges, paddle.Tensor):\n edges = edges.numpy()\n if isinstance(scores, paddle.Tensor):\n scores = scores.numpy()\n if isinstance(text_comps, paddle.Tensor):\n text_comps = text_comps.numpy()\n assert len(edges) == len(scores)\n assert text_comps.ndim == 2\n assert text_comps.shape[1] == 9\n\n vertices, score_dict = graph_propagation(edges, scores, text_comps)\n clusters = connected_components(vertices, score_" }, { "id": 120172, "commit_id": "4806c29bf784c22ea10b7c87b1d03f2f42c662d4", "repo": "jax", "path": "jax/_src/lax/fft.py", "file_name": "fft.py", "fun_name": "_fft_batching_rule", "commit_message": "[MHLO] Add MHLO lowerings for FFT ops.\n\nPiperOrigin-RevId: 441768017", "code": "def _fft_batching_rule(batched_args, batch_dims, fft_type, fft_lengths):\n x, = batched_args\n bd, = batch_dims\n x = batching.moveaxis(x, bd, 0)\n return fft(x, fft_type, fft_lengths), 0\n\nfft_p = Primitive('fft')\nfft_p.def_impl(_fft_impl)\nfft_p.def_abstract_eval(fft_abstract_eval)\nxla.register_translation(fft_p, _fft_translation_rule)\nmlir.register_lowering(fft_p, _fft_lowering)\nad.deflinear2(fft_p, _fft_transpose_rule)\nbatching.primitive_batchers[fft_p] = _fft_batching_rule\nif pocketfft:\n xla.register_translation(fft_p, _fft_translation_rule_cpu, platform='cpu')\n if jax._src.lib.version >= (0, 3, 6):\n mlir.register_lowering(fft_p, _fft_lowering_cpu, platform='cpu')\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 49, "n_words": 49, "vocab_size": 39, "complexity": 1, "nloc": 5, "token_counts": 42, "n_ast_nodes": 208, "n_identifiers": 34, "random_cut": "def _fft_batching_rule(batched_args, batch_dims, fft_type, fft_lengths):\n 
x, = batched_args\n bd, = batch_dims\n x = batching.moveaxis(x, bd, 0)\n return fft(x, fft_type, fft_lengths), 0\n\nfft_p = Primitive('fft')\nfft_p.def_impl(_fft_impl)\nfft_p.def_abstract_eval(fft_abstract_eval)\nxla.register_translation(fft_p, _fft_translation_rule)\nmlir.register_lowering(fft_p, _fft_lowering)\nad.deflinear2(fft_p, _fft_transpose_rule)\nbatching.primitive_batchers[fft_p] = _fft_batching_rule\nif pocketfft:\n xla.register_translation(fft_p, _fft_translation_rule_cpu, platform='cpu')\n if j" }, { "id": 246712, "commit_id": "02d708568b476f2f7716000b35c0adfa4cbd31b3", "repo": "synapse", "path": "tests/events/test_utils.py", "file_name": "test_utils.py", "fun_name": "test_event_fields_works_with_nested_keys", "commit_message": "Replace assertEquals and friends with non-deprecated versions. (#12092)", "code": "def test_event_fields_works_with_nested_keys(self):\n self.assertEqual(\n self.serialize(\n MockEvent(\n sender=\"@alice:localhost\",\n room_id=\"!foo:bar\",\n content={\"body\": \"A message\"},\n ),\n [\"content.body\"],\n ),\n {\"content\": {\"body\": \"A message\"}},\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 166, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 12, "token_counts": 50, "n_ast_nodes": 89, "n_identifiers": 8, "random_cut": "def test_event_fields_works_with_nested_keys(self):\n self.assertEqual(\n " }, { "id": 258390, "commit_id": "756e0114e661767e3c59bb24cbd66c41ad4a5903", "repo": "haystack", "path": "test/nodes/test_table_reader.py", "file_name": "test_table_reader.py", "fun_name": "test_table_reader_batch_single_query_single_doc_list", "commit_message": "refactor: Remove duplicate code in TableReader (#3708)\n\n* Refactor table reader to use util functions to reduce code duplication.\r\n\r\n* Expanding the tests for the table reader\r\n\r\n* Adding types\r\n\r\n* Updating tests to work for RCIReader\r\n\r\n* Fix bug in RCIReader. 
Saving the wrong queries list.\r\n\r\n* Update _flatten_inputs to not change input variable\r\n\r\n* Remove duplicate code", "code": "def test_table_reader_batch_single_query_single_doc_list(table_reader_and_param, table1, table2):\n table_reader, param = table_reader_and_param\n query = \"When was Di Caprio born?\"\n prediction = table_reader.predict_batch(\n queries=[query],\n documents=[Document(content=table1, content_type=\"table\"), Document(content=table2, content_type=\"table\")],\n )\n # Expected output: List of lists of answers\n assert isinstance(prediction[\"answers\"], list)\n assert isinstance(prediction[\"answers\"][0], list)\n assert isinstance(prediction[\"answers\"][0][0], Answer)\n assert prediction[\"queries\"] == [\"When was Di Caprio born?\", \"When was Di Caprio born?\"]\n\n # Check number of answers for each document\n num_ans_reference = {\n \"tapas_small\": {\"num_answers\": [1, 1]},\n \"rci\": {\"num_answers\": [10, 10]},\n \"tapas_scored\": {\"num_answers\": [3, 3]},\n }\n assert len(prediction[\"answers\"]) == 2\n for i, ans_list in enumerate(prediction[\"answers\"]):\n assert len(ans_list) == num_ans_reference[param][\"num_answers\"][i]\n\n # Check first answer from the 1ST document\n score_reference = {\"tapas_small\": {\"score\": 1.0}, \"rci\": {\"score\": -6.5301}, \"tapas_scored\": {\"score\": 0.50568}}\n assert prediction[\"answers\"][0][0].score == pytest.approx(score_reference[param][\"score\"], rel=1e-3)\n assert prediction[\"answers\"][0][0].answer == \"11 november 1974\"\n assert prediction[\"answers\"][0][0].offsets_in_context[0].start == 7\n assert prediction[\"answers\"][0][0].offsets_in_context[0].end == 8\n\n # Check first answer from the 2ND Document\n ans_reference = {\n \"tapas_small\": {\"answer\": \"5 april 1980\", \"start\": 7, \"end\": 8, \"score\": 0.86314},\n \"rci\": {\"answer\": \"15 september 1960\", \"start\": 11, \"end\": 12, \"score\": -7.9429},\n \"tapas_scored\": {\"answer\": \"5\", \"start\": 10, \"end\": 11, \"score\": 0.11485},\n }\n assert prediction[\"answers\"][1][0].score == pytest.approx(ans_reference[param][\"score\"], rel=1e-3)\n assert prediction[\"answers\"][1][0].answer == ans_reference[param][\"answer\"]\n assert prediction[\"answers\"][1][0].offsets_in_context[0].start == ans_reference[param][\"start\"]\n assert prediction[\"answers\"][1][0].offsets_in_context[0].end == ans_reference[param][\"end\"]\n\n\n@pytest.mark.parametrize(\"table_reader_and_param\", [\"tapas_small\", \"rci\", \"tapas_scored\"], indirect=True)", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"table_reader_and_param\", [\"tapas_small\", \"rci\", \"tapas_scored\"], indirect=True)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 336, "n_words": 194, "vocab_size": 123, "complexity": 2, "nloc": 33, "token_counts": 482, "n_ast_nodes": 806, "n_identifiers": 35, "random_cut": "def test_table_reader_batch_single_query_single_doc_list(table_reader_and_param, table1, table2):\n table_reader, param = table_reader_and_param\n query = \"When was Di Caprio born?\"\n prediction = table_reader.predict_batch(\n queries=[query],\n documents=[Document(content=table1, content_type=\"table\"), Document(content=table2, content_type=\"table\")],\n )\n # Expected output: List of lists of answers\n assert isinstance(prediction[\"answers\"], list)\n assert isinstance(prediction[\"answers\"][0], list)\n assert isinstance(prediction[\"answers\"][0][0], Answer)\n assert prediction[\"queries\"] == 
[\"When was Di Caprio born?\", \"When was Di Caprio born?\"]\n\n # Check number of answers for each document\n num_ans_reference = {\n \"tapas_small\": {\"num_answers\": [1, 1]},\n \"rci\": {\"num_answers\": [10, 10]},\n \"tapas_scored\": {\"num_answers\": [3, 3]},\n }\n assert len(prediction[\"answers\"]) == 2\n for i, ans_list in enumerate(prediction[\"answers\"]):\n assert len(ans_list) == num_ans_reference[param][\"num_answers\"][i]\n\n # Check first answer from the 1ST document\n score_reference = {\"tapas_small\": {\"score\": 1.0}, \"rci\": {\"score\": -6.5301}, \"tapas_scored\": {\"score\": 0.50568}}\n assert prediction[\"answers\"][0][0].score == pytest.approx(score_reference[param][\"score\"], rel=1e-3)\n assert prediction[\"answers\"][0][0].answer == \"11 november 1974\"\n assert prediction[\"answers\"][0][0].offsets_in_context[0].start == 7\n assert prediction[\"answers\"][0][0].offsets_in_context[0].end == 8\n\n # Check first answer from the 2ND Document\n ans_reference = {\n \"tapas_small\"" }, { "id": 31776, "commit_id": "6c8f4c9a938a09749ea1b19a5fa2a8dd27e99a29", "repo": "transformers", "path": "tests/models/groupvit/test_modeling_groupvit.py", "file_name": "test_modeling_groupvit.py", "fun_name": "create_and_check_model", "commit_message": "Adding GroupViT Models (#17313)\n\n* add group vit and fixed test (except slow)\r\n\r\n* passing slow test\r\n\r\n* addressed some comments\r\n\r\n* fixed test\r\n\r\n* fixed style\r\n\r\n* fixed copy\r\n\r\n* fixed segmentation output\r\n\r\n* fixed test\r\n\r\n* fixed relative path\r\n\r\n* fixed copy\r\n\r\n* add ignore non auto configured\r\n\r\n* fixed docstring, add doc\r\n\r\n* fixed copies\r\n\r\n* Apply suggestions from code review\r\n\r\nmerge suggestions\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* resolve comment, renaming model\r\n\r\n* delete unused attr\r\n\r\n* use fix copies\r\n\r\n* resolve comments\r\n\r\n* fixed attn\r\n\r\n* remove unused vars\r\n\r\n* refactor tests\r\n\r\n* resolve final comments\r\n\r\n* add demo notebook\r\n\r\n* fixed inconsitent default\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* rename stage->stages\r\n\r\n* Create single GroupViTEncoderLayer class\r\n\r\n* Update conversion script\r\n\r\n* Simplify conversion script\r\n\r\n* Remove cross-attention class in favor of GroupViTAttention\r\n\r\n* Convert other model as well, add processor to conversion script\r\n\r\n* addressing final comment\r\n\r\n* fixed args\r\n\r\n* Update src/transformers/models/groupvit/modeling_groupvit.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Niels Rogge ", "code": "def create_and_check_model(self, config, pixel_values):\n model = GroupViTVisionModel(config=config)\n model.to(torch_device)\n model.eval()\n with torch.no_grad():\n result = model(pixel_values)\n self.parent.assertEqual(\n result.last_hidden_state.shape, (self.batch_size, self.num_output_groups[-1], self.hidden_size)\n )\n self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, 
self.hidden_size))\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 93, "n_words": 23, "vocab_size": 21, "complexity": 1, "nloc": 10, "token_counts": 93, "n_ast_nodes": 144, "n_identifiers": 20, "random_cut": "def create_and_check_model(self, config, pixel_values):\n model = GroupViTVisionModel(config=config)\n model.to(torch_device)\n model.eval()\n with torch.no_grad():\n resul" }, { "id": 290682, "commit_id": "ff1ec7a028f747de1f96521eb3df6f98d7426434", "repo": "core", "path": "tests/components/fibaro/test_config_flow.py", "file_name": "test_config_flow.py", "fun_name": "test_normalize_url_add_api", "commit_message": "Normalize url entered in fibaro integration setup dialog (#81996)\n\n* Normalize url entered in fibaro integration setup dialog\r\n\r\n* Improvements as suggested in code review\r\n\r\n* Fix spelling in comments", "code": "async def test_normalize_url_add_api():\n \n assert _normalize_url(\"http://192.168.1.1/\") == \"http://192.168.1.1/api/\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 28, "n_identifiers": 2, "random_cut": "async def test_normalize_url_add_api():\n \n assert _normalize_url(\"http://192.168.1.1/\") == \"http://192.168.1.1/api/\"\n\n" }, { "id": 30377, "commit_id": "7595c08e122c43aa55eea9be9fabd2de2c1d7b9a", "repo": "spotify-downloader", "path": "tests/utils/test_ffmpeg.py", "file_name": "test_ffmpeg.py", "fun_name": "test_get_local_ffmpeg", "commit_message": "properly patch is file func", "code": "def test_get_local_ffmpeg(monkeypatch):\n \n\n monkeypatch.setattr(pathlib.Path, \"isfile\", lambda *_: True)\n\n platform_str = platform.system()\n\n local_ffmpeg = get_local_ffmpeg()\n\n assert local_ffmpeg is not None\n\n if platform_str == \"Linux\":\n assert str(local_ffmpeg).endswith(\"ffmpeg\")\n elif platform_str == \"Darwin\":\n assert str(local_ffmpeg).endswith(\"ffmpeg\")\n elif platform_str == \"Windows\":\n assert str(local_ffmpeg).endswith(\"ffmpeg.exe\")\n\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 81, "n_words": 36, "vocab_size": 24, "complexity": 4, "nloc": 11, "token_counts": 84, "n_ast_nodes": 151, "n_identifiers": 13, "random_cut": "def test_get_local_ffmpeg(monkeypatch):\n \n\n monkeypatch.setattr(pathlib.Path, \"isfile\", lambda *_: True)\n\n platform_str = platform.system()\n\n local_ffmpeg = " }, { "id": 191896, "commit_id": "e65a857b5487a8493bc8a80a95d64d9f049de347", "repo": "vision", "path": "references/classification/sampler.py", "file_name": "sampler.py", "fun_name": "__iter__", "commit_message": "only set random generator if shuffle=true (#5135)\n\nCo-authored-by: Vasilis Vryniotis ", "code": "def __iter__(self):\n if self.shuffle:\n # Deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.seed + self.epoch)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = list(range(len(self.dataset)))\n\n # Add extra samples to make it evenly divisible\n indices = [ele for ele in indices for i in range(self.repetitions)]\n indices += indices[: (self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # Subsample\n indices = indices[self.rank 
: self.total_size : self.num_replicas]\n assert len(indices) == self.num_samples\n\n return iter(indices[: self.num_selected_samples])\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 194, "n_words": 70, "vocab_size": 52, "complexity": 4, "nloc": 13, "token_counts": 145, "n_ast_nodes": 230, "n_identifiers": 26, "random_cut": "def __iter__(self):\n if self.shuffle:\n # Deterministically sh" }, { "id": 12267, "commit_id": "c1f0ae51ed4ef76ff9aaa976d234670a296eac07", "repo": "jina", "path": "jina/helper.py", "file_name": "helper.py", "fun_name": "_update_policy", "commit_message": "fix: close loop from run_async (#4734)\n\n* fix: close loop from run_async\r\n\r\n* fix: check if the loop was just created\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: context manage loop\r\n\r\n* fix: style\r\n\r\n* fix(loop): don't create new loops\r\n\r\n* fix: create loop if not existing\r\n\r\n* fix: close asyncio loop after fork\r\n\r\n* fix: better loop closing\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro ", "code": "def _update_policy():\n if __windows__:\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n\n elif 'JINA_DISABLE_UVLOOP' in os.environ:\n return\n else:\n try:\n import uvloop\n\n if not isinstance(asyncio.get_event_loop_policy(), uvloop.EventLoopPolicy):\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n except ModuleNotFoundError:\n warnings.warn(\n 'Install `uvloop` via `pip install \"jina[uvloop]\"` for better performance.'\n )\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 142, "n_words": 32, "vocab_size": 31, "complexity": 5, "nloc": 14, "token_counts": 65, "n_ast_nodes": 115, "n_identifiers": 14, "random_cut": "def _update_policy():\n if __windows__:\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n\n elif 'JINA_DISABLE_UVLOOP' in os.environ:\n return\n else:\n try:\n import uvloop\n\n if not isinstance(asyncio.get_event_loop_policy(" }, { "id": 102898, "commit_id": "74e70d2548255b26983c34e81bf3c7f85caa778b", "repo": "kitty", "path": "bypy/macos/__main__.py", "file_name": "__main__.py", "fun_name": "install_dylib", "commit_message": "Refactor: More f-string for bypy scripts", "code": "def install_dylib(self, path, set_id=True):\n shutil.copy2(path, self.frameworks_dir)\n if set_id:\n self.set_id(\n join(self.frameworks_dir, basename(path)),\n f'{self.FID}/{basename(path)}')\n self.fix_dependencies_in_lib(join(self.frameworks_dir, basename(path)))\n", "url": "https://github.com/kovidgoyal/kitty.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 75, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 7, "token_counts": 59, "n_ast_nodes": 106, "n_identifiers": 11, "random_cut": "def install_dylib(self, path, set_id=True):\n shutil.copy2(path, self.frameworks_dir)\n if set_id:\n self.set_id(\n join(self.frameworks_dir, basename(path)),\n f'{self.FID}/{basename(path)}')\n self.fix_dependencies_in_lib(join(self.frameworks_dir, basename(path)))\n" }, { "id": 53233, "commit_id": "b937ae2f19021dabcb8b2548b3c204d6eb34a3e8", "repo": "prefect", "path": "src/prefect/orion/database/orm_models.py", "file_name": "orm_models.py", "fun_name": "versions_dir", "commit_message": "fix logging override bug", "code": "def versions_dir(self) -> Path:\n \n 
return (\n Path(prefect.orion.database.__file__).parent\n / \"migrations/versions/postgresql\"\n )\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 53, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 6, "token_counts": 25, "n_ast_nodes": 44, "n_identifiers": 8, "random_cut": "def versions_dir(self) -> Path:\n \n return (\n Path(prefect.orion.database.__file__).parent\n / \"migrations/version" }, { "id": 258952, "commit_id": "1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe", "repo": "scikit-learn", "path": "sklearn/tree/tests/test_tree.py", "file_name": "test_tree.py", "fun_name": "check_class_weights", "commit_message": "MNT Update black to stable version (#22474)", "code": "def check_class_weights(name):\n \n TreeClassifier = CLF_TREES[name]\n\n # Iris is balanced, so no effect expected for using 'balanced' weights\n clf1 = TreeClassifier(random_state=0)\n clf1.fit(iris.data, iris.target)\n clf2 = TreeClassifier(class_weight=\"balanced\", random_state=0)\n clf2.fit(iris.data, iris.target)\n assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)\n\n # Make a multi-output problem with three copies of Iris\n iris_multi = np.vstack((iris.target, iris.target, iris.target)).T\n # Create user-defined weights that should balance over the outputs\n clf3 = TreeClassifier(\n class_weight=[\n {0: 2.0, 1: 2.0, 2: 1.0},\n {0: 2.0, 1: 1.0, 2: 2.0},\n {0: 1.0, 1: 2.0, 2: 2.0},\n ],\n random_state=0,\n )\n clf3.fit(iris.data, iris_multi)\n assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)\n # Check against multi-output \"auto\" which should also have no effect\n clf4 = TreeClassifier(class_weight=\"balanced\", random_state=0)\n clf4.fit(iris.data, iris_multi)\n assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)\n\n # Inflate importance of class 1, check against user-defined weights\n sample_weight = np.ones(iris.target.shape)\n sample_weight[iris.target == 1] *= 100\n class_weight = {0: 1.0, 1: 100.0, 2: 1.0}\n clf1 = TreeClassifier(random_state=0)\n clf1.fit(iris.data, iris.target, sample_weight)\n clf2 = TreeClassifier(class_weight=class_weight, random_state=0)\n clf2.fit(iris.data, iris.target)\n assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)\n\n # Check that sample_weight and class_weight are multiplicative\n clf1 = TreeClassifier(random_state=0)\n clf1.fit(iris.data, iris.target, sample_weight**2)\n clf2 = TreeClassifier(class_weight=class_weight, random_state=0)\n clf2.fit(iris.data, iris.target, sample_weight)\n assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)\n\n\n@pytest.mark.parametrize(\"name\", CLF_TREES)", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"name\", CLF_TREES)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 323, "n_words": 168, "vocab_size": 97, "complexity": 1, "nloc": 34, "token_counts": 374, "n_ast_nodes": 546, "n_identifiers": 26, "random_cut": "def check_class_weights(name):\n \n TreeClassifier = CLF_TREES[name]\n\n # Iris is balanced, so no effect expected for using 'balanced' weights\n clf1 = TreeClassifier(random_state=0)\n clf1.fit(iris.data, iris.target)\n clf2 = TreeClassifier(class_weight=\"balanced\", random_state=0)\n clf2.fit(iris.data, iris.target)\n assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)\n\n # Make a multi-output problem with 
three copies of Iris\n iris_multi = np.vstack((iris.target, iris.target, iris.target)).T\n # Create user-defined weights that should balance over the outputs\n clf3 = TreeClassifier(\n class_weight=[\n {0: 2.0, 1: 2.0, 2: 1.0},\n {0: 2.0, 1: 1.0, 2: 2.0},\n {0: 1.0, 1: 2.0, 2: 2.0},\n ],\n random_state=0,\n )\n clf3.fit(iris.data, iris_multi)\n assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)\n # Check against multi-output \"auto\" which should " }, { "id": 152833, "commit_id": "1f92336be768d235c18a82acb2195b7135101ae7", "repo": "stable-diffusion-webui", "path": "modules/deepbooru.py", "file_name": "deepbooru.py", "fun_name": "release_process", "commit_message": "refactored the deepbooru module to improve speed on running multiple interogations in a row. Added the option to generate deepbooru tags for textual inversion preproccessing.", "code": "def release_process():\n \n from modules import shared # prevents circular reference\n shared.deepbooru_process_queue.put(\"QUIT\")\n shared.deepbooru_process.join()\n shared.deepbooru_process_queue = None\n shared.deepbooru_process = None\n shared.deepbooru_process_return = None\n shared.deepbooru_process_manager = None\n", "url": "https://github.com/AUTOMATIC1111/stable-diffusion-webui.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 49, "n_words": 24, "vocab_size": 18, "complexity": 1, "nloc": 8, "token_counts": 44, "n_ast_nodes": 79, "n_identifiers": 9, "random_cut": "def release_process():\n \n " }, { "id": 168545, "commit_id": "252ae0555abf488522f947107dcdee684be6ac8a", "repo": "pandas", "path": "pandas/tests/frame/constructors/test_from_records.py", "file_name": "test_from_records.py", "fun_name": "test_from_records_series_categorical_index", "commit_message": "Revert Interval/IntervalIndex/interval_range.inclusive deprecation (#48116)\n\n* Revert \"Cln tests interval wrt inclusive (#47775)\"\r\n\r\nThis reverts commit 2d6e0b251955d3a2c0c88f7e6ddb57b335ed09b7.\r\n\r\n* Revert \"CLN: Rename private variables to inclusive (#47655)\"\r\n\r\nThis reverts commit 102b3ca2119df822e2b0f346fa936d0fe9f17501.\r\n\r\n* Revert \"TYP: Improve typing interval inclusive (#47646)\"\r\n\r\nThis reverts commit 55064763e8ba55f6ff5370a8dd083767a189d7a4.\r\n\r\n* Revert \"DEPR: Deprecate set_closed and add set_incluive (#47636)\"\r\n\r\nThis reverts commit bd4ff395cbbf4cbde1fc8f1f746cae064a401638.\r\n\r\n* Revert \"DEPR: Remove deprecation from private class IntervalTree (#47637)\"\r\n\r\nThis reverts commit f6658ef9fdef5972214fdc338e2c6b5ee308dbf4.\r\n\r\n* Revert \"Revert inclusive default change of IntervalDtype (#47367)\"\r\n\r\nThis reverts commit d9dd1289e07d86928d144e53beb3d5b8ab3c2215.\r\n\r\n* Revert \"ENH: consistency of input args for boundaries - Interval (#46522)\"\r\n\r\nThis reverts commit 7e23a37e1c5bda81234801a6584563e2880769eb.\r\n\r\n* Revert \"ENH: consistency of input args for boundaries - pd.interval_range (#46355)\"\r\n\r\nThis reverts commit 073b3535d7a5171102e5915c38b57c21d13795ae.\r\n\r\n* Fix ArrowIntervalType manually\r\n\r\n* Remove unused import\r\n\r\n* Fix doctest and leftover usage\r\n\r\n* Fix remaining tests\r\n\r\n* Fix wording in doctoring\r\n\r\nCo-authored-by: Patrick Hoefler <61934744+phofl@users.noreply.github.com>", "code": "def test_from_records_series_categorical_index(self):\n # GH#32805\n index = CategoricalIndex(\n [Interval(-20, -10), Interval(-10, 0), Interval(0, 10)]\n )\n series_of_dicts = Series([{\"a\": 1}, {\"a\": 2}, {\"b\": 3}], 
index=index)\n frame = DataFrame.from_records(series_of_dicts, index=index)\n expected = DataFrame(\n {\"a\": [1, 2, np.NaN], \"b\": [np.NaN, np.NaN, 3]}, index=index\n )\n tm.assert_frame_equal(frame, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 119, "n_words": 42, "vocab_size": 36, "complexity": 1, "nloc": 10, "token_counts": 119, "n_ast_nodes": 183, "n_identifiers": 15, "random_cut": "def test_from_records_series_categorical_index(self):\n # GH#32805\n index = CategoricalIndex(\n [Interval(-20, -10), Interval(-10, 0), Interval(0, 10)]\n )\n series_of_dicts = Series([{\"a\": 1}, {\"a\": 2}, {\"b\": 3}], index=index)\n frame = DataFrame.from_records(series_of_dicts, index=index)\n expe" }, { "id": 80599, "commit_id": "604cbc17376620dc67df35386421835d43732a4e", "repo": "awx", "path": "awx/main/models/ha.py", "file_name": "ha.py", "fun_name": "consumed_capacity", "commit_message": "Consume control capacity (#11665)\n\n* Select control node before start task\r\n\r\nConsume capacity on control nodes for controlling tasks and consider\r\nremainging capacity on control nodes before selecting them.\r\n\r\nThis depends on the requirement that control and hybrid nodes should all\r\nbe in the instance group named 'controlplane'. Many tests do not satisfy that\r\nrequirement. I'll update the tests in another commit.\r\n\r\n* update tests to use controlplane\r\n\r\nWe don't start any tasks if we don't have a controlplane instance group\r\n\r\nDue to updates to fixtures, update tests to set node type and capacity\r\nexplicitly so they get expected result.\r\n\r\n* Fixes for accounting of control capacity consumed\r\n\r\nUpdate method is used to account for currently consumed capacity for\r\ninstance groups in the in-memory capacity tracking data structure we initialize in\r\nafter_lock_init and then update via calculate_capacity_consumed (both in\r\ntask_manager.py)\r\n\r\nAlso update fit_task_to_instance to consider control impact on instances\r\n\r\nTrust that these functions do the right thing looking for a\r\nnode with capacity, and cut out redundant check for the whole group's\r\ncapacity per Alan's reccomendation.\r\n\r\n* Refactor now redundant code\r\n\r\nDeal with control type tasks before we loop over the preferred instance\r\ngroups, which cuts out the need for some redundant logic.\r\n\r\nAlso, fix a bug where I was missing assigning the execution node in one case!\r\n\r\n* set job explanation on tasks that need capacity\r\n\r\nmove the job explanation for jobs that need capacity to a function\r\nso we can re-use it in the three places we need it.\r\n\r\n* project updates always run on the controlplane\r\n\r\nInstance group ordering makes no sense on project updates because they\r\nalways need to run on the control plane.\r\n\r\nAlso, since hybrid nodes should always run the control processes for the\r\njobs running on them as execution nodes, account for this when looking for a\r\nexecution node.\r\n\r\n* fix misleading message\r\n\r\nthe variables and wording were both misleading, fix to be more accurate\r\ndescription in the two different cases where this log may be emitted.\r\n\r\n* use settings correctly\r\n\r\nuse settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME instead of a hardcoded\r\nname\r\ncache the controlplane_ig object during the after lock init to avoid\r\nan uneccesary query\r\neliminate mistakenly duplicated AWX_CONTROL_PLANE_TASK_IMPACT and use\r\nonly 
AWX_CONTROL_NODE_TASK_IMPACT\r\n\r\n* add test for control capacity consumption\r\n\r\nadd test to verify that when there are 2 jobs and only capacity for one\r\nthat one will move into waiting and the other stays in pending\r\n\r\n* add test for hybrid node capacity consumption\r\n\r\nassert that the hybrid node is used for both control and execution and\r\ncapacity is deducted correctly\r\n\r\n* add test for task.capacity_type = control\r\n\r\nTest that control type tasks have the right capacity consumed and\r\nget assigned to the right instance group\r\n\r\nAlso fix lint in the tests\r\n\r\n* jobs_running not accurate for control nodes\r\n\r\nWe can either NOT use \"idle instances\" for control nodes, or we need\r\nto update the jobs_running property on the Instance model to count\r\njobs where the node is the controller_node.\r\n\r\nI didn't do that because it may be an expensive query, and it would be\r\nhard to make it match with jobs_running on the InstanceGroup which\r\nfilters on tasks assigned to the instance group.\r\n\r\nThis change chooses to stop considering \"idle\" control nodes an option,\r\nsince we can't acurrately identify them.\r\n\r\nThe way things are without any change, is we are continuing to over consume capacity on control nodes\r\nbecause this method sees all control nodes as \"idle\" at the beginning\r\nof the task manager run, and then only counts jobs started in that run\r\nin the in-memory tracking. So jobs which last over a number of task\r\nmanager runs build up consuming capacity, which is accurately reported\r\nvia Instance.consumed_capacity\r\n\r\n* Reduce default task impact for control nodes\r\n\r\nThis is something we can experiment with as far as what users\r\nwant at install time, but start with just 1 for now.\r\n\r\n* update capacity docs\r\n\r\nDescribe usage of the new setting and the concept of control impact.\r\n\r\nCo-authored-by: Alan Rominger \r\nCo-authored-by: Rebeccah ", "code": "def consumed_capacity(self):\n capacity_consumed = 0\n if self.node_type in ('hybrid', 'execution'):\n capacity_consumed += sum(x.task_impact for x in UnifiedJob.objects.filter(execution_node=self.hostname, status__in=('running', 'waiting')))\n if self.node_type in ('hybrid', 'control'):\n capacity_consumed += sum(\n settings.AWX_CONTROL_NODE_TASK_IMPACT for x in UnifiedJob.objects.filter(controller_node=self.hostname, status__in=('running', 'waiting'))\n )\n return capacity_consumed\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 112, "n_words": 37, "vocab_size": 24, "complexity": 5, "nloc": 9, "token_counts": 94, "n_ast_nodes": 155, "n_identifiers": 16, "random_cut": "def consumed_capacity(self):\n capacity_consumed = 0\n if self.node_type in ('hybrid', 'execution'):\n " }, { "id": 64116, "commit_id": "c5782b0e7107ae9ccabc923d480d49d602bafb39", "repo": "erpnext", "path": "erpnext/regional/report/e_invoice_summary/e_invoice_summary.py", "file_name": "e_invoice_summary.py", "fun_name": "execute", "commit_message": "revert: \"refactor!: drop e-invoicing integration from erpnext (#26940)\"\n\nThis reverts commit c335962827e4927f7ada084e9ba4ab2db15e3eb6.", "code": "def execute(filters=None):\n\tvalidate_filters(filters)\n\n\tcolumns = get_columns()\n\tdata = get_data(filters)\n\n\treturn columns, data\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 7, "n_words": 12, 
"vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 26, "n_ast_nodes": 44, "n_identifiers": 7, "random_cut": "def execute(filters=None):\n\tvalidate_filters(filters)\n\n\tcolumns = get_columns()\n\tdata = get_data(filters)\n\n\t" }, { "id": 334400, "commit_id": "b76eea041234a75ebb5451a7c2aba3eb7f844f9f", "repo": "diffusers", "path": "tests/test_modeling_utils.py", "file_name": "test_modeling_utils.py", "fun_name": "test_sample", "commit_message": "check with other device", "code": "def test_sample(self):\n generator = torch.manual_seed(0)\n\n # 1. Load models\n scheduler = GaussianDDPMScheduler.from_config(\"fusing/ddpm-lsun-church\")\n model = UNetModel.from_pretrained(\"fusing/ddpm-lsun-church\").to(torch_device)\n\n # 2. Sample gaussian noise\n image = scheduler.sample_noise((1, model.in_channels, model.resolution, model.resolution), device=torch_device, generator=generator)\n\n # 3. Denoise\n for t in reversed(range(len(scheduler))):\n # i) define coefficients for time step t\n clipped_image_coeff = 1 / torch.sqrt(scheduler.get_alpha_prod(t))\n clipped_noise_coeff = torch.sqrt(1 / scheduler.get_alpha_prod(t) - 1)\n image_coeff = (1 - scheduler.get_alpha_prod(t - 1)) * torch.sqrt(scheduler.get_alpha(t)) / (1 - scheduler.get_alpha_prod(t))\n clipped_coeff = torch.sqrt(scheduler.get_alpha_prod(t - 1)) * scheduler.get_beta(t) / (1 - scheduler.get_alpha_prod(t))\n\n # ii) predict noise residual\n with torch.no_grad():\n noise_residual = model(image, t)\n\n # iii) compute predicted image from residual\n # See 2nd formula at https://github.com/hojonathanho/diffusion/issues/5#issue-896554416 for comparison\n pred_mean = clipped_image_coeff * image - clipped_noise_coeff * noise_residual\n pred_mean = torch.clamp(pred_mean, -1, 1)\n prev_image = clipped_coeff * pred_mean + image_coeff * image\n\n # iv) sample variance\n prev_variance = scheduler.sample_variance(t, prev_image.shape, device=torch_device, generator=generator)\n\n # v) sample x_{t-1} ~ N(prev_image, prev_variance)\n sampled_prev_image = prev_image + prev_variance\n image = sampled_prev_image\n\n # Note: The better test is to simply check with the following lines of code that the image is sensible\n # import PIL\n # import numpy as np\n # image_processed = image.cpu().permute(0, 2, 3, 1)\n # image_processed = (image_processed + 1.0) * 127.5\n # image_processed = image_processed.numpy().astype(np.uint8)\n # image_pil = PIL.Image.fromarray(image_processed[0])\n # image_pil.save(\"test.png\")\n\n assert image.shape == (1, 3, 256, 256)\n image_slice = image[0, -1, -3:, -3:].cpu()\n import ipdb; ipdb.set_trace()\n assert (image_slice - torch.tensor([[-0.0598, -0.0611, -0.0506], [-0.0726, 0.0220, 0.0103], [-0.0723, -0.1310, -0.2458]])).abs().sum() < 1e-3\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 578, "n_words": 236, "vocab_size": 144, "complexity": 2, "nloc": 22, "token_counts": 369, "n_ast_nodes": 555, "n_identifiers": 46, "random_cut": "def test_sample(self):\n generator = torch.manual_seed(0)\n\n # 1. Load models\n scheduler = GaussianDDPMScheduler.from_config(\"fusing/ddpm-lsun-church\")\n model = UNetModel.from_pretrained(\"fusing/ddpm-lsun-church\").to(torch_device)\n\n # 2. Sample gaussian noise\n image = scheduler.sample_noise((1, model.in_channels, model.resolution, model.resolution), device=torch_device, generator=generator)\n\n # 3. 
Denoise\n for t in reversed(range(len(scheduler))):\n # i) define coefficients for time step t\n clipped_image_coeff = 1 / torch.sqrt(scheduler.get_alpha_prod(t))\n clipped_noise_coeff = torch.sqrt(1 / scheduler.get_alpha_prod(t) - 1)\n image_coeff = (1 - scheduler.get_alpha_prod(t - 1)) * torch.sqrt(scheduler.get_alpha(t)) / (1 - scheduler.get_alpha_prod(t))\n clipped_coeff = torch.sqrt(scheduler.get_alpha_prod(t - 1)) * scheduler.get_beta(t) / (1 - scheduler.get_alpha_prod(t))\n\n # ii) predict noise residual\n with torch.no_grad():\n noise_residual = model(image, t)\n\n # iii) compute predicted image from residual\n # See 2nd formula at https://github.com/hojonathanho/diffusion/issues/5#issue-896554416 for comparison\n pred_mean = clipped_image_coeff * image - clipped_noise_coeff * noise_residual\n pred_mean = torch.clamp(pred_mean, -1, 1)\n prev_image = cl" }, { "id": 114976, "commit_id": "18134ada88727d64eb2b3f04f303e3b66e43a1ec", "repo": "mindsdb", "path": "mindsdb/api/mysql/mysql_proxy/executor/executor_commands.py", "file_name": "executor_commands.py", "fun_name": "answer_create_predictor", "commit_message": "del datastore", "code": "def answer_create_predictor(self, statement):\n integration_name = None\n struct = {\n 'predictor_name': statement.name.parts[-1],\n 'select': statement.query_str,\n 'predict': [x.parts[-1] for x in statement.targets]\n }\n\n if len(struct['predict']) > 1:\n raise SqlApiException(\"Only one field can be in 'PREDICT'\")\n if isinstance(statement.integration_name, Identifier):\n struct['integration_name'] = statement.integration_name.parts[-1]\n if statement.using is not None:\n struct['using'] = statement.using\n if statement.datasource_name is not None:\n struct['datasource_name'] = statement.datasource_name.parts[-1]\n if statement.order_by is not None:\n struct['order_by'] = [x.field.parts[-1] for x in statement.order_by]\n if len(struct['order_by']) > 1:\n raise SqlApiException(\"Only one field can be in 'OPRDER BY'\")\n if statement.group_by is not None:\n struct['group_by'] = [x.parts[-1] for x in statement.group_by]\n if statement.window is not None:\n struct['window'] = statement.window\n if statement.horizon is not None:\n struct['horizon'] = statement.horizon\n\n model_interface = self.session.model_interface\n\n models = model_interface.get_models()\n model_names = [x['name'] for x in models]\n if struct['predictor_name'] in model_names:\n raise SqlApiException(f\"Predictor with name '{struct['predictor_name']}' already exists. 
Each predictor must have unique name.\")\n\n predictor_name = struct['predictor_name']\n integration_name = struct.get('integration_name')\n\n if integration_name is not None:\n handler = self.session.integration_controller.get_handler(integration_name)\n # TODO\n # raise ErBadDbError(f\"Unknown datasource: {integration_name}\")\n result = handler.native_query(struct['select'])\n\n if result.get('type') != RESPONSE_TYPE.TABLE:\n raise Exception(f'Error during query: {result.get(\"error_message\")}')\n\n ds_data_df = result['data_frame']\n ds_column_names = list(ds_data_df.columns)\n\n predict = self._check_predict_columns(struct['predict'], ds_column_names)\n\n for i, p in enumerate(predict):\n predict[i] = get_column_in_case(ds_column_names, p)\n else:\n predict = struct['predict']\n\n timeseries_settings = {}\n for w in ['order_by', 'group_by', 'window', 'horizon']:\n if w in struct:\n timeseries_settings[w] = struct.get(w)\n\n kwargs = struct.get('using', {})\n if len(timeseries_settings) > 0:\n if 'timeseries_settings' not in kwargs:\n kwargs['timeseries_settings'] = timeseries_settings\n else:\n if isinstance(kwargs.get('timeseries_settings'), str):\n kwargs['timeseries_settings'] = json.loads(kwargs['timeseries_settings'])\n kwargs['timeseries_settings'].update(timeseries_settings)\n\n # Cast all column names to same case\n if isinstance(kwargs.get('timeseries_settings'), dict):\n order_by = kwargs['timeseries_settings'].get('order_by')\n if order_by is not None:\n for i, col in enumerate(order_by):\n new_name = get_column_in_case(ds_column_names, col)\n if new_name is None:\n raise Exception(\n f'Cant get appropriate cast column case. Columns: {ds_column_names}, column: {col}'\n )\n kwargs['timeseries_settings']['order_by'][i] = new_name\n group_by = kwargs['timeseries_settings'].get('group_by')\n if group_by is not None:\n for i, col in enumerate(group_by):\n new_name = get_column_in_case(ds_column_names, col)\n kwargs['timeseries_settings']['group_by'][i] = new_name\n if new_name is None:\n raise Exception(\n f'Cant get appropriate cast column case. 
Columns: {ds_column_names}, column: {col}'\n )\n\n model_interface.learn(predictor_name, ds_data_df, predict, kwargs=kwargs)\n\n return ExecuteAnswer(ANSWER_TYPE.OK)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1257, "n_words": 325, "vocab_size": 168, "complexity": 30, "nloc": 77, "token_counts": 642, "n_ast_nodes": 1101, "n_identifiers": 60, "random_cut": "def answer_create_predictor(self, statement):\n integration_name = None\n struct = {\n 'predictor_name': statement.name.parts[-1],\n 'select': statement.query_str,\n 'predict': [x.parts[-1] for x in statement.targets]\n }\n\n if len(struct['predict']) > 1:\n raise SqlApiException(\"Only one field can be in 'PREDICT'\")\n if isinstance(statement.integration_name, Identifier):\n struct['integration_name'] = statement.integration_name.parts[-1]\n if statement.using is not None:\n struct['using'] = statement.using\n if statement.datasource_name is not None:\n struct['datasource_name'] = statement.datasource_name.parts[-1]\n if statement.order_by is not None:\n struct['order_by'] = [x.field.parts[-1] for x in statement.order_by]\n if len(struct['order_by']) > 1:\n raise SqlApiException(\"Only one field can be in 'OPRDER BY'\")\n if statement.group_by is not None:\n struct['group_by'] = [x.parts[-1] for x in statement.group_by]\n if statement.window is not None:\n struct['window'] = statement.window\n if statement.horizon is not None:\n struct['horizon'] = statement.horizon\n\n m" }, { "id": 251781, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/data/addonscripts/configure.py", "file_name": "configure.py", "fun_name": "load", "commit_message": "make it black!", "code": "def load(self, loader):\n loader.add_option(\n name=\"optionaddon\",\n typespec=Optional[int],\n default=None,\n help=\"Option Addon\",\n )\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 67, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 7, "token_counts": 31, "n_ast_nodes": 48, "n_identifiers": 10, "random_cut": "def load(self, loader):\n loader.add_option(\n name=\"optionaddon\",\n typespec=Optional[int],\n default=None,\n help=\"Option Addon\",\n )\n" }, { "id": 12707, "commit_id": "c3849c6fee4a65a77a82b2cfda9670d727ff0f53", "repo": "jina", "path": "tests/unit/types/request/test_request.py", "file_name": "test_request.py", "fun_name": "test_status", "commit_message": "feat: allow to access parameters of data request wo loading data (#4991)", "code": "def test_status():\n r = DataRequest()\n r.docs.extend([Document()])\n r.add_exception(ValueError('intentional_error'))\n byte_array = DataRequestProto.SerializeToString(r)\n\n deserialized_request = DataRequestProto.FromString(byte_array)\n assert not deserialized_request.is_decompressed_with_data\n assert deserialized_request.status.code == jina_pb2.StatusProto.ERROR\n assert deserialized_request.is_decompressed_wo_data\n assert not deserialized_request.is_decompressed_with_data\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 51, "n_words": 25, "vocab_size": 18, "complexity": 1, "nloc": 10, "token_counts": 72, "n_ast_nodes": 120, "n_identifiers": 20, "random_cut": "def test_status():\n r = DataRequest()\n r.docs.extend([Document()])\n 
r.add_exception(ValueError('intentional_" }, { "id": 197297, "commit_id": "65be461082dda54c8748922f9c29a19af1279fe1", "repo": "sympy", "path": "sympy/combinatorics/fp_groups.py", "file_name": "fp_groups.py", "fun_name": "equals", "commit_message": "Remove abbreviations in documentation", "code": "def equals(self, word1, word2):\n \n if self.reduce(word1*word2**-1) == self.identity:\n return True\n elif self._rewriting_system.is_confluent:\n return False\n return None\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 66, "n_words": 16, "vocab_size": 14, "complexity": 3, "nloc": 6, "token_counts": 40, "n_ast_nodes": 64, "n_identifiers": 8, "random_cut": "def equals(self, word1, word2):\n \n if self.reduce(word1*word2**-1) == self.identity:\n re" }, { "id": 201378, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/auth_tests/test_management.py", "file_name": "test_management.py", "fun_name": "test_existing", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_existing(self):\n User.objects.create(username=\"joe\")\n management.get_system_username = lambda: \"joe\"\n self.assertEqual(management.get_default_username(), \"\")\n self.assertEqual(management.get_default_username(check_db=False), \"joe\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 38, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 49, "n_ast_nodes": 86, "n_identifiers": 11, "random_cut": "def test_existing(self):\n User.objects.create(username=\"joe\")\n management.get_system_username = lambda: \"joe\"\n self.assertEqual(management.get_default_username(), \"\")\n self.assertEqual(management.get_default" }, { "id": 205843, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/sql/query.py", "file_name": "query.py", "fun_name": "has_select_fields", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def has_select_fields(self):\n return bool(\n self.select or self.annotation_select_mask or self.extra_select_mask\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 34, "n_words": 10, "vocab_size": 9, "complexity": 3, "nloc": 4, "token_counts": 20, "n_ast_nodes": 32, "n_identifiers": 6, "random_cut": "def has_select_fields(self):\n return bool(\n self.select or self.annotation_select_mask or self.extra_select_mask\n )\n" }, { "id": 25283, "commit_id": "d4a4c07c561421832f3207e41a0eba3460d431d7", "repo": "PaddleOCR", "path": "ppstructure/kie/predict_kie_token_ser_re.py", "file_name": "predict_kie_token_ser_re.py", "fun_name": "__call__", "commit_message": "add ser to ppstructure system", "code": "def __call__(self, img):\n starttime = time.time()\n ser_results, ser_inputs, ser_elapse = self.ser_engine(img)\n if self.predictor is None:\n return ser_results, ser_elapse\n\n re_input, entity_idx_dict_batch = make_input(ser_inputs, ser_results)\n if self.use_visual_backbone == False:\n re_input.pop(4)\n for idx in range(len(self.input_tensor)):\n self.input_tensor[idx].copy_from_cpu(re_input[idx])\n\n self.predictor.run()\n outputs = []\n for output_tensor in self.output_tensors:\n output = output_tensor.copy_to_cpu()\n outputs.append(output)\n preds = dict(\n loss=outputs[1],\n 
pred_relations=outputs[2],\n hidden_states=outputs[0], )\n\n post_result = self.postprocess_op(\n preds,\n ser_results=ser_results,\n entity_idx_dict_batch=entity_idx_dict_batch)\n\n elapse = time.time() - starttime\n return post_result, elapse\n\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 277, "n_words": 66, "vocab_size": 50, "complexity": 5, "nloc": 25, "token_counts": 173, "n_ast_nodes": 266, "n_identifiers": 35, "random_cut": "def __call__(self, img):\n starttime = time.time()\n ser_results, ser_inputs, ser_elapse = self.ser_engine(img)\n if self.predictor is None:\n return ser_results, ser_elapse\n\n re_input, entity_idx_dict_batch = make_input(ser_inputs, ser_results)\n if self.use_visual_backbone == False:\n " }, { "id": 105478, "commit_id": "b88a656cf94c4ad972154371c83c1af759fde522", "repo": "datasets", "path": "datasets/exams/exams.py", "file_name": "exams.py", "fun_name": "_generate_examples", "commit_message": "Fix bug and checksums in exams dataset (#4853)\n\n* Fix KeyError in exams dataset\r\n\r\n* Update metadata JSON\r\n\r\n* Fix dataset card", "code": "def _generate_examples(self, filepath, files=None):\n if self.config.name == \"alignments\":\n with open(filepath, encoding=\"utf-8\") as f:\n for id_, line in enumerate(f):\n line_dict = json.loads(line.strip())\n in_id, out_list = list(line_dict.items())[0]\n yield id_, {\"source_id\": in_id, \"target_id_list\": out_list}\n else:\n for path, f in files:\n if path == filepath:\n for id_, line in enumerate(f):\n line_dict = json.loads(line.strip())\n for choice in line_dict[\"question\"][\"choices\"]:\n choice[\"para\"] = choice.get(\"para\", \"\")\n yield id_, {\n \"id\": line_dict[\"id\"],\n \"question\": line_dict[\"question\"],\n \"answerKey\": line_dict[\"answerKey\"],\n \"info\": line_dict[\"info\"],\n }\n break\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 457, "n_words": 70, "vocab_size": 50, "complexity": 7, "nloc": 21, "token_counts": 175, "n_ast_nodes": 300, "n_identifiers": 23, "random_cut": "def _generate_examples(self, filepath, files=None):\n if self.config.name == \"alignments\":\n with open(filepath, encoding=\"utf-8\") as f:\n for id_, line in enumerate(f):\n line_dict = json.loads(line.strip())\n in_id, out_list = list(line_dict.items())[0]\n yield id_, {\"source_id\": in_id, \"target_id_list\": out_list}\n else:\n for path, f in files:\n if path == filepath:\n for id_, line in enumerate(f):\n line_dict = json.loads(line.strip())\n for choice in line_dict[\"question\"][\"choices\"]:\n choice[\"para\"] = choice.get(\"para\", \"\")\n yield id_, {\n \"id\": line_dict[\"id\"],\n " }, { "id": 17935, "commit_id": "a485e7e15458907349ee510684112af2430e39e4", "repo": "ccxt", "path": "python/ccxt/coinbasepro.py", "file_name": "coinbasepro.py", "fun_name": "fetch_markets", "commit_message": "1.72.67\n\n[ci skip]", "code": "def fetch_markets(self, params={}):\n response = self.publicGetProducts(params)\n #\n # [\n # {\n # \"id\":\"ZEC-BTC\",\n # \"base_currency\":\"ZEC\",\n # \"quote_currency\":\"BTC\",\n # \"base_min_size\":\"0.01000000\",\n # \"base_max_size\":\"1500.00000000\",\n # \"quote_increment\":\"0.00000100\",\n # \"base_increment\":\"0.00010000\",\n # \"display_name\":\"ZEC/BTC\",\n # \"min_market_funds\":\"0.001\",\n # \"max_market_funds\":\"30\",\n # \"margin_enabled\":false,\n # 
\"post_only\":false,\n # \"limit_only\":false,\n # \"cancel_only\":false,\n # \"trading_disabled\":false,\n # \"status\":\"online\",\n # \"status_message\":\"\"\n # }\n # ]\n #\n result = []\n for i in range(0, len(response)):\n market = response[i]\n id = self.safe_string(market, 'id')\n baseId = self.safe_string(market, 'base_currency')\n quoteId = self.safe_string(market, 'quote_currency')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n status = self.safe_string(market, 'status')\n result.append(self.extend(self.fees['trading'], {\n 'id': id,\n 'symbol': base + '/' + quote,\n 'base': base,\n 'quote': quote,\n 'settle': None,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': None,\n 'type': 'spot',\n 'spot': True,\n 'margin': self.safe_value(market, 'margin_enabled'),\n 'swap': False,\n 'future': False,\n 'option': False,\n 'active': (status == 'online'),\n 'contract': False,\n 'linear': None,\n 'inverse': None,\n 'contractSize': None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'amount': self.safe_number(market, 'base_increment'),\n 'price': self.safe_number(market, 'quote_increment'),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': self.safe_number(market, 'base_min_size'),\n 'max': self.safe_number(market, 'base_max_size'),\n },\n 'price': {\n 'min': None,\n 'max': None,\n },\n 'cost': {\n 'min': self.safe_number(market, 'min_market_funds'),\n 'max': self.safe_number(market, 'max_market_funds'),\n },\n },\n 'info': market,\n }))\n return result\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 1496, "n_words": 187, "vocab_size": 110, "complexity": 2, "nloc": 60, "token_counts": 341, "n_ast_nodes": 609, "n_identifiers": 23, "random_cut": "def fetch_markets(self, params={}):\n response = self.publicGetProducts(params)\n #\n # [\n # {\n # \"id\":\"ZEC-BTC\",\n # \"base_currency\":\"ZEC\",\n # \"quote_currency\":\"BTC\",\n # \"base_min_size\":\"0.01000000\",\n # " }, { "id": 106621, "commit_id": "b4115c0337b1bacc876bef1ece97e8fa8b3e2834", "repo": "visdom", "path": "example/components/plot_line.py", "file_name": "plot_line.py", "fun_name": "plot_line_pytorch", "commit_message": "test: split demo.py into seperate files and functions", "code": "def plot_line_pytorch(viz, env):\n try:\n import torch\n viz.line(Y=torch.Tensor([[0., 0.], [1., 1.]]))\n except ImportError:\n print('Skipped PyTorch example')\n\n# stemplot", "url": "https://github.com/fossasia/visdom.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 42, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 6, "token_counts": 47, "n_ast_nodes": 70, "n_identifiers": 9, "random_cut": "def plot_line_pytorch(viz, env):\n try:\n import torch\n viz.line(Y=torch.Tens" }, { "id": 321966, "commit_id": "cafbf96fdd7a835789d075f9d3d7c34f25a0f459", "repo": "qutebrowser", "path": "qutebrowser/mainwindow/tabwidget.py", "file_name": "tabwidget.py", "fun_name": "_get_icon_rect", "commit_message": "Use QProxyStyle for TabBarStyle\n\nCloses #812\n\nThis does *not* seem to help with the crashes in #5385 or #5124, weirdly\nenough, but it still seems simpler, so why not do it.", "code": "def _get_icon_rect(self, opt, text_rect):\n \n icon_size = opt.iconSize\n if not icon_size.isValid():\n icon_extent = self.pixelMetric(QStyle.PixelMetric.PM_SmallIconSize)\n 
icon_size = QSize(icon_extent, icon_extent)\n icon_mode = (QIcon.Mode.Normal if opt.state & QStyle.StateFlag.State_Enabled\n else QIcon.Mode.Disabled)\n icon_state = (QIcon.State.On if opt.state & QStyle.StateFlag.State_Selected\n else QIcon.State.Off)\n # reserve space for favicon when tab bar is vertical (issue #1968)\n position = config.cache['tabs.position']\n if (position in [QTabWidget.TabPosition.East, QTabWidget.TabPosition.West] and\n config.cache['tabs.favicons.show'] != 'never'):\n tab_icon_size = icon_size\n else:\n actual_size = opt.icon.actualSize(icon_size, icon_mode, icon_state)\n tab_icon_size = QSize(\n min(actual_size.width(), icon_size.width()),\n min(actual_size.height(), icon_size.height()))\n\n icon_top = text_rect.center().y() + 1 - tab_icon_size.height() // 2\n icon_rect = QRect(QPoint(text_rect.left(), icon_top), tab_icon_size)\n icon_rect = self.baseStyle().visualRect(opt.direction, opt.rect, icon_rect)\n return icon_rect\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 328, "n_words": 96, "vocab_size": 74, "complexity": 6, "nloc": 22, "token_counts": 241, "n_ast_nodes": 381, "n_identifiers": 51, "random_cut": "def _get_icon_rect(self, opt, text_rect):\n \n icon_size = opt.iconSize\n if not icon_size.isValid():\n icon_extent = self.pixelMetric(QStyle.PixelMetric.PM_SmallIconSize)\n icon_size = QSize(icon_extent, icon_extent)\n icon_mo" }, { "id": 288861, "commit_id": "f23b1750e85f07091eb896a0b12b8f95e5646338", "repo": "core", "path": "tests/components/homekit_controller/specific_devices/test_ecobee_occupancy.py", "file_name": "test_ecobee_occupancy.py", "fun_name": "test_ecobee_occupancy_setup", "commit_message": "Migrate HomeKit Controller to use stable identifiers (#80064)", "code": "async def test_ecobee_occupancy_setup(hass):\n \n accessories = await setup_accessories_from_file(hass, \"ecobee_occupancy.json\")\n await setup_test_accessories(hass, accessories)\n\n await assert_devices_and_entities_created(\n hass,\n DeviceTestInfo(\n unique_id=HUB_TEST_ACCESSORY_ID,\n name=\"Master Fan\",\n model=\"ecobee Switch+\",\n manufacturer=\"ecobee Inc.\",\n sw_version=\"4.5.130201\",\n hw_version=\"\",\n serial_number=\"111111111111\",\n devices=[],\n entities=[\n EntityTestInfo(\n entity_id=\"binary_sensor.master_fan\",\n friendly_name=\"Master Fan\",\n unique_id=\"00:00:00:00:00:00_1_56\",\n state=\"off\",\n ),\n ],\n ),\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 289, "n_words": 37, "vocab_size": 33, "complexity": 1, "nloc": 24, "token_counts": 90, "n_ast_nodes": 152, "n_identifiers": 21, "random_cut": "async def test_ecobee_occupancy_setup(hass):\n \n accessories = await setup_accessories_from_file(hass, \"ecobee_occupancy.json\")\n await setup_test_accessories(hass, accessories)\n\n await assert_devices_and_entities_created(\n hass,\n DeviceTestInfo(\n unique_id=HUB_TEST_ACCESSORY_ID,\n name=\"Master Fan\",\n model=\"ecobee Switch+\",\n manufacturer=\"ecobee Inc.\",\n sw_version=\"4.5.130201\",\n hw_version=\"\",\n serial_number=\"111111111111\",\n devices=[],\n entities=[\n EntityTestInfo(\n entity_id=\"binary_sensor.master_fan\",\n friendly_name=\"Master" }, { "id": 136833, "commit_id": "38821e6acd93f48d28e1adea61d6b3c0ec350eda", "repo": "ray", "path": "rllib/algorithms/mock.py", "file_name": "mock.py", "fun_name": "step", "commit_message": 
"[RLlib] AlgorithmConfig: Finish cleanups, replace remaining python dicts, change docstrings. (#30468)", "code": "def step(self):\n if (\n self.config.mock_error\n and self.iteration == 1\n and (self.config.persistent_error or not self.restored)\n ):\n raise Exception(\"mock error\")\n result = dict(\n episode_reward_mean=10, episode_len_mean=10, timesteps_this_iter=10, info={}\n )\n if self.config.user_checkpoint_freq > 0 and self.iteration > 0:\n if self.iteration % self.config.user_checkpoint_freq == 0:\n result.update({tune_result.SHOULD_CHECKPOINT: True})\n return result\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 166, "n_words": 44, "vocab_size": 33, "complexity": 8, "nloc": 14, "token_counts": 101, "n_ast_nodes": 157, "n_identifiers": 18, "random_cut": "def step(self):\n if (\n self." }, { "id": 147452, "commit_id": "cf7b4e65c28ddedb5bdbe9dee8b552b2428b21bd", "repo": "ray", "path": "python/ray/serve/tests/test_pipeline_dag.py", "file_name": "test_pipeline_dag.py", "fun_name": "test_shared_deployment_handle", "commit_message": "[serve] Implement `serve.build` (#23232)\n\nThe Serve REST API relies on YAML config files to specify and deploy deployments. This change introduces `serve.build()` and `serve build`, which translate Pipelines to YAML files.\r\n\r\nCo-authored-by: Shreyas Krishnaswamy ", "code": "def test_shared_deployment_handle(serve_instance, use_build):\n with InputNode() as dag_input:\n m = Model.bind(2)\n combine = Combine.bind(m, m2=m)\n combine_output = combine.bind(dag_input)\n serve_dag = DAGDriver.bind(combine_output, input_schema=json_resolver)\n\n handle = serve.run(serve_dag)\n assert ray.get(handle.predict.remote(1)) == 4\n assert requests.post(\"http://127.0.0.1:8000/\", json=1).json() == 4\n\n\n@pytest.mark.parametrize(\"use_build\", [False, True])", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"use_build\", [False, True])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 74, "n_words": 36, "vocab_size": 29, "complexity": 1, "nloc": 9, "token_counts": 95, "n_ast_nodes": 178, "n_identifiers": 29, "random_cut": "def test_shared_deployment_handle(serve_instance, use_build):\n with InputNode() as dag_input:\n m = Model.bind(2)\n combine = Combine.bind(m, m2=m)\n combine_output = combine.bind(dag_input)\n serve_dag = DAGDriver.bind(combine_output, input_schema=json_resolver)\n\n handle = serve.run(serve_dag)\n assert ray.get(handle.predict.remote(1)) == 4\n assert requests.post(\"http://127.0.0.1:8000/\", json=1).json() == 4\n\n\n@pytest.mark.parametrize(\"use_build\", [False, True])" }, { "id": 30555, "commit_id": "b17fb6138980531ffe67fdafa3e07b916feedd82", "repo": "OCRmyPDF", "path": "src/ocrmypdf/subprocess/__init__.py", "file_name": "__init__.py", "fun_name": "_error_missing_program", "commit_message": "Configure pylint in pyproject and delint", "code": "def _error_missing_program(program, package, required_for, recommended):\n # pylint: disable=unused-argument\n if recommended:\n log.warning(MISSING_RECOMMEND_PROGRAM.format(**locals()))\n elif required_for:\n log.error(MISSING_OPTIONAL_PROGRAM.format(**locals()))\n else:\n log.error(MISSING_PROGRAM.format(**locals()))\n _error_trailer(**locals())\n\n", "url": "https://github.com/ocrmypdf/OCRmyPDF.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 52, "n_words": 17, "vocab_size": 17, 
"complexity": 3, "nloc": 8, "token_counts": 68, "n_ast_nodes": 115, "n_identifiers": 14, "random_cut": "def _error_missing_program(program, package, required_for, recommended):\n # pylint: disable=unused-argument\n if recommended:\n log.warning(MISSING_RECOMMEND_PROGRAM.format(**locals()))\n elif required_fo" }, { "id": 91422, "commit_id": "284e980df0018f8baee659999268bdd4c7d08255", "repo": "sentry", "path": "tests/sentry/models/test_group.py", "file_name": "test_group.py", "fun_name": "test_qualified_share_id_bulk", "commit_message": "ref: replace self.assertRaises with pytest.raises (#35685)\n\n* add flake8 plugin to detect assertRaises\r\n\r\n* ref: replace self.assertRaises with pytest.raises\r\n\r\n* non-sed fixes", "code": "def test_qualified_share_id_bulk(self):\n project = self.create_project(name=\"foo bar\")\n group = self.create_group(project=project, short_id=project.next_short_id())\n group_2 = self.create_group(project=project, short_id=project.next_short_id())\n group_short_id = group.qualified_short_id\n group_2_short_id = group_2.qualified_short_id\n assert [group] == Group.objects.by_qualified_short_id_bulk(\n group.organization.id, [group_short_id]\n )\n assert {group, group_2} == set(\n Group.objects.by_qualified_short_id_bulk(\n group.organization.id,\n [group_short_id, group_2_short_id],\n )\n )\n\n group.update(status=GroupStatus.PENDING_DELETION)\n with pytest.raises(Group.DoesNotExist):\n Group.objects.by_qualified_short_id_bulk(\n group.organization.id, [group_short_id, group_2_short_id]\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 222, "n_words": 46, "vocab_size": 30, "complexity": 1, "nloc": 20, "token_counts": 149, "n_ast_nodes": 231, "n_identifiers": 26, "random_cut": "def test_qualified_share_id_bulk(self):\n project = self.create_project(name=\"foo bar\")\n " }, { "id": 7083, "commit_id": "f54818e970b1986244b3e41b89e3eeb2f1ed3cac", "repo": "ludwig", "path": "tests/ludwig/features/test_sequence_features.py", "file_name": "test_sequence_features.py", "fun_name": "test_text_preproc_module_space_punct_tokenizer", "commit_message": "[Torchscript] Adds NaN handling to preprocessing modules (#2179)\n\n* wip\r\n\r\n* minimum working\r\n\r\n* revert changes to h3\r\n\r\n* cleanup\r\n\r\n* adds sequence/timeseries nan handling\r\n\r\n* cleanup\r\n\r\n* cleanup\r\n\r\n* cleanup\r\n\r\n* cleanup\r\n\r\n* cleanup\r\n\r\n* cleanup\r\n\r\n* fixed failing h3 test\r\n\r\n* updated sequence tests\r\n\r\n* update to_inference_module", "code": "def test_text_preproc_module_space_punct_tokenizer():\n metadata = {\n \"preprocessing\": {\n \"lowercase\": True,\n \"tokenizer\": \"space_punct\",\n \"unknown_symbol\": \"\",\n \"padding_symbol\": \"\",\n \"computed_fill_value\": \"\",\n },\n \"max_sequence_length\": SEQ_SIZE,\n \"str2idx\": {\n \"\": 0,\n \"\": 1,\n \"\": 2,\n \"\": 3,\n \"this\": 4,\n \"sentence\": 5,\n \"has\": 6,\n \"punctuation\": 7,\n \",\": 8,\n \".\": 9,\n },\n }\n module = _SequencePreprocessing(metadata)\n\n res = module([\"punctuation\", \",,,,\", \"this... this... 
punctuation\", \"unknown\"])\n\n assert torch.allclose(\n res, torch.tensor([[1, 7, 0, 2, 2, 2], [1, 8, 8, 8, 8, 0], [1, 4, 9, 9, 9, 4], [1, 3, 0, 2, 2, 2]])\n )\n\n\n@pytest.mark.skipif(\n torch.torch_version.TorchVersion(torchtext.__version__) < (0, 12, 0), reason=\"requires torchtext 0.12.0 or higher\"\n)", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "@pytest.mark.skipif(\n torch.torch_version.TorchVersion(torchtext.__version__) < (0, 12, 0), reason=\"requires torchtext 0.12.0 or higher\"\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 320, "n_words": 95, "vocab_size": 70, "complexity": 1, "nloc": 28, "token_counts": 172, "n_ast_nodes": 318, "n_identifiers": 17, "random_cut": "def test_text_preproc_module_space_punct_tokenizer():\n metadata = {\n \"preprocessing\": {\n \"lowercase\": True,\n \"tokenizer\": \"space_punct\",\n \"unknown_symbol\": \"\",\n \"padding_symbol\": \"\",\n \"computed_fill_value\": \"\",\n },\n \"max_sequence_length\": SEQ_SIZE,\n \"str2idx\": {\n \"\": 0,\n \"\": 1,\n \"\": 2,\n \"\": 3,\n \"this\": 4,\n \"sentence\": 5,\n \"has\": 6,\n \"punctuation\": 7,\n \"" }, { "id": 100168, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/sentry/data_export/endpoints/test_data_export.py", "file_name": "test_data_export.py", "fun_name": "test_export_invalid_date_params", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_export_invalid_date_params(self):\n \n payload = self.make_payload(\"discover\", {\"statsPeriod\": \"shrug\"})\n with self.feature(\"organizations:discover-query\"):\n response = self.get_error_response(self.org.slug, status_code=400, **payload)\n assert response.data == {\"non_field_errors\": [\"Invalid statsPeriod: 'shrug'\"]}\n\n payload = self.make_payload(\n \"discover\",\n {\n \"start\": \"2021-02-27T12:07:37\",\n \"end\": \"shrug\",\n },\n )\n with self.feature(\"organizations:discover-query\"):\n response = self.get_error_response(self.org.slug, status_code=400, **payload)\n assert response.data == {\"non_field_errors\": [\"shrug is not a valid ISO8601 date query\"]}\n\n payload = self.make_payload(\n \"discover\",\n {\n \"start\": \"shrug\",\n \"end\": \"2021-02-27T12:07:37\",\n },\n )\n with self.feature(\"organizations:discover-query\"):\n response = self.get_error_response(self.org.slug, status_code=400, **payload)\n assert response.data == {\"non_field_errors\": [\"shrug is not a valid ISO8601 date query\"]}\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 324, "n_words": 81, "vocab_size": 37, "complexity": 1, "nloc": 25, "token_counts": 177, "n_ast_nodes": 317, "n_identifiers": 11, "random_cut": "def test_export_invalid_date_params(self):\n \n payload = self.make_payload(\"discover\", {\"statsPeriod\": \"shrug\"})\n with self.feature(\"organizations:discover-query\"):\n response = self.get_error_response(self.org.slug, status_code=400, **payload)\n assert response.data == {\"non_field_errors\": [\"Invalid statsPeriod: 'shrug'\"]}\n\n payload = self.make_payload(\n \"discover\",\n {\n \"start\": \"2021-02-27T12:07:37\",\n \"end\": \"shrug\",\n },\n )\n with self.feature(\"organizations:discover-query\"):\n response = self.get_error_response(self.org.slug, status_code=400, **payload)\n assert response.data == {\"non_field_errors\": [\"shrug is not a valid ISO8601 date query\"]}\n\n payload = self.make_payload(\n 
\"discover\",\n {\n \"start\": \"shrug\",\n \"end\": \"2021-02-27T12:07:37\",\n },\n )\n with self.feature(\"organizations:discover-query\"):\n response = self.get_error_response(se" }, { "id": 75885, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/search/tests/test_related_fields.py", "file_name": "test_related_fields.py", "fun_name": "test_select_on_queryset_with_taggable_manager", "commit_message": "Reformat with black", "code": "def test_select_on_queryset_with_taggable_manager(self):\n fields = index.RelatedFields(\n \"tags\",\n [\n index.SearchField(\"name\"),\n ],\n )\n\n queryset = fields.select_on_queryset(Novel.objects.all())\n\n # Tags should be prefetch_related\n self.assertIn(\"tags\", queryset._prefetch_related_lookups)\n self.assertFalse(queryset.query.select_related)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 110, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 10, "token_counts": 58, "n_ast_nodes": 98, "n_identifiers": 16, "random_cut": "def test_select_on_queryset_with_taggable_manager(self):\n fields = index.RelatedFields(\n \"tags\",\n [\n index.SearchField(\"name\"),\n ],\n )\n\n queryset = fields.select_on_queryset(Novel.objects.all())\n\n # Tags should be prefetch_related\n self.assertIn(\"tags\", queryset._prefetch_related_lookups)\n " }, { "id": 22554, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "balance_parenthesis.py", "file_name": "balance_parenthesis.py", "fun_name": "is_balanced", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def is_balanced(check_string):\n s = Stack()\n index = 0\n is_bal = True\n while index < len(check_string) and is_bal:\n paren = check_string[index]\n if paren in \"{[(\":\n s.push(paren)\n else:\n if s.is_empty():\n is_bal = False\n else:\n top = s.pop()\n if not is_same(top, paren):\n is_bal = False\n index += 1\n\n if s.is_empty() and is_bal:\n return True\n else:\n return False\n\n\nprint(is_balanced(\"[((())})]\"))\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 210, "n_words": 55, "vocab_size": 33, "complexity": 8, "nloc": 20, "token_counts": 94, "n_ast_nodes": 173, "n_identifiers": 14, "random_cut": "def is_balanced(check_string):\n s = Stack()\n index = 0\n is_bal = True\n while index < len(check_string) and is_bal:\n paren = check_string[index]\n if paren in \"{[(\":\n s.push(paren)\n else:\n if s.is_empty():\n is_bal = False\n else:\n top = s.pop()\n if not is_same(top, paren):\n is_bal = False\n " }, { "id": 335694, "commit_id": "3e2cff4da25642e964c48fa44d7c00d3314b1ce8", "repo": "diffusers", "path": "src/diffusers/models/unet_sde_score_estimation.py", "file_name": "unet_sde_score_estimation.py", "fun_name": "_variance_scaling", "commit_message": "better names and more cleanup", "code": "def _variance_scaling(scale=1.0, in_axis=1, out_axis=0, dtype=torch.float32, device=\"cpu\"):\n \n scale = 1e-10 if scale == 0 else scale\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 15, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 45, "n_ast_nodes": 54, "n_identifiers": 8, "random_cut": "def _variance_scaling(scale=1.0, in_axis=1, out_axis=0, dtype=torch.float32, 
device=\"cpu\"):\n \n scale = 1e-10 if scale == 0 else scale\n" }, { "id": 252064, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/tools/web/test_app.py", "file_name": "test_app.py", "fun_name": "test_options", "commit_message": "make it black!", "code": "def test_options(self):\n j = get_json(self.fetch(\"/options\"))\n assert type(j) == dict\n assert type(j[\"anticache\"]) == dict\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 33, "n_words": 13, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 33, "n_ast_nodes": 57, "n_identifiers": 7, "random_cut": "def test_options(self):\n j = get_json(self.fetch(\"/options\"))\n assert type(j) == dict\n " }, { "id": 82065, "commit_id": "385a2eabcecb4b618c6ce28a338d2fe502c93892", "repo": "awx", "path": "awx/api/validators.py", "file_name": "validators.py", "fun_name": "__str__", "commit_message": "hostname validation in InstanceSerializer (#12979)\n\n* initial commit of hostname validation to InstanceSerializer\r\n\r\nCo-authored-by: Cesar Francisco San Nicolas Martinez ", "code": "def __str__(self):\n return f\"regex={self.regex}, message={self.message}, code={self.code}, inverse_match={self.inverse_match}, flags={self.flags}\"\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 49, "n_identifiers": 7, "random_cut": "def __str__(self):\n return f\"regex={" }, { "id": 249485, "commit_id": "ebfeac7c5ded851a2639911ec6adf9d0fcdb029a", "repo": "synapse", "path": "synapse/util/rust.py", "file_name": "rust.py", "fun_name": "_dist_is_editable", "commit_message": "Check if Rust lib needs rebuilding. 
(#13759)\n\nThis protects against the common mistake of failing to remember to rebuild Rust code after making changes.", "code": "def _dist_is_editable() -> bool:\n \n for path_item in sys.path:\n egg_link = os.path.join(path_item, \"matrix-synapse.egg-link\")\n if os.path.isfile(egg_link):\n return True\n return False\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 52, "n_words": 18, "vocab_size": 17, "complexity": 3, "nloc": 7, "token_counts": 40, "n_ast_nodes": 67, "n_identifiers": 9, "random_cut": "def _dist_is_editable() -> bool:\n \n for path_item in sys.path" }, { "id": 319661, "commit_id": "f97f9b857b1780ba0c87f13ec04fa2d31d7e6595", "repo": "paperless-ngx", "path": "src/documents/serialisers.py", "file_name": "serialisers.py", "fun_name": "get_result", "commit_message": "Task results popover", "code": "def get_result(self, obj):\n result = \"\"\n if hasattr(obj, \"attempted_task\"):\n result = obj.attempted_task.result\n return result\n\n status = serializers.SerializerMethodField()\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 51, "n_words": 17, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 27, "n_ast_nodes": 58, "n_identifiers": 9, "random_cut": "def get_result(self, obj):\n result = \"\"\n if hasattr(obj, \"attempted_task\"):\n result = obj.attempted_task.result\n return result\n\n status = serializers.Seria" }, { "id": 206811, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/views/debug.py", "file_name": "debug.py", "fun_name": "get_safe_request_meta", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_safe_request_meta(self, request):\n \n if not hasattr(request, \"META\"):\n return {}\n return {k: self.cleanse_setting(k, v) for k, v in request.META.items()}\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 50, "n_words": 18, "vocab_size": 17, "complexity": 3, "nloc": 4, "token_counts": 45, "n_ast_nodes": 73, "n_identifiers": 9, "random_cut": "def get_safe_request_meta(self, request):\n \n if not hasattr(requ" }, { "id": 308880, "commit_id": "e222e1b6f05b630bef5aed73e307ca5072b6f286", "repo": "core", "path": "homeassistant/components/flux_led/number.py", "file_name": "number.py", "fun_name": "_pixels_and_segments_fit_in_music_mode", "commit_message": "Add device configuration entities to flux_led (#62786)\n\nCo-authored-by: Chris Talkington ", "code": "def _pixels_and_segments_fit_in_music_mode(self) -> bool:\n \n pixels_per_segment = self._device.pixels_per_segment\n segments = self._device.segments\n assert pixels_per_segment is not None\n assert segments is not None\n return bool(\n pixels_per_segment <= MUSIC_PIXELS_PER_SEGMENT_MAX\n and segments <= MUSIC_SEGMENTS_MAX\n and pixels_per_segment * segments <= MUSIC_PIXELS_MAX\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 118, "n_words": 36, "vocab_size": 22, "complexity": 3, "nloc": 14, "token_counts": 49, "n_ast_nodes": 78, "n_identifiers": 9, "random_cut": "def _pixels_and_segments_fit_in_music_mode(self) -> bool:\n \n pixels_per_segment = self._device.pixels_per_segment\n segments = self._device.segments\n assert 
pixels_per_segment is not None\n assert segments is not None\n return bool(\n " }, { "id": 49697, "commit_id": "8468e1ac6cfe165aa1e3cf4f77ab6fb66ce98614", "repo": "PaddleHub", "path": "paddlehub/compat/task/base_task.py", "file_name": "base_task.py", "fun_name": "_run_with_predictor", "commit_message": "Remove fluid api in modules and pkg. (#1906)", "code": "def _run_with_predictor(self) -> List[RunState]:\n \n global_run_states = []\n period_run_states = []\n\n feed_var_shape = []\n feed_var_type = []\n for var in self.feed_var_list:\n feed_var_shape.append(var.shape)\n feed_var_type.append(paddle_utils.dtype_map[var.dtype])\n\n data_reader = self.generator\n for batch in data_reader():\n\n step_run_state = RunState(len(self.fetch_list))\n step_run_state.run_step = 1\n num_batch_examples = len(batch)\n\n # Preocessing data to the suitable shape and type for the model\n processed_batch = [[] for i in range(len(self.feed_list))]\n\n for sample in batch:\n for i, data in enumerate(sample):\n processed_batch[i].append(data)\n tensor_batch = [[] for i in range(len(self.feed_list))]\n for i in range(len(processed_batch)):\n processed_batch[i] = np.array(processed_batch[i]).reshape(feed_var_shape[i]).astype(feed_var_type[i])\n tensor_batch[i] = core.PaddleTensor(processed_batch[i])\n\n fetch_result = self._predictor.run(tensor_batch)\n for index, result in enumerate(fetch_result):\n step_run_state.run_results[index] = result.as_ndarray()\n step_run_state.run_examples += num_batch_examples\n step_run_state.update()\n period_run_states += [step_run_state]\n self._run_step_event(step_run_state)\n\n global_run_states += period_run_states\n return global_run_states\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 433, "n_words": 108, "vocab_size": 64, "complexity": 9, "nloc": 35, "token_counts": 262, "n_ast_nodes": 415, "n_identifiers": 47, "random_cut": "def _run_with_predictor(self) -> List[RunState]:\n \n global_run_states = []\n period_run_states = []\n\n feed_var_shape = []\n feed_var_type = []\n for var in self.feed_var_list:\n feed_var_shape.append(var.shape)\n feed_var_type.append(paddle_utils.dtype_map[var.dtype])\n\n data_reader = self.generator\n for batch in data_reader():\n\n step_run_state = RunState(len(self.fetch_list))\n step_run_state.run_step = 1\n num_batch_examples = len(batch)\n\n # Preocessing data to the suitable shape and type for the model\n processed_batch = [[] for i in range(len(self.feed_list))]\n\n for sample in batch:\n for i, data in enumerate(sample):\n processed_batch[i].append(data)\n tensor_batch = [[] for i in range(len(self.feed_list))]\n for i in range(len(processed_batch)):\n processed_batch[i] = np.array(processed_batch[i]).reshape(feed_var_shape[i]).astype(feed_var_type[i])\n tensor_batch[i] = core.PaddleTensor(processed_batch[i])\n\n fetch_result = self._predictor.run(tensor_batch)\n for index, result in enumerate(fetch_result):\n step_run_state.run_results[index] = result.as_ndarray()\n step_run_state.run_examples += num_batch_examples\n " }, { "id": 214900, "commit_id": "98cbbaffd1bc0c191951e0b09c4f9ff8e083a61c", "repo": "flair", "path": "tests/embedding_test_utils.py", "file_name": "embedding_test_utils.py", "fun_name": "test_embedding_also_sets_trailing_whitespaces", "commit_message": "unify embedding tests", "code": "def test_embedding_also_sets_trailing_whitespaces(self, args):\n if not self.is_token_embedding:\n pytest.skip(\"The test is only 
valid for token embeddings\")\n embeddings = self.create_embedding_with_args(args)\n\n sentence: Sentence = Sentence([\"hello\", \" \", \"hm\", \" \"])\n embeddings.embed(sentence)\n names = embeddings.get_names()\n for token in sentence:\n assert len(token.get_embedding(names)) == embeddings.embedding_length\n", "url": "https://github.com/flairNLP/flair.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 101, "n_words": 38, "vocab_size": 32, "complexity": 3, "nloc": 9, "token_counts": 75, "n_ast_nodes": 127, "n_identifiers": 17, "random_cut": "def test_embedding_also_sets_trailing_whitespaces(self, args):\n if " }, { "id": 336343, "commit_id": "e30e1b89d0a85073f8f5338b86445bf8ed892086", "repo": "diffusers", "path": "tests/test_modeling_utils.py", "file_name": "test_modeling_utils.py", "fun_name": "test_ldm_text2img_fast", "commit_message": "Support one-string prompts and custom image size in LDM (#212)\n\n* Support one-string prompts in LDM\r\n\r\n* Add other features from SD too", "code": "def test_ldm_text2img_fast(self):\n ldm = LDMTextToImagePipeline.from_pretrained(\"CompVis/ldm-text2im-large-256\")\n\n prompt = \"A painting of a squirrel eating a burger\"\n generator = torch.manual_seed(0)\n image = ldm(prompt, generator=generator, num_inference_steps=1, output_type=\"numpy\")[\"sample\"]\n\n image_slice = image[0, -3:, -3:, -1]\n\n assert image.shape == (1, 256, 256, 3)\n expected_slice = np.array([0.3163, 0.8670, 0.6465, 0.1865, 0.6291, 0.5139, 0.2824, 0.3723, 0.4344])\n assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 109, "n_words": 54, "vocab_size": 45, "complexity": 1, "nloc": 9, "token_counts": 141, "n_ast_nodes": 183, "n_identifiers": 20, "random_cut": "def test_ldm_text2img_fast(self):\n ldm = LDMTextToImagePipeline.from_pretrained(\"CompVis/ldm-text2im-large-256\")\n\n prompt = \"A painting of a squirrel eating a burger\"\n generator = torch.manual_seed(0)\n image = ldm(prompt, generator=generator, num_inference_steps=1, output_type=\"numpy\")[\"sample\"]\n\n image_slice = image[0, -3:, -3:, -1]\n\n assert image.shape == (1, 256, 256, 3)\n expected_slice = np.array([0.3163, 0" }, { "id": 246573, "commit_id": "64c73c6ac88a740ee480a0ad1f9afc8596bccfa4", "repo": "synapse", "path": "tests/rest/client/test_capabilities.py", "file_name": "test_capabilities.py", "fun_name": "test_get_does_include_msc3244_fields_when_enabled", "commit_message": "Add type hints to `tests/rest/client` (#12066)", "code": "def test_get_does_include_msc3244_fields_when_enabled(self) -> None:\n access_token = self.get_success(\n self.auth_handler.create_access_token_for_user_id(\n self.user, device_id=None, valid_until_ms=None\n )\n )\n\n channel = self.make_request(\"GET\", self.url, access_token=access_token)\n capabilities = channel.json_body[\"capabilities\"]\n\n self.assertEqual(channel.code, 200)\n for details in capabilities[\"m.room_versions\"][\n \"org.matrix.msc3244.room_capabilities\"\n ].values():\n if details[\"preferred\"] is not None:\n self.assertTrue(\n details[\"preferred\"] in KNOWN_ROOM_VERSIONS,\n str(details[\"preferred\"]),\n )\n\n self.assertGreater(len(details[\"support\"]), 0)\n for room_version in details[\"support\"]:\n self.assertTrue(room_version in KNOWN_ROOM_VERSIONS, str(room_version))\n", "url": "https://github.com/matrix-org/synapse.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 262, "n_words": 50, "vocab_size": 39, "complexity": 4, "nloc": 20, "token_counts": 145, "n_ast_nodes": 233, "n_identifiers": 24, "random_cut": "def test_get_does_include_msc3244_fields_when_enabled(self) -> None:\n access_token = self.get_success(\n self.auth_handler.create_access_token_for_user_id(\n self.user, device_id=None, valid_until_ms=None\n )\n )\n\n channel = self.make_request(\"GET\", self.url, access_token=access_token)\n capabilities = channel.json_body[\"capabilities\"]\n\n self.assertEqual(channel.code, 200)\n for details in capabilit" }, { "id": 4733, "commit_id": "0964c83ea304c699d632a0b0c27383223019b312", "repo": "airbyte", "path": "octavia-cli/unit_tests/test_list/test_commands.py", "file_name": "test_commands.py", "fun_name": "test_sources", "commit_message": "🎁 octavia-cli: add telemetry (#11896)", "code": "def test_sources(mocker, context_object):\n mocker.patch.object(commands, \"Sources\", mocker.Mock(return_value=\"SourcesRepr\"))\n runner = CliRunner()\n result = runner.invoke(commands.sources, obj=context_object)\n commands.Sources.assert_called_with(context_object[\"API_CLIENT\"], context_object[\"WORKSPACE_ID\"])\n assert result.output == \"SourcesRepr\\n\"\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 33, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 6, "token_counts": 67, "n_ast_nodes": 112, "n_identifiers": 17, "random_cut": "def test_sources(mocker, context_object):\n mocker.patch.object(commands, \"Sources\", mocker.Mock(return_value=\"SourcesRepr\"))\n runner = CliRunner()\n result = r" }, { "id": 189410, "commit_id": "d8dc0b462d973f0c1ddd62e557d2da89e45f6265", "repo": "manim", "path": "manim/mobject/types/vectorized_mobject.py", "file_name": "vectorized_mobject.py", "fun_name": "generate_rgbas_array", "commit_message": "Cleanup `simple_functions.py` (#2437)\n\n* Remove fdiv\n\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\n\nfor more information, see https://pre-commit.ci\n\n* actually remove fdiv\n\n* Use lru cache and scipy's func\n\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\n\nfor more information, see https://pre-commit.ci\n\n* set maxsize\n\nshould be enough for how it's used\n\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\n\nfor more information, see https://pre-commit.ci\n\n* Remove get_num_args\n\n* Remove one instance of clip_in_place\n\n* Readd clip_in_place, it has a use\n\n* rm unnecessary line\n\n* Properly clip color\n\n* Revert \"Properly clip color\"\n\nThis reverts commit 0591c7833457930b399f4125958f81d038c96e69.\n\n* remove clip in place\n\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\n\nfor more information, see https://pre-commit.ci\n\n* actually remove\n\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def generate_rgbas_array(self, color, opacity):\n \n colors = list(tuplify(color))\n opacities = list(tuplify(opacity))\n rgbas = np.array(\n [color_to_rgba(c, o) for c, o in zip(*make_even(colors, opacities))],\n )\n\n sheen_factor = self.get_sheen_factor()\n if sheen_factor != 0 and len(rgbas) == 1:\n light_rgbas = np.array(rgbas)\n light_rgbas[:, :3] += sheen_factor\n np.clip(light_rgbas, 0, 1, out=light_rgbas)\n rgbas = np.append(rgbas, light_rgbas, axis=0)\n return rgbas\n", "url": "https://github.com/ManimCommunity/manim.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 162, "n_words": 51, "vocab_size": 42, "complexity": 4, "nloc": 13, "token_counts": 125, "n_ast_nodes": 193, "n_identifiers": 24, "random_cut": "def generate_rgbas_array(self, color, opacity):\n \n colors = list(tuplify(color))\n opacities = list(tuplify(opacity))\n rgbas = np.array(\n [color_to_rgba(c, o) for c, o in zip(*make_even(colors, opacities))],\n )\n\n sheen_" }, { "id": 62117, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/resources.py", "file_name": "resources.py", "fun_name": "iterator", "commit_message": "upd; format", "code": "def iterator(self, resource_name):\n resource = self.find(resource_name)\n if resource is not None:\n todo = [resource]\n while todo:\n resource = todo.pop(0)\n yield resource\n if resource.is_container:\n rname = resource.name\n for name in resource.resources:\n if not rname:\n new_name = name\n else:\n new_name = '/'.join([rname, name])\n child = self.find(new_name)\n if child.is_container:\n todo.append(child)\n else:\n yield child\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 391, "n_words": 50, "vocab_size": 32, "complexity": 7, "nloc": 19, "token_counts": 100, "n_ast_nodes": 164, "n_identifiers": 15, "random_cut": "def iterator(self, resource_name):\n resource = self.find(resource_name)\n if resource is not None:\n todo = [resource]\n while todo:\n resource = todo.pop(0)\n yield resource\n if resource.is_container:\n rname = resource.name\n for name in resource.resources:\n if not rname:\n new_name = name\n else:\n new_name = '/'.join([rname, name])\n child " }, { "id": 277573, "commit_id": "8401e08334d4b1f102a6ee9479738bacfee0600c", "repo": "keras", "path": "keras/layers/preprocessing/hashing_test.py", "file_name": "hashing_test.py", "fun_name": "test_hash_dense_input_mask_value_farmhash", "commit_message": "reduce layers line-too-long", "code": "def test_hash_dense_input_mask_value_farmhash(self):\n empty_mask_layer = hashing.Hashing(num_bins=3, mask_value=\"\")\n omar_mask_layer = hashing.Hashing(num_bins=3, mask_value=\"omar\")\n inp = np.asarray(\n [[\"omar\"], [\"stringer\"], [\"marlo\"], [\"wire\"], [\"skywalker\"]]\n )\n empty_mask_output = empty_mask_layer(inp)\n omar_mask_output = omar_mask_layer(inp)\n # Outputs should be one more than test_hash_dense_input_farmhash (the\n # zeroth bin is now reserved for masks).\n self.assertAllClose([[1], [1], [2], [1], [1]], empty_mask_output)\n # 'omar' should map to 0.\n self.assertAllClose([[0], [1], [2], [1], [1]], omar_mask_output)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 147, "n_words": 60, "vocab_size": 47, "complexity": 1, "nloc": 10, "token_counts": 129, "n_ast_nodes": 203, "n_identifiers": 14, "random_cut": "def test_hash_dense_input_mask_value_farmhash(self):\n empty_mask_layer = hashing.Hashing(num_bins=3, mask_value=\"\")\n" }, { "id": 161021, "commit_id": "b617a87ee40ab384767a27335313c2c65ee094ec", "repo": "MockingBird", "path": "ppg2mel/rnn_decoder_mol.py", "file_name": "rnn_decoder_mol.py", "fun_name": "get_go_frame", "commit_message": "Init ppg extractor and ppg2mel (#375)\n\n* Init ppg extractor and ppg2mel\r\n\r\n* add preprocess and training\r\n\r\n* FIx known issues\r\n\r\n* Update 
__init__.py\r\n\r\nAllow to gen audio\r\n\r\n* Fix length issue\r\n\r\n* Fix bug of preparing fid\r\n\r\n* Fix sample issues\r\n\r\n* Add UI usage of PPG-vc", "code": "def get_go_frame(self, memory):\n B = memory.size(0)\n go_frame = torch.zeros((B, self.num_mels), dtype=torch.float,\n device=memory.device)\n return go_frame\n", "url": "https://github.com/babysor/MockingBird.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 64, "n_words": 14, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 43, "n_ast_nodes": 64, "n_identifiers": 12, "random_cut": "def get_go_frame(self, memory):\n B = memory.size(0)\n go_frame = torch.zeros((B, self.num_mels), dtype=torch.float,\n device=memory.device)\n retu" }, { "id": 167101, "commit_id": "297c59abbe218f3ccb89fb25bee48f619c1e0d2d", "repo": "pandas", "path": "pandas/tests/plotting/frame/test_frame.py", "file_name": "test_frame.py", "fun_name": "test_group_subplot_multiindex_notimplemented", "commit_message": "ENH: Allow column grouping in DataFrame.plot (#29944)", "code": "def test_group_subplot_multiindex_notimplemented(self):\n df = DataFrame(np.eye(2), columns=MultiIndex.from_tuples([(0, 1), (1, 2)]))\n msg = \"An iterable subplots for a DataFrame with a MultiIndex\"\n with pytest.raises(NotImplementedError, match=msg):\n df.plot(subplots=[(0, 1)])\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 56, "n_words": 25, "vocab_size": 22, "complexity": 1, "nloc": 5, "token_counts": 66, "n_ast_nodes": 103, "n_identifiers": 16, "random_cut": "def test_group_subplot_multiindex_notimplemented(self):\n df = DataFrame(np.eye(2), columns=MultiIndex.from_tuples([(0," }, { "id": 250735, "commit_id": "e2f42ddb301737a1d8179c1034226a838ccd74f1", "repo": "mitmproxy", "path": "mitmproxy/tools/main.py", "file_name": "main.py", "fun_name": "mitmdump", "commit_message": "exit for all tools on startup error, fix #4544 (#5187)", "code": "def mitmdump(args=None) -> typing.Optional[int]: # pragma: no cover\n from mitmproxy.tools import dump\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 15, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 38, "n_ast_nodes": 32, "n_identifiers": 8, "random_cut": "def mitmdump(args=None) -> typing.Optional[int]: # pragma: no cover\n from mitmproxy.tools imp" }, { "id": 8390, "commit_id": "cb90e21a91d4eb378a38ce607c93b412932e4fe3", "repo": "ludwig", "path": "tests/ludwig/schema/test_validate_config_misc.py", "file_name": "test_validate_config_misc.py", "fun_name": "test_validate_defaults_schema", "commit_message": "Switch defaults to use mixins and improve test (#2669)", "code": "def test_validate_defaults_schema():\n config = {\n \"input_features\": [\n category_feature(),\n number_feature(),\n ],\n \"output_features\": [category_feature()],\n \"defaults\": {\n \"category\": {\n \"preprocessing\": {\n \"missing_value_strategy\": \"drop_row\",\n },\n \"encoder\": {\n \"type\": \"sparse\",\n },\n \"decoder\": {\n \"type\": \"classifier\",\n \"norm_params\": None,\n \"dropout\": 0.0,\n \"use_bias\": True,\n },\n \"loss\": {\n \"type\": \"softmax_cross_entropy\",\n \"confidence_penalty\": 0,\n },\n },\n \"number\": {\n \"preprocessing\": {\n \"missing_value_strategy\": \"fill_with_const\",\n \"fill_value\": 0,\n },\n \"loss\": {\"type\": 
\"mean_absolute_error\"},\n },\n },\n }\n\n validate_config(config)\n\n config[DEFAULTS][CATEGORY][NAME] = \"TEST\"\n\n with pytest.raises(ValidationError):\n validate_config(config)\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 544, "n_words": 67, "vocab_size": 44, "complexity": 1, "nloc": 39, "token_counts": 147, "n_ast_nodes": 267, "n_identifiers": 11, "random_cut": "def test_validate_defaults_schema():\n config = {\n \"input_features\": [\n category_feature(),\n number_feature(),\n ],\n \"output_features\": [category_feature()],\n \"defaults\": {\n \"category\": {\n \"preprocessing\": {\n \"missing_value_strategy\": \"drop_row\",\n },\n \"encoder\": {\n \"type\": \"sparse\",\n },\n \"decoder\": {\n \"type\": \"classifier\",\n \"norm_params\": None,\n \"dropout\": 0.0,\n \"use_bias\": True,\n },\n \"loss\": {\n \"type\": \"softmax_cross_entropy\",\n \"confidence_penalty\": 0,\n },\n },\n \"number\": {\n " }, { "id": 198684, "commit_id": "e123ef3b398df40722105f97a60c092dc7ee0dde", "repo": "sympy", "path": "sympy/core/expr.py", "file_name": "expr.py", "fun_name": "doit", "commit_message": "Cleanup doit-functions", "code": "def doit(self, **hints):\n if hints.get(\"deep\", True):\n return self.args[0].doit(**hints)\n else:\n return self.args[0]\n\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 46, "n_words": 11, "vocab_size": 10, "complexity": 2, "nloc": 5, "token_counts": 40, "n_ast_nodes": 64, "n_identifiers": 5, "random_cut": "def doit(self, **hints):\n if hints.get(\"deep\", True):\n return self." }, { "id": 192507, "commit_id": "647016bd1f791720834e00b1f7dfa61f19c965f3", "repo": "vision", "path": "test/test_prototype_transforms_functional.py", "file_name": "test_prototype_transforms_functional.py", "fun_name": "make_segmentation_mask", "commit_message": "[proto] Added functional affine_segmentation_mask op (#5613)\n\n* Added functional affine_bounding_box op with tests\r\n\r\n* Updated comments and added another test case\r\n\r\n* Update _geometry.py\r\n\r\n* Added affine_segmentation_mask with tests\r\n\r\n* Fixed device mismatch issue\r\nAdded a cude/cpu test\r\nReduced the number of test samples\r\n\r\n* Added test_correctness_affine_segmentation_mask_on_fixed_input\r\n\r\n* Updates according to the review\r\n\r\n* Replaced [None, ...] 
by [None, :]\r\n\r\n* Adressed review comments\r\n\r\n* Fixed formatting and more updates according to the review\r\n\r\n* Fixed bad merge", "code": "def make_segmentation_mask(size=None, *, num_categories=80, extra_dims=(), dtype=torch.long):\n size = size or torch.randint(16, 33, (2,)).tolist()\n shape = (*extra_dims, 1, *size)\n data = make_tensor(shape, low=0, high=num_categories, dtype=dtype)\n return features.SegmentationMask(data)\n\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 37, "n_words": 26, "vocab_size": 23, "complexity": 2, "nloc": 5, "token_counts": 81, "n_ast_nodes": 118, "n_identifiers": 16, "random_cut": "def make_segmentation_mask(size=None, *, num_categories=80, extra_dims=(), dtype=torch.long):\n size = size or torch.randint(16, 33, (2,)).tolist(" }, { "id": 115019, "commit_id": "96c9d1599588e4df9748d78f57fcb1516d847eb9", "repo": "mindsdb", "path": "mindsdb/interfaces/database/integrations.py", "file_name": "integrations.py", "fun_name": "_load_handler_modules", "commit_message": "check connections throug handlers", "code": "def _load_handler_modules(self):\n handlers_list = ['postgres_handler', 'mysql_handler', 'file_handler']\n self.handler_modules = {}\n\n for module_name in handlers_list:\n try:\n handler_module = importlib.import_module(f'mindsdb.integrations.{module_name}')\n self.handler_modules[handler_module.Handler.type] = handler_module\n except Exception as e:\n print(f'Cand import module {module_name}: {e}')\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 117, "n_words": 30, "vocab_size": 26, "complexity": 3, "nloc": 9, "token_counts": 58, "n_ast_nodes": 111, "n_identifiers": 13, "random_cut": "def _load_handler_modules(self):\n handlers_list = ['postgres_handler', 'mysql_handler', 'file_handler']\n self.handler_modules = {}\n\n for module_name in handlers_list:\n try:\n handler_module = importlib.import_module(f'mindsdb.integrations.{module_name}')\n self.handler_modules[handler_module.Handler.type] = handler_module\n except Exception as e:\n print(f'Cand import module {module_name}: {e}'" }, { "id": 155474, "commit_id": "b9a2cab3d9835071b6c11e9c8240b3515b9baeae", "repo": "modin", "path": "modin/pandas/test/test_series.py", "file_name": "test_series.py", "fun_name": "test_values_non_numeric", "commit_message": "FIX-#1503: Proper implementation of `Series.values` (#5469)\n\nCo-authored-by: Dmitry Chigarev \r\nSigned-off-by: Anatoly Myachev ", "code": "def test_values_non_numeric():\n data = [\"str{0}\".format(i) for i in range(0, 10**3)]\n modin_series, pandas_series = create_test_series(data)\n\n modin_series = modin_series.astype(\"category\")\n pandas_series = pandas_series.astype(\"category\")\n\n df_equals(modin_series.values, pandas_series.values)\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\n@pytest.mark.parametrize(\n \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\n@pytest.mark.parametrize(\"ddof\", int_arg_values, ids=arg_keys(\"ddof\", int_arg_keys))", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\n@pytest.mark.parametrize(\n \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\n@pytest.mark.parametrize(\"ddof\", int_arg_values, ids=arg_keys(\"ddof\", 
int_arg_keys))", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 48, "n_words": 35, "vocab_size": 31, "complexity": 2, "nloc": 6, "token_counts": 59, "n_ast_nodes": 187, "n_identifiers": 22, "random_cut": "def test_values_non_numeric():\n data = [\"str{0}\".format(i) for i in range(0, 10**3)]\n modin_series, pandas_series = create_test_series(data)\n\n modin_series = modin_series.astype(\"category\")\n pandas_series = pandas_series.astype(\"category\")\n\n df_equals(modin_series.values, pandas_series.values)\n\n\n@pytest.mark.parametrize(\"data\", test_data_values, ids=test_data_keys)\n@pytest.mark.parametrize(\n \"skipna\", bool_a" }, { "id": 321811, "commit_id": "ed19d7f58b2664bb310c7cb6b52c5b9a06ea60b2", "repo": "qutebrowser", "path": "tests/unit/browser/test_qutescheme.py", "file_name": "test_qutescheme.py", "fun_name": "test_default_config", "commit_message": "Add --include-hidden for :config-diff\n\nNeeded it for debugging, so why not implement it properly.\nTODO: Changelog, pick to master?", "code": "def test_default_config(self, config_stub, url, expected):\n _mimetype, data = qutescheme.data_for_url(QUrl(url))\n assert data == expected\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 26, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 3, "token_counts": 28, "n_ast_nodes": 42, "n_identifiers": 10, "random_cut": "def test_default_config(self, config_stub, url, expected):\n _mimetype, data = qutescheme.data_for_url(QUrl(url))\n assert data == expected\n" }, { "id": 246213, "commit_id": "901b264c0c88f39cbfb8b2229e0dc57968882658", "repo": "synapse", "path": "tests/rest/admin/test_user.py", "file_name": "test_user.py", "fun_name": "test_return_zero_when_null", "commit_message": "Add type hints to `tests/rest/admin` (#11851)", "code": "def test_return_zero_when_null(self) -> None:\n \n\n self.get_success(\n self.store.db_pool.simple_upsert(\n table=\"ratelimit_override\",\n keyvalues={\"user_id\": self.other_user},\n values={\n \"messages_per_second\": None,\n \"burst_count\": None,\n },\n )\n )\n\n # request status\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.admin_user_tok,\n )\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n self.assertEqual(0, channel.json_body[\"messages_per_second\"])\n self.assertEqual(0, channel.json_body[\"burst_count\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 250, "n_words": 34, "vocab_size": 30, "complexity": 1, "nloc": 22, "token_counts": 112, "n_ast_nodes": 181, "n_identifiers": 21, "random_cut": "def test_return_zero_when_null(self) -> None:\n \n\n self.get_success(\n self.store.db_pool.simple_upsert(\n table=\"ratelimit_override\",\n keyvalues={\"user_id\": self.other_user},\n values={\n \"messages_per_second\": None,\n \"burst_count\": None,\n },\n )\n )\n\n # request status\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.admin_user_tok,\n )\n self.assertEqual(HTTPStatus.OK, channel.code, ms" }, { "id": 252717, "commit_id": "51611be2dd3b440f20f071ed3888936cf75d768b", "repo": "mitmproxy", "path": "test/mitmproxy/net/test_tls.py", "file_name": "test_tls.py", "fun_name": "test_sslkeylogfile", "commit_message": "share `SSL.Context` for all client <-> proxy connections (#5340)\n\n* share `SSL.Context` for all client connections\r\n\r\nThis should cause 
not functional differences, but increase performance.\r\n\r\n* update tests", "code": "def test_sslkeylogfile(tdata, monkeypatch):\n keylog = []\n monkeypatch.setattr(\n tls, \"log_master_secret\", lambda conn, secrets: keylog.append(secrets)\n )\n\n store = certs.CertStore.from_files(\n Path(tdata.path(\"mitmproxy/net/data/verificationcerts/trusted-root.pem\")),\n Path(tdata.path(\"mitmproxy/net/data/dhparam.pem\")),\n )\n entry = store.get_cert(\"example.com\", [], None)\n\n cctx = tls.create_proxy_server_context(\n method=tls.Method.TLS_CLIENT_METHOD,\n min_version=tls.DEFAULT_MIN_VERSION,\n max_version=tls.DEFAULT_MAX_VERSION,\n cipher_list=None,\n verify=tls.Verify.VERIFY_NONE,\n ca_path=None,\n ca_pemfile=None,\n client_cert=None,\n )\n sctx = tls.create_client_proxy_context(\n method=tls.Method.TLS_SERVER_METHOD,\n min_version=tls.DEFAULT_MIN_VERSION,\n max_version=tls.DEFAULT_MAX_VERSION,\n cipher_list=None,\n chain_file=entry.chain_file,\n alpn_select_callback=None,\n request_client_cert=False,\n extra_chain_certs=(),\n dhparams=store.dhparams,\n )\n\n server = SSL.Connection(sctx)\n server.set_accept_state()\n\n # Use pyOpenSSL API once it has shipped: https://github.com/pyca/pyopenssl/pull/1121\n ok = SSL._lib.SSL_use_certificate(server._ssl, entry.cert.to_pyopenssl()._x509) # type: ignore\n SSL._openssl_assert(ok == 1) # type: ignore\n ok = SSL._lib.SSL_use_PrivateKey(server._ssl, crypto.PKey.from_cryptography_key(entry.privatekey)._pkey) # type: ignore\n SSL._openssl_assert(ok == 1) # type: ignore\n\n client = SSL.Connection(cctx)\n client.set_connect_state()\n\n read, write = client, server\n while True:\n try:\n print(read)\n read.do_handshake()\n except SSL.WantReadError:\n write.bio_write(read.bio_read(2 ** 16))\n else:\n break\n read, write = write, read\n\n assert keylog\n assert keylog[0].startswith(b\"SERVER_HANDSHAKE_TRAFFIC_SECRET\")\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 403, "n_words": 119, "vocab_size": 84, "complexity": 3, "nloc": 51, "token_counts": 331, "n_ast_nodes": 515, "n_identifiers": 69, "random_cut": "def test_sslkeylogfile(tdata, monkeypatch):\n keylog = []\n monkeypatch.setattr(\n tls, \"log_master_secret\", lambda conn, secrets: keylog.append(secrets)\n )\n\n store = certs.CertStore.from_files(\n Path(tdata.path(\"mitmproxy/net/data/verificationcerts/trusted-root.pem\")),\n Path(tdata.path(\"mitmproxy/net/data/dhparam.pem\")),\n )\n entry = store.get_cert(\"example.com\", [], None)\n\n cctx = tls.create_proxy_server_context(\n method=tls.Method.TLS_CLIENT_METHOD,\n min_version=tls.DEFAULT_MIN_VERSION,\n max_version=tls.DEFAULT_MAX_VERSION,\n cipher_list=None,\n verify=tls.Verify.VERIFY_NONE,\n ca_path=None,\n ca_pemfile=None,\n client_cert=None,\n )\n sctx = tls.create_client_proxy_context(\n method=tls.Method.TLS_SERVER_METHOD,\n min_version=tls.DEFAULT_MIN_VERSION,\n max_version=tls.DEFAULT_MAX_VERSION,\n cipher_list=None,\n chain_file=entry.chain_file,\n alpn_select_callback=None,\n request_client_cert=False,\n extra_chain_certs=(),\n dhparams=store.dhparams,\n )\n\n server = SSL.Connection(sctx)\n server.set_accept_state()\n\n # Use pyOpenSSL API once it has shipped: https://github.com/pyca/pyopenssl/pull/1121\n ok = SSL._lib.SSL_use_certificate(server._ssl, entry.cert.to_pyopenssl()._x509) # type: ignore\n SSL._openssl_assert(ok == 1) # type: ignore\n ok = SSL._lib.SSL_use_PrivateKey(server._" }, { "id": 69284, "commit_id": 
"22299d2382c912e8bc8dddca3af7d4cf94374339", "repo": "erpnext", "path": "erpnext/stock/report/item_price_stock/item_price_stock.py", "file_name": "item_price_stock.py", "fun_name": "get_item_price_qty_data", "commit_message": "refactor: rewrite `Item Price Stock Report` queries in `QB`", "code": "def get_item_price_qty_data(filters):\n\titem_price = frappe.qb.DocType(\"Item Price\")\n\tbin = frappe.qb.DocType(\"Bin\")\n\n\tquery = (\n\t\tfrappe.qb.from_(item_price)\n\t\t.left_join(bin)\n\t\t.on(item_price.item_code == bin.item_code)\n\t\t.select(\n\t\t\titem_price.item_code,\n\t\t\titem_price.item_name,\n\t\t\titem_price.name.as_(\"price_list_name\"),\n\t\t\titem_price.brand.as_(\"brand\"),\n\t\t\tbin.warehouse.as_(\"warehouse\"),\n\t\t\tbin.actual_qty.as_(\"actual_qty\"),\n\t\t)\n\t)\n\n\tif filters.get(\"item_code\"):\n\t\tquery = query.where(item_price.item_code == filters.get(\"item_code\"))\n\n\titem_results = query.run(as_dict=True)\n\n\tprice_list_names = list(set(item.price_list_name for item in item_results))\n\n\tbuying_price_map = get_price_map(price_list_names, buying=1)\n\tselling_price_map = get_price_map(price_list_names, selling=1)\n\n\tresult = []\n\tif item_results:\n\t\tfor item_dict in item_results:\n\t\t\tdata = {\n\t\t\t\t\"item_code\": item_dict.item_code,\n\t\t\t\t\"item_name\": item_dict.item_name,\n\t\t\t\t\"brand\": item_dict.brand,\n\t\t\t\t\"warehouse\": item_dict.warehouse,\n\t\t\t\t\"stock_available\": item_dict.actual_qty or 0,\n\t\t\t\t\"buying_price_list\": \"\",\n\t\t\t\t\"buying_rate\": 0.0,\n\t\t\t\t\"selling_price_list\": \"\",\n\t\t\t\t\"selling_rate\": 0.0,\n\t\t\t}\n\n\t\t\tprice_list = item_dict[\"price_list_name\"]\n\t\t\tif buying_price_map.get(price_list):\n\t\t\t\tdata[\"buying_price_list\"] = buying_price_map.get(price_list)[\"Buying Price List\"] or \"\"\n\t\t\t\tdata[\"buying_rate\"] = buying_price_map.get(price_list)[\"Buying Rate\"] or 0\n\t\t\tif selling_price_map.get(price_list):\n\t\t\t\tdata[\"selling_price_list\"] = selling_price_map.get(price_list)[\"Selling Price List\"] or \"\"\n\t\t\t\tdata[\"selling_rate\"] = selling_price_map.get(price_list)[\"Selling Rate\"] or 0\n\n\t\t\tresult.append(data)\n\n\treturn result\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 75, "n_words": 120, "vocab_size": 82, "complexity": 12, "nloc": 45, "token_counts": 333, "n_ast_nodes": 556, "n_identifiers": 39, "random_cut": "def get_item_price_qty_data(filters):\n\titem_price = frappe.qb.DocType(\"Item Price\")\n\tbin = frappe.qb.DocType(\"Bin\")\n\n\tquery = (\n\t\tfrappe.qb.from_(item_price)\n\t\t.left_join(bin)\n\t\t.on(item_price.item_code == bin.item_code)\n\t\t.select(\n\t\t\titem_price.item_code,\n\t\t\titem_price.item_name,\n\t\t\titem_price.name.as_(\"price_list_name\"),\n\t\t\titem_price.bra" }, { "id": 243006, "commit_id": "b1ba0909edb8f4c2c7815397c5e39c6a36e3bbb3", "repo": "Pillow", "path": "src/PIL/ImageGrab.py", "file_name": "ImageGrab.py", "fun_name": "grab", "commit_message": "Prefer gnome-screenshot if xdisplay is None\n\nCo-authored-by: Ondrej Baranovič ", "code": "def grab(bbox=None, include_layered_windows=False, all_screens=False, xdisplay=None):\n if xdisplay is None:\n if sys.platform == \"darwin\":\n fh, filepath = tempfile.mkstemp(\".png\")\n os.close(fh)\n args = [\"screencapture\"]\n if bbox:\n left, top, right, bottom = bbox\n args += [\"-R\", f\"{left},{top},{right-left},{bottom-top}\"]\n subprocess.call(args + [\"-x\", 
filepath])\n im = Image.open(filepath)\n im.load()\n os.unlink(filepath)\n if bbox:\n im_resized = im.resize((right - left, bottom - top))\n im.close()\n return im_resized\n return im\n elif sys.platform == \"win32\":\n offset, size, data = Image.core.grabscreen_win32(\n include_layered_windows, all_screens\n )\n im = Image.frombytes(\n \"RGB\",\n size,\n data,\n # RGB, 32-bit line padding, origin lower left corner\n \"raw\",\n \"BGR\",\n (size[0] * 3 + 3) & -4,\n -1,\n )\n if bbox:\n x0, y0 = offset\n left, top, right, bottom = bbox\n im = im.crop((left - x0, top - y0, right - x0, bottom - y0))\n return im\n elif shutil.which(\"gnome-screenshot\"):\n fh, filepath = tempfile.mkstemp(\".png\")\n os.close(fh)\n subprocess.call([\"gnome-screenshot\", \"-f\", filepath])\n im = Image.open(filepath)\n im.load()\n os.unlink(filepath)\n if bbox:\n im_cropped = im.crop(bbox)\n im.close()\n return im_cropped\n return im\n # use xdisplay=None for default display on non-win32/macOS systems\n if not Image.core.HAVE_XCB:\n raise OSError(\"Pillow was built without XCB support\")\n size, data = Image.core.grabscreen_x11(xdisplay)\n im = Image.frombytes(\"RGB\", size, data, \"raw\", \"BGRX\", size[0] * 4, 1)\n if bbox:\n im = im.crop(bbox)\n return im\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 808, "n_words": 189, "vocab_size": 108, "complexity": 11, "nloc": 55, "token_counts": 369, "n_ast_nodes": 617, "n_identifiers": 42, "random_cut": "def grab(bbox=None, include_layered_windows=False, all_screens=False, xdisplay=None):\n if xdisplay is None:\n if sys.platform == \"darwin\":\n fh, filepath = tempfile.mkstemp(\".png\")\n os.close(fh)\n args = [\"screencapture\"]\n if bbox:\n left, top, right, bottom = bbox\n args += [\"-R\", f\"{left},{top},{right-left},{bottom-top}\"]\n subprocess.call(args + [\"-x\", filepath])\n im = Image.open(filepath)\n im.load()\n os.unlink(filepath)\n if bbox:\n im_resized = im.resize((right - left, bottom - top))\n im.close()\n return im_resized\n return im\n elif sys.platform == \"win32\":\n offset, size, data = Image.core.grabscreen_win32(\n include_layered_windows, all_screens\n )\n im = Image.frombytes(\n \"RGB\",\n size,\n data,\n # RGB, 32-bit line padding, origin lower left corner\n \"raw\",\n \"BGR\",\n (size[0] * 3 + 3) & -4,\n -1,\n )\n if bbox:\n x0, y0 = offset\n left, top, right, bottom = bbox\n " }, { "id": 100228, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/sentry/incidents/endpoints/test_organization_incident_details.py", "file_name": "test_organization_incident_details.py", "fun_name": "test_simple", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_simple(self, mock_now):\n mock_now.return_value = datetime.utcnow().replace(tzinfo=pytz.utc)\n\n incident = self.create_incident(seen_by=[self.user])\n with self.feature(\"organizations:incidents\"):\n resp = self.get_success_response(incident.organization.slug, incident.identifier)\n\n expected = serialize(incident)\n\n user_data = serialize(self.user)\n seen_by = [user_data]\n\n assert resp.data[\"id\"] == expected[\"id\"]\n assert resp.data[\"identifier\"] == expected[\"identifier\"]\n assert resp.data[\"projects\"] == expected[\"projects\"]\n assert resp.data[\"dateDetected\"] == expected[\"dateDetected\"]\n assert resp.data[\"dateCreated\"] == expected[\"dateCreated\"]\n assert resp.data[\"projects\"] == 
expected[\"projects\"]\n assert [item[\"id\"] for item in resp.data[\"seenBy\"]] == [item[\"id\"] for item in seen_by]\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 161, "n_words": 60, "vocab_size": 37, "complexity": 3, "nloc": 15, "token_counts": 181, "n_ast_nodes": 303, "n_identifiers": 25, "random_cut": "def test_simple(self, mock_now):\n mock_now.return_value = datetime.utcnow().replace(tzinfo=pytz.utc)\n\n incident = self.create_incident(seen_by=[self.user])\n with self.feature(\"organizations:incidents\"):\n resp = self.get_success_response(incident.organization.slug, incident.identifier)\n\n expected = serialize(incident)\n\n user_data = serialize(self.user)\n seen_by = [user_data]\n\n assert resp.data[\"id\"] == expected[\"id\"]\n assert resp.data[\"identifier\"] == expected[\"identifier\"]\n assert resp.data[\"projects\"] == expected[\"projects\"]\n assert resp.data[\"dateDetected\"] == expected[\"dateDetected\"]\n assert resp.data[\"dateCreated\"] == expected[\"dateCreated\"]\n assert resp.da" }, { "id": 40793, "commit_id": "bb1a521d7397339217538494972310da77a03d93", "repo": "seaborn", "path": "seaborn/tests/_core/test_mappings.py", "file_name": "test_mappings.py", "fun_name": "test_categorical_dict_with_missing_keys", "commit_message": "Implement most of the framework for semantic mapping using Plot\n\nSquashed commit of the following:\n\ncommit 597cd89d9ffddc67ef3b92ceb94b2c4810412cfe\nAuthor: Michael Waskom \nDate: Sat Oct 16 15:50:15 2021 -0400\n\n Satisfy linter\n\ncommit f62d8740f08a31b07c34566dd4e89c98b5fa75b5\nAuthor: Michael Waskom \nDate: Sat Oct 16 14:12:45 2021 -0400\n\n Simplify color transform and tests\n\ncommit 42020a0dda4c537a5360c7dcecbb15ffa51844d2\nAuthor: Michael Waskom \nDate: Sat Oct 16 12:42:32 2021 -0400\n\n Initialize default semantics with relevant variable names\n\ncommit c7777d9b71a561afd75199c40d71c815ddce9a46\nAuthor: Michael Waskom \nDate: Tue Oct 12 20:34:03 2021 -0400\n\n Make scale a required parameter of mapping setup\n\ncommit 81482fd4c452fec254f2c1d5907311760a2313b9\nAuthor: Michael Waskom \nDate: Mon Oct 11 21:32:01 2021 -0400\n\n Add from_inferred_type alternate constructor for ScaleWrapper\n\ncommit c3ea2a875c0c672bec73ded24283323e9f554eaf\nAuthor: Michael Waskom \nDate: Sun Oct 10 20:13:50 2021 -0400\n\n Add basic datetime mapping tests\n\ncommit b32633ca0d5057749d32c5461a53954c9e815ba3\nAuthor: Michael Waskom \nDate: Sat Oct 9 17:59:53 2021 -0400\n\n Very messy prototype of mapping datetime data\n\ncommit 8c51ab7d9de549fe556b0eeb3e8c621afde9d610\nAuthor: Michael Waskom \nDate: Sat Oct 9 13:47:46 2021 -0400\n\n Use linestyle rather than dash\n\ncommit 6cb547063887e89a3e7746e0a821479fa4d99639\nAuthor: Michael Waskom \nDate: Sat Oct 9 13:39:25 2021 -0400\n\n Clear out some TODOs\n\ncommit 636f8681c07c95fbfb07c7965fd5912a75ae0f59\nAuthor: Michael Waskom \nDate: Fri Oct 8 20:08:24 2021 -0400\n\n Matplotlib compatability\n\ncommit 30eadfb4450f8139f60c5aea98f3fa8ea8d2c8f5\nAuthor: Michael Waskom \nDate: Fri Oct 8 20:00:52 2021 -0400\n\n Move norm->rgb transform into class and fix typing\n\ncommit 58660ffd962433bb1433b65ec6bfce377c0b1ad3\nAuthor: Michael Waskom \nDate: Thu Oct 7 20:59:01 2021 -0400\n\n Build out continuous semantic tests\n\ncommit 72f60d7df708f14e2b6f65c6c7748defaaf563be\nAuthor: Michael Waskom \nDate: Tue Oct 5 20:47:05 2021 -0400\n\n Start building out boolean and continuous mapping tests\n\ncommit 
a8408ab57048db3e9e480f478d974d8a9356524f\nAuthor: Michael Waskom \nDate: Mon Oct 4 20:57:11 2021 -0400\n\n Add abstraction in discrete semantic tests\n\ncommit 966218f065aa54a0af159394d7458bbbd4031868\nAuthor: Michael Waskom \nDate: Mon Oct 4 20:37:31 2021 -0400\n\n Name bikeshedding\n\ncommit 7e4a62b1107f21a3f29d3e04725f607c16fe291d\nAuthor: Michael Waskom \nDate: Mon Oct 4 20:30:22 2021 -0400\n\n Move default semantics out of Plot\n\ncommit 51729363a1d35695e677c5c5c9bb01d44ad95ec6\nAuthor: Michael Waskom \nDate: Sun Oct 3 22:23:21 2021 -0400\n\n Add linewidth to prototype out continuous semantic\n\ncommit fc8f466f2cb2c55dcfc58e566c5a94a06473bab1\nAuthor: Michael Waskom \nDate: Sun Oct 3 17:33:28 2021 -0400\n\n Attempt (unsuccessfully) to clean up Point draw logic\n\ncommit af8d37758ea6490b26753798067ae8291c2fc07c\nAuthor: Michael Waskom \nDate: Thu Sep 30 21:19:35 2021 -0400\n\n Fix base attribute typing on Semantic.variable\n\ncommit d861fda490608bfa25810c24c0461236830c3b53\nAuthor: Michael Waskom \nDate: Thu Sep 30 20:44:40 2021 -0400\n\n Change test for too-short palette reaction to warning\n\ncommit 4761b092233c1b2c99dd0fd57d7506f9e1956e5b\nAuthor: Michael Waskom \nDate: Wed Sep 29 20:54:21 2021 -0400\n\n Add prototype of ContinuousSemantic\n\ncommit 8519b5b61ead0701481795c7698778ba330ffe86\nAuthor: Michael Waskom \nDate: Tue Sep 28 20:51:11 2021 -0400\n\n Spec out a BooleanSemantic\n\ncommit 83604c6c271d17839c97136c34002ad34513bfff\nAuthor: Michael Waskom \nDate: Tue Sep 28 19:21:47 2021 -0400\n\n Fix more complex positional variables\n\ncommit cc8f73a548e6337dace4b372873583a8b02b6b39\nAuthor: Michael Waskom \nDate: Tue Sep 28 08:20:10 2021 -0400\n\n Clear mypy failures\n\ncommit 82828708fd9a4529043ea0a887aa67f3946ecdad\nAuthor: Michael Waskom \nDate: Mon Sep 27 07:01:19 2021 -0400\n\n MPL compat\n\ncommit 0b69940a164059dbfec834e029af51a369f70901\nAuthor: Michael Waskom \nDate: Sun Sep 26 22:42:02 2021 -0400\n\n PEP8\n\ncommit a7bfca26e7ce095f6ed8cba5878250efaf4bcd6a\nAuthor: Michael Waskom \nDate: Sun Sep 26 22:24:25 2021 -0400\n\n Add numeric ColorMapping\n\ncommit 06116145750a75b20faece231ea153caca15f40d\nAuthor: Michael Waskom \nDate: Sun Sep 26 20:17:54 2021 -0400\n\n Rename objects in mapping tests\n\ncommit aa8bbd53eb195649e5e1d309527247a770c525fc\nAuthor: Michael Waskom \nDate: Sun Sep 26 20:15:09 2021 -0400\n\n Remove vestigial code\n\ncommit b527b5767e929c3f741d6ed612eab96dca3013d5\nAuthor: Michael Waskom \nDate: Sun Sep 26 17:53:03 2021 -0400\n\n Have map_ methods call scale_ method when appropriate\n\ncommit a8194b4e3c1dade124e16e680a930cfe199b9634\nAuthor: Michael Waskom \nDate: Sun Sep 26 14:43:27 2021 -0400\n\n Begin exposing order in map methods\n\ncommit 708391b1eff34db93798722a93cd921ed66eac6e\nAuthor: Michael Waskom \nDate: Sun Sep 26 14:27:05 2021 -0400\n\n More consistency in argument order\n\ncommit e0be5ff82abe52fbd0facc9482bd5b7950d5f88f\nAuthor: Michael Waskom \nDate: Sun Sep 26 12:41:05 2021 -0400\n\n Partial fix to scale transformation logic\n\ncommit b706c89c30c425ba1ce148c5d5a69fb96a2613e5\nAuthor: Michael Waskom \nDate: Sun Sep 26 08:26:32 2021 -0400\n\n Make it optional to have x/y scale defined\n\ncommit 7e758f8a04c39142dc5b43e4924cda3744c72eba\nAuthor: Michael Waskom \nDate: Sat Sep 25 20:42:02 2021 -0400\n\n Refactor _setup_mappings\n\ncommit 42b2481962630c634d5e00c55f181fa454e198c8\nAuthor: Michael Waskom \nDate: Sat Sep 25 20:21:32 2021 -0400\n\n Begin refactoring setup pipeline\n\ncommit edf272961db0f60d4a7c7aec2e6eae868d62468e\nAuthor: Michael 
Waskom \nDate: Thu Sep 23 21:02:51 2021 -0400\n\n Partial rearrangement of mapping code into new organization\n\ncommit 7417eb70997e7cd0be5a82fd3773187290e39b48\nAuthor: Michael Waskom \nDate: Mon Sep 20 19:36:39 2021 -0400\n\n Consistent sorting of missing keys\n\ncommit a179cdcd129c2e0f7c963b92a7b2ca07c4a8dce4\nAuthor: Michael Waskom \nDate: Mon Sep 20 19:36:31 2021 -0400\n\n Add compat layer for MarkerStyle\n\ncommit 917600d522844193318be7fe37e52ca5b3a320c1\nAuthor: Michael Waskom \nDate: Sun Sep 19 20:52:12 2021 -0400\n\n Add tests for MarkerMapping and DashMapping\n\ncommit 4ece96368c2f78f6e84bc55bdfa481c4f01dc0c0\nAuthor: Michael Waskom \nDate: Mon Sep 13 20:51:16 2021 -0400\n\n Refactor DictionaryMapping and add DashMapping\n\ncommit 0bf214d24e767fbfc39e4c9557abc292c329b707\nAuthor: Michael Waskom \nDate: Sun Sep 12 18:51:13 2021 -0400\n\n Add (untested/incomplete) prototype of marker mapping\n\ncommit 4ef6d612e9bc62a55159ef04156ed8687e7ab367\nAuthor: Michael Waskom \nDate: Sat Sep 11 21:18:46 2021 -0400\n\n Rename 'hue' -> 'color' in the rest of the new code\n\ncommit d357b3fcad99b384de5ffee5983b3c564c62ea8e\nAuthor: Michael Waskom \nDate: Sat Sep 11 19:01:41 2021 -0400\n\n Add facecolor and edgecolor mappings\n\ncommit 8e87e2857cd39bf02b8d7a9b6d56fb95df95756e\nAuthor: Michael Waskom \nDate: Sat Sep 11 18:07:54 2021 -0400\n\n Rename hue -> color in semantic mapping code", "code": "def test_categorical_dict_with_missing_keys(self, cat_vector, cat_order):\n\n palette = dict(zip(cat_order[1:], color_palette(\"Purples\")))\n scale = ScaleWrapper.from_inferred_type(cat_vector)\n with pytest.raises(ValueError):\n ColorSemantic(palette=palette).setup(cat_vector, scale)\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 46, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 56, "n_ast_nodes": 91, "n_identifiers": 16, "random_cut": "def test_categorical_dict_with_missing_keys(self, cat_vector, cat_order):\n\n palette = dict(zip(cat_order[1:], color_palette(\"Purples\")))\n scale = ScaleWrapper.from_inferred_type(cat_vector)\n with pytest.raises(ValueError):\n ColorSemantic(palette=palette).setup(cat_vector, scale)\n" }, { "id": 9907, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "jina/peapods/runtimes/gateway/__init__.py", "file_name": "__init__.py", "fun_name": "_set_connection_pool", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head 
runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s 
test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for 
star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial 
flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix 
peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "def _set_connection_pool(self):\n import json\n\n pods_addresses = json.loads(self.args.pods_addresses)\n # add the connections needed\n self._connection_pool = create_connection_pool(\n logger=self.logger,\n k8s_connection_pool=self.args.k8s_connection_pool,\n k8s_namespace=self.args.k8s_namespace,\n )\n for pod_name, addresses in pods_addresses.items():\n for address in addresses:\n self._connection_pool.add_connection(\n pod=pod_name, address=address, head=True\n )\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 167, "n_words": 33, "vocab_size": 29, "complexity": 3, "nloc": 13, "token_counts": 82, "n_ast_nodes": 125, "n_identifiers": 18, "random_cut": "def _set_connection_pool(self):\n" }, { "id": 61582, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/chardet/hebrewprober.py", "file_name": "hebrewprober.py", "fun_name": "set_model_probers", "commit_message": "upd; format", "code": "def set_model_probers(self, logicalProber, visualProber):\n self._logical_prober = logicalProber\n self._visual_prober = visualProber\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 23, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 29, "n_identifiers": 6, "random_cut": "def set_model_probers(self, logicalProber, visualProber):\n self._logical_prober = logical" }, { "id": 223338, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/tests/test_unixccompiler.py", "file_name": "test_unixccompiler.py", "fun_name": "test_runtime_libdir_option", "commit_message": "add python 3.10.4 for windows", "code": "def test_runtime_libdir_option(self):\n # Issue#5900\n #\n # Ensure RUNPATH is added to extension modules with RPATH if\n # GNU ld is used\n\n # darwin\n sys.platform = 'darwin'\n self.assertEqual(self.cc.rpath_foo(), '-L/foo')\n\n # hp-ux\n sys.platform = 'hp-ux'\n old_gcv = sysconfig.get_config_var", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 105, "n_words": 36, "vocab_size": 27, "complexity": 1, "nloc": 35, "token_counts": 244, "n_ast_nodes": 67, "n_identifiers": 10, "random_cut": "def 
test_runtime_libdir_option(self):\n # Issue#5900\n #\n # Ensure RUNPATH is added to extension modules with RPATH if\n # GNU ld is used\n\n # darwin\n sys.platform = 'darwin'\n self.assertEqual(self.cc.rpath_foo(), '-L/foo')" }, { "id": 314136, "commit_id": "754fe86dd988b51a20229f8d88dfcdecb60e90d8", "repo": "core", "path": "homeassistant/components/fan/__init__.py", "file_name": "__init__.py", "fun_name": "capability_attributes", "commit_message": "Add fan to strict typing (#73820)\n\n* Add fan to strict typing\r\n\r\n* Adjust state_attributes\r\n\r\n* Adjust capability_attributes\r\n\r\n* Adjust is_on\r\n\r\n* Adjust vallox component\r\n\r\n* Revert \"Adjust is_on\"\r\n\r\nThis reverts commit 48d207f250f99d8126702342c05a6be6e877e4d5.\r\n\r\n* Fix is_on property", "code": "def capability_attributes(self) -> dict[str, list[str] | None]:\n \n attrs = {}\n\n if (\n self.supported_features & FanEntityFeature.SET_SPEED\n or self.supported_features & FanEntityFeature.PRESET_MODE\n ):\n attrs[ATTR_PRESET_MODES] = self.preset_modes\n\n return attrs\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 93, "n_words": 25, "vocab_size": 21, "complexity": 3, "nloc": 9, "token_counts": 51, "n_ast_nodes": 80, "n_identifiers": 12, "random_cut": "def capability_attributes(self) -> dict[str, list[str] | None]:\n \n attrs = {}\n\n if (\n self.supported_features & FanEntityFeature.SET_SPEED\n or" }, { "id": 168663, "commit_id": "252ae0555abf488522f947107dcdee684be6ac8a", "repo": "pandas", "path": "pandas/tests/reshape/test_cut.py", "file_name": "test_cut.py", "fun_name": "test_no_right", "commit_message": "Revert Interval/IntervalIndex/interval_range.inclusive deprecation (#48116)\n\n* Revert \"Cln tests interval wrt inclusive (#47775)\"\r\n\r\nThis reverts commit 2d6e0b251955d3a2c0c88f7e6ddb57b335ed09b7.\r\n\r\n* Revert \"CLN: Rename private variables to inclusive (#47655)\"\r\n\r\nThis reverts commit 102b3ca2119df822e2b0f346fa936d0fe9f17501.\r\n\r\n* Revert \"TYP: Improve typing interval inclusive (#47646)\"\r\n\r\nThis reverts commit 55064763e8ba55f6ff5370a8dd083767a189d7a4.\r\n\r\n* Revert \"DEPR: Deprecate set_closed and add set_incluive (#47636)\"\r\n\r\nThis reverts commit bd4ff395cbbf4cbde1fc8f1f746cae064a401638.\r\n\r\n* Revert \"DEPR: Remove deprecation from private class IntervalTree (#47637)\"\r\n\r\nThis reverts commit f6658ef9fdef5972214fdc338e2c6b5ee308dbf4.\r\n\r\n* Revert \"Revert inclusive default change of IntervalDtype (#47367)\"\r\n\r\nThis reverts commit d9dd1289e07d86928d144e53beb3d5b8ab3c2215.\r\n\r\n* Revert \"ENH: consistency of input args for boundaries - Interval (#46522)\"\r\n\r\nThis reverts commit 7e23a37e1c5bda81234801a6584563e2880769eb.\r\n\r\n* Revert \"ENH: consistency of input args for boundaries - pd.interval_range (#46355)\"\r\n\r\nThis reverts commit 073b3535d7a5171102e5915c38b57c21d13795ae.\r\n\r\n* Fix ArrowIntervalType manually\r\n\r\n* Remove unused import\r\n\r\n* Fix doctest and leftover usage\r\n\r\n* Fix remaining tests\r\n\r\n* Fix wording in doctoring\r\n\r\nCo-authored-by: Patrick Hoefler <61934744+phofl@users.noreply.github.com>", "code": "def test_no_right():\n data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])\n result, bins = cut(data, 4, right=False, retbins=True)\n\n intervals = IntervalIndex.from_breaks(bins.round(3), closed=\"left\")\n intervals = intervals.take([0, 0, 0, 2, 3, 0, 1])\n expected = Categorical(intervals, ordered=True)\n\n 
tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95, 7.325, 9.7095]))\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 63, "n_words": 43, "vocab_size": 35, "complexity": 1, "nloc": 8, "token_counts": 148, "n_ast_nodes": 175, "n_identifiers": 21, "random_cut": "def test_no_right():\n data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])\n result, bins = cut(data, 4, right=False, retbins=True)\n\n intervals = IntervalIndex.from_breaks(bins.round(3), closed=\"left\")\n intervals = intervals.take([0, 0, 0, 2, 3, 0, 1])\n expected = Categorical(intervals, ordered=True)\n\n tm.assert_categorical_equa" }, { "id": 218711, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/lib2to3/fixes/fix_paren.py", "file_name": "fix_paren.py", "fun_name": "transform", "commit_message": "add python 3.10.4 for windows", "code": "def transform(self, node, results):\n target = results[\"target\"]\n\n lparen = LParen()\n lparen.prefix = target.prefix\n target.prefix = \"\" # Make it hug the parentheses\n target.insert_child(0, lparen)\n target.append_child(RParen())\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 66, "n_words": 25, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 48, "n_ast_nodes": 81, "n_identifiers": 11, "random_cut": "def transform(self, node, results):\n target = results[\"target\"]\n\n lparen = LParen()\n lparen.prefix = target.prefix\n target.prefix = \"\" # Make it hug the parentheses\n target.insert_child(0, lparen)\n targ" }, { "id": 157408, "commit_id": "ca86da3a30c4e080d4db8c25fca73de843663cb4", "repo": "stablediffusion", "path": "ldm/modules/diffusionmodules/model.py", "file_name": "model.py", "fun_name": "make_attn", "commit_message": "release more models", "code": "def make_attn(in_channels, attn_type=\"vanilla\", attn_kwargs=None):\n assert attn_type in [\"vanilla\", \"vanilla-xformers\", \"memory-efficient-cross-attn\", \"linear\", \"none\"], f'attn_type {attn_type} unknown'\n if XFORMERS_IS_AVAILBLE and attn_type == \"vanilla\":\n attn_type = \"vanilla-xformers\"\n print(f\"making attention of type '{attn_type}' with {in_channels} in_channels\")\n if attn_type == \"vanilla\":\n assert attn_kwargs is None\n return AttnBlock(in_channels)\n elif attn_type == \"vanilla-xformers\":\n print(f\"building MemoryEfficientAttnBlock with {in_channels} in_channels...\")\n return MemoryEfficientAttnBlock(in_channels)\n elif type == \"memory-efficient-cross-attn\":\n attn_kwargs[\"query_dim\"] = in_channels\n return MemoryEfficientCrossAttentionWrapper(**attn_kwargs)\n elif attn_type == \"none\":\n return nn.Identity(in_channels)\n else:\n raise NotImplementedError()\n\n", "url": "https://github.com/Stability-AI/stablediffusion.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 157, "n_words": 71, "vocab_size": 50, "complexity": 7, "nloc": 18, "token_counts": 109, "n_ast_nodes": 211, "n_identifiers": 13, "random_cut": "def make_attn(in_channels, attn_type=\"vanilla\", attn_kwargs=None):\n assert attn_type in [\"vanilla\", \"vanilla-xformers\", \"memo" }, { "id": 19097, "commit_id": "964f5ab75098c55f028f8acfeeae05df35ea68d5", "repo": "mlflow", "path": "tests/models/test_evaluation.py", "file_name": "test_evaluation.py", "fun_name": 
"test_classifier_evaluate", "commit_message": "Evaluation Default evaluator (#5092)\n\n* init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* rename module\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert black change\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* change module path\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* lazy load pyspark\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert export\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix curcit import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix conftest.py\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* Revert \"fix conftest.py\"\r\n\r\nThis reverts commit 2ea29c62bfffc5461bf77f3da15b5c00f51de19b.\r\n\r\n* fix tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* default evaluator\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update hash algo\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comment\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix lint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add more tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix lint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update shap explainer\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* remove scikitplot dep\r\n\r\nSigned-off-by: Weichen 
Xu \r\n\r\n* add pr curve\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap.summary_plot\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* log explainer\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* improve explainer code\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update shap init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update explainer creating\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update predict_proba\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add multi-class metrics artifacts\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add log_loss metric\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* lazy load pyspark\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address ben comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* prevent show shap logo, add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* support spark model\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap version check\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update docs, loose classifier label limit\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* multiclass classifier merge metrics/plots\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* zfill feature name\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add config max_num_classes_threshold_logging_roc_pr_curve_for_multiclass_classifier\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* improve label handling\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* black\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* increase plot dpi\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix test fixture\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* use matplot rc_context\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix shap import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor EvaluationDataset\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* limit user specify shap algos\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* clean\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update evaluation dataset\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* use svg fig\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert svg\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* curve dashline, legend display ap/roc, legend move out\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* linewidth 1\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* keyword arguments for evaluate, fix tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* mark abc.abstractmethod, kw args for ModelEvaluator methods\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu ", "code": "def 
test_classifier_evaluate(multiclass_logistic_regressor_model_uri, iris_dataset):\n y_true = iris_dataset.labels_data\n classifier_model = mlflow.pyfunc.load_model(multiclass_logistic_regressor_model_uri)\n y_pred = classifier_model.predict(iris_dataset.features_data)\n expected_accuracy_score = accuracy_score(y_true, y_pred)\n expected_metrics = {\n \"accuracy_score\": expected_accuracy_score,\n }\n expected_saved_metrics = {\n \"accuracy_score_on_iris_dataset\": expected_accuracy_score,\n }\n\n expected_artifact = confusion_matrix(y_true, y_pred)\n\n with mlflow.start_run() as run:\n eval_result = evaluate(\n model=classifier_model,\n model_type=\"classifier\",\n dataset=iris_dataset,\n run_id=None,\n evaluators=\"dummy_evaluator\",\n )\n\n artifact_name = \"confusion_matrix_on_iris_dataset.csv\"\n saved_artifact_path = get_local_artifact_path(run.info.run_id, artifact_name)\n\n _, saved_metrics, _, saved_artifacts = get_run_data(run.info.run_id)\n assert saved_metrics == expected_saved_metrics\n assert saved_artifacts == [artifact_name]\n\n assert eval_result.metrics == expected_metrics\n confusion_matrix_artifact = eval_result.artifacts[artifact_name]\n assert np.array_equal(confusion_matrix_artifact.content, expected_artifact)\n assert confusion_matrix_artifact.uri == get_artifact_uri(run.info.run_id, artifact_name)\n assert np.array_equal(confusion_matrix_artifact.load(saved_artifact_path), expected_artifact)\n\n with TempDir() as temp_dir:\n temp_dir_path = temp_dir.path()\n eval_result.save(temp_dir_path)\n\n with open(temp_dir.path(\"metrics.json\"), \"r\") as fp:\n assert json.load(fp) == eval_result.metrics\n\n with open(temp_dir.path(\"artifacts_metadata.json\"), \"r\") as fp:\n assert json.load(fp) == {\n \"confusion_matrix_on_iris_dataset.csv\": {\n \"uri\": confusion_matrix_artifact.uri,\n \"class_name\": \"mlflow_test_plugin.dummy_evaluator.Array2DEvaluationArtifact\",\n }\n }\n\n assert os.listdir(temp_dir.path(\"artifacts\")) == [\"confusion_matrix_on_iris_dataset.csv\"]\n\n loaded_eval_result = EvaluationResult.load(temp_dir_path)\n assert loaded_eval_result.metrics == eval_result.metrics\n loaded_confusion_matrix_artifact = loaded_eval_result.artifacts[artifact_name]\n assert confusion_matrix_artifact.uri == loaded_confusion_matrix_artifact.uri\n assert np.array_equal(\n confusion_matrix_artifact.content,\n loaded_confusion_matrix_artifact.content,\n )\n\n new_confusion_matrix_artifact = Array2DEvaluationArtifact(uri=confusion_matrix_artifact.uri)\n new_confusion_matrix_artifact.load()\n assert np.array_equal(\n confusion_matrix_artifact.content,\n new_confusion_matrix_artifact.content,\n )\n\n", "url": "https://github.com/mlflow/mlflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 545, "n_words": 150, "vocab_size": 85, "complexity": 1, "nloc": 57, "token_counts": 369, "n_ast_nodes": 602, "n_identifiers": 59, "random_cut": "def test_classifier_evaluate(multiclass_logistic_regressor_model_uri, iris_dataset):\n y_true = iris_dataset.labels_data\n classifier_model = mlflow.pyfunc.load_model(multiclass_logistic_regressor_model_uri)\n y_pred = classifier_model.predict(iris_dataset.features_data)\n expected_accuracy_score = accuracy_score(y_true, y_pred)\n expected_metrics = {\n \"accuracy_score\": expected_accuracy_score,\n }\n expected_saved_metrics = {\n \"accuracy_score_on_iris_dataset\": expected_accuracy_score,\n }\n\n expected_artifact = confusion_matrix(y_true, y_pred)\n\n with mlflow.start_run() as run:\n 
eval_result = evaluate(\n model=classifier_model,\n model_type=\"classifier\",\n dataset=iris_dataset,\n run_id=None,\n evaluators=\"dummy_evaluator\",\n )\n\n artifact_name = \"confusion_matrix_on_iris_dataset.csv\"\n saved_artifact_path = get_local_artifact_path(run.info.run_id, artifact_name)\n\n _, saved_metrics, _, saved_artifacts = get_run_data(run.info.run_id)\n assert saved_metrics == expected_saved_metrics\n assert saved_artifacts == [artifact_name]\n\n assert eval_result.metrics == expected_metrics\n confusion_matrix_artifact = eval_result.artifacts[artifact_name]\n assert np.array_equal(confusion_matrix_artifact.content, expected_artifact)\n assert confusion_matrix_artifact.uri == get_artifact_uri(run.info.run_id, artifact_name)\n assert np.array_equal(confusion_matrix_artifact.load(saved_artifact_path), expected_artifact)\n\n with TempDir() as temp_dir:\n temp_dir_path = temp_dir.path()\n eval_result.save(temp_dir_path)\n\n with open(temp_dir.path(\"metric" }, { "id": 97310, "commit_id": "f9dcd325304b37e3bff3869c1589354755e9300e", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_tags.py", "file_name": "test_organization_metric_tags.py", "fun_name": "test_metric_tags_metric_does_not_have_data", "commit_message": "ref(metrics-indexer): Change bulk_record, record signatures (#32811)\n\n* ref(metrics-indexer): Change bulk_record, record signatures", "code": "def test_metric_tags_metric_does_not_have_data(self):\n indexer.record(self.organization.id, \"foo.bar\")\n assert (\n self.get_response(\n self.organization.slug,\n metric=[\"foo.bar\"],\n ).data\n == []\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 96, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 9, "token_counts": 42, "n_ast_nodes": 68, "n_identifiers": 10, "random_cut": "def test_metric_tags_metric_does_not_have_data(self):\n indexer.record(self.organization.id, \"foo.bar\")\n assert (\n self.get_response(\n self.organization.slug,\n metric=[\"foo.bar\"],\n ).data\n == []\n )\n" }, { "id": 11533, "commit_id": "51403a57d03f0b1ddfd7fc533ccee78e23f5faa1", "repo": "jina", "path": "tests/distributed/test_local_flow_use_remote_executor/test_integration.py", "file_name": "test_integration.py", "fun_name": "external_deployment_args", "commit_message": "refactor: unify port args (#4382)", "code": "def external_deployment_args():\n args = ['--port', str(45678)]\n args = vars(set_deployment_parser().parse_args(args))\n del args['external']\n del args['deployment_role']\n del args['host']\n return args\n\n\n@pytest.fixture", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 34, "n_words": 18, "vocab_size": 13, "complexity": 1, "nloc": 7, "token_counts": 44, "n_ast_nodes": 84, "n_identifiers": 8, "random_cut": "def external_deployment_args():\n args = ['--port', str(45678)]\n args = vars(set_deployment_parser().parse_args(args))\n del args['external']\n del args['deployment_role']\n del" }, { "id": 261424, "commit_id": "7dc7f8a3d34d98d38bf88b18cf11417f49cdc918", "repo": "scikit-learn", "path": "sklearn/utils/estimator_checks.py", "file_name": "estimator_checks.py", "fun_name": "check_transformer_general", "commit_message": "FIX Fixes common test for requires_positive_X (#24667)\n\nCo-authored-by: Guillaume Lemaitre ", "code": "def check_transformer_general(name, transformer, 
readonly_memmap=False):\n X, y = make_blobs(\n n_samples=30,\n centers=[[0, 0, 0], [1, 1, 1]],\n random_state=0,\n n_features=2,\n cluster_std=0.1,\n )\n X = StandardScaler().fit_transform(X)\n X = _enforce_estimator_tags_X(transformer, X)\n\n if readonly_memmap:\n X, y = create_memmap_backed_data([X, y])\n\n _check_transformer(name, transformer, X, y)\n\n\n@ignore_warnings(category=FutureWarning)", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@ignore_warnings(category=FutureWarning)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 96, "n_words": 38, "vocab_size": 30, "complexity": 2, "nloc": 13, "token_counts": 99, "n_ast_nodes": 149, "n_identifiers": 20, "random_cut": "def check_transformer_general(name, transformer, readonly_memmap=False):\n X, y = make_blobs(\n n_samples=3" }, { "id": 249657, "commit_id": "4283bd1cf9c3da2157c3642a7c4f105e9fac2636", "repo": "synapse", "path": "tests/storage/test_stream.py", "file_name": "test_stream.py", "fun_name": "test_filter_relation_senders_and_type", "commit_message": "Support filtering the /messages API by relation type (MSC3874). (#14148)\n\nGated behind an experimental configuration flag.", "code": "def test_filter_relation_senders_and_type(self):\n # Messages which second user reacted to.\n filter = {\n \"related_by_senders\": [self.second_user_id],\n \"related_by_rel_types\": [RelationTypes.ANNOTATION],\n }\n chunk = self._filter_messages(filter)\n self.assertEqual(chunk, [self.event_id_1])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 78, "n_words": 22, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 45, "n_ast_nodes": 74, "n_identifiers": 10, "random_cut": "def test_filter_relation_senders_and_type(self):\n # Messages which second user reacted to.\n filter = {\n \"related_by_senders\": [self.second_user_id],\n \"related_by_rel_types\": [RelationT" }, { "id": 46965, "commit_id": "c0157e6a3c6129b143a30954d53e7f49ed4d74f6", "repo": "airflow", "path": "tests/www/views/test_views_home.py", "file_name": "test_views_home.py", "fun_name": "test_sorting_home_view", "commit_message": "Support for sorting DAGs in the web UI (#22671)\n\n* Add sort + small test\r\n\r\n* clean code\r\n\r\n* Remove useless forgotten macro, fix nullslast for mysql\r\n\r\n* Changes following code review\r\n\r\n* Remove nullslast\r\n\r\n* Changes desc syntax", "code": "def test_sorting_home_view(url, lower_key, greater_key, user_client, working_dags):\n resp = user_client.get(url, follow_redirects=True)\n resp_html = resp.data.decode('utf-8')\n lower_index = resp_html.find(lower_key)\n greater_index = resp_html.find(greater_key)\n assert lower_index < greater_index\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 37, "n_words": 23, "vocab_size": 18, "complexity": 1, "nloc": 6, "token_counts": 55, "n_ast_nodes": 85, "n_identifiers": 15, "random_cut": "def test_sorting_home_view(url, lower_key, greater_key, user_client, working_dags):\n " }, { "id": 14955, "commit_id": "84740d50cc833e9dd80495d17f32755207469720", "repo": "ccxt", "path": "python/ccxt/async_support/huobi.py", "file_name": "huobi.py", "fun_name": "fetch_closed_contract_orders", "commit_message": "1.66.12\n\n[ci skip]", "code": "async def fetch_closed_contract_orders(self, symbol=None, since=None, limit=None, params={}):\n request = {\n 'status': 
'5,6,7',\n }\n return await self.fetch_contract_orders(symbol, since, limit, self.extend(request, params))\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 51, "n_words": 20, "vocab_size": 20, "complexity": 1, "nloc": 5, "token_counts": 51, "n_ast_nodes": 77, "n_identifiers": 9, "random_cut": "async def fetch_closed_contract_orders(self, symbol=None, since=None, limit=None, params={}):\n request = {\n 'status': '5,6,7',\n }\n " }, { "id": 7885, "commit_id": "6b78e36f9d6f6c15a9f863acf2d2130ffd1733ab", "repo": "ludwig", "path": "tests/integration_tests/test_ray.py", "file_name": "test_ray.py", "fun_name": "test_ray_distributed_predict", "commit_message": "Fix mulitple partition predict (#2422)\n\n* fix\r\n\r\n* add test\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def test_ray_distributed_predict(tmpdir):\n preprocessing_params = {\n \"audio_file_length_limit_in_s\": 3.0,\n \"missing_value_strategy\": BACKFILL,\n \"in_memory\": True,\n \"padding_value\": 0,\n \"norm\": \"per_file\",\n \"type\": \"fbank\",\n \"window_length_in_s\": 0.04,\n \"window_shift_in_s\": 0.02,\n \"num_filter_bands\": 80,\n }\n audio_dest_folder = os.path.join(tmpdir, \"generated_audio\")\n input_features = [audio_feature(folder=audio_dest_folder, preprocessing=preprocessing_params)]\n output_features = [binary_feature()]\n\n with ray_start(num_cpus=2):\n config = {\n \"input_features\": input_features,\n \"output_features\": output_features,\n TRAINER: {\"epochs\": 2, \"batch_size\": 8},\n }\n\n with tempfile.TemporaryDirectory() as tmpdir:\n backend_config = {**RAY_BACKEND_CONFIG}\n csv_filename = os.path.join(tmpdir, \"dataset.csv\")\n dataset_csv = generate_data(input_features, output_features, csv_filename, num_examples=100)\n dataset = create_data_set_to_use(\"csv\", dataset_csv, nan_percent=0.0)\n model = LudwigModel(config, backend=backend_config)\n output_dir = None\n\n _, _, output_dir = model.train(\n dataset=dataset,\n training_set=dataset,\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True,\n skip_save_log=True,\n )\n\n preds, _ = model.predict(dataset=dataset)\n\n # compute the predictions\n preds = preds.compute()\n assert preds.iloc[1].name != preds.iloc[42].name\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 462, "n_words": 106, "vocab_size": 86, "complexity": 1, "nloc": 39, "token_counts": 256, "n_ast_nodes": 403, "n_identifiers": 45, "random_cut": "def test_ray_distributed_predict(tmpdir):\n preprocessing_params = {\n \"audio_file_length_limit_in_s\": 3.0,\n \"missing_value_strategy\": BACKFILL,\n \"in_memory\": True,\n \"padding_value\": 0,\n \"norm\": \"per_file\",\n \"type\": \"fbank\",\n \"window_length_in_s\": 0.04,\n \"window_shift_in_s\": 0.02,\n \"num_filter_bands\": 80,\n }\n audio_dest_folder = os.path.join(tmpdir, \"generated_audio\")\n input_features = [audio_feature(folder=audio_dest_folder, preprocessing=preprocessing_params)]\n output_features = [binary_feature()]\n\n with ray_start(num_cpus=2):\n config = {\n \"input_features\": input_features,\n \"output_features\": output_features,\n TRAINER: {\"epochs\": 2, \"batch_size\": 8},\n }\n\n with tempfile.TemporaryDirectory() 
as tmpdir:\n backend_config = {**RAY_BACKEND_CONFIG}\n csv_filename = os.path.join(tmpdir, \"dataset.csv\")\n dataset_csv = generate_data(input_features, output_features, csv_filename, num_examples=100)\n dataset = create_data_set_to_use(\"csv\", dataset_csv, nan_percent=0.0)\n model = LudwigModel(config, backend=backend_config)\n output_dir = None\n\n _, _, output_dir = model.train(\n dataset=datase" }, { "id": 30537, "commit_id": "5d0cc0a092f93640e1d83baaf1c738768481d208", "repo": "OCRmyPDF", "path": "tests/test_main.py", "file_name": "test_main.py", "fun_name": "invalid_tess_config", "commit_message": "tests: Extract some test fixtures for better clarity", "code": "def invalid_tess_config(outdir):\n cfg_file = outdir / 'test.cfg'\n with cfg_file.open('w') as f:\n f.write(\n \n )\n yield cfg_file\n\n\n@pytest.mark.slow # This test sometimes times out in CI\n@pytest.mark.parametrize('renderer', RENDERERS)", "url": "https://github.com/ocrmypdf/OCRmyPDF.git", "language": "Python", "ast_errors": "@pytest.mark.slow # This test sometimes times out in CI\n@pytest.mark.parametrize('renderer', RENDERERS)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 59, "n_words": 26, "vocab_size": 25, "complexity": 1, "nloc": 9, "token_counts": 28, "n_ast_nodes": 86, "n_identifiers": 11, "random_cut": "def invalid_tess_config(outdir):" }, { "id": 244723, "commit_id": "a96e9c1b3e6c04680a33c54bdf94a9b7213edebb", "repo": "mmdetection", "path": "tests/test_metrics/test_coco_panoptic_metric.py", "file_name": "test_coco_panoptic_metric.py", "fun_name": "test_evaluate_without_json", "commit_message": "coco panoptic metric", "code": "def test_evaluate_without_json(self):\n # subset of dataset.data_list\n data_batch = [{\n 'input': None,\n 'data_sample': {\n 'img_id':\n 0,\n 'ori_shape': (60, 80),\n 'segments_info': [{\n 'id': 1,\n 'category': 0,\n 'is_thing': 1\n }, {\n 'id': 2,\n 'category': 0,\n 'is_thing': 1\n }, {\n 'id': 3,\n 'category': 1,\n 'is_thing': 1\n }, {\n 'id': 4,\n 'category': 2,\n 'is_thing': 0\n }],\n 'file_name':\n 'fake_name1.jpg',\n 'seg_map_path':\n osp.join(self.gt_seg_dir, 'fake_name1.png')\n }\n }]\n\n metric = CocoPanopticMetric(\n ann_file=None,\n seg_prefix=None,\n classwise=False,\n nproc=1,\n outfile_prefix=None)\n\n metric.dataset_meta = self.dataset_meta\n metric.process(data_batch, deepcopy(self.predictions))\n eval_results = metric.evaluate(size=1)\n self.assertDictEqual(eval_results, self.target)\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 627, "n_words": 76, "vocab_size": 52, "complexity": 1, "nloc": 40, "token_counts": 171, "n_ast_nodes": 283, "n_identifiers": 22, "random_cut": "def test_evaluate_without_json(self):\n # subset of dataset.data_list\n data_batch = [{\n 'input': None,\n 'data_sample': {\n 'img_id':\n 0,\n 'ori_shape': (60, 80),\n 'segments_info': [{\n 'id': 1,\n 'category': 0,\n 'is_thing': 1\n }, {\n 'id': 2,\n 'category': 0,\n 'is_thing': 1\n }, {\n 'id': 3,\n 'category': 1,\n 'is_thing': 1\n }, {\n 'id': 4,\n 'category': 2,\n 'is_thing': 0\n }],\n 'file_name':\n 'fake_name1.jpg',\n 'seg_map_path':\n osp.join(self.gt_seg_dir, 'fake_name1.png')\n }\n }]\n\n metric = CocoPanopticMetric(\n ann_file=None,\n seg_prefi" }, { "id": 291214, "commit_id": "a55fb445b0ed4efd625227b4f13a01a0f469c358", "repo": "core", "path": "homeassistant/components/arcam_fmj/media_player.py", "file_name": "media_player.py", "fun_name": "sound_mode_list", "commit_message": "Bump 
to Arcam 1.0.1 and make strictly typed (#82487)\n\n* Make arcam_fmj strictly typed\r\n\r\n* Add test for invalid UDN", "code": "def sound_mode_list(self) -> list[str] | None:\n \n if (values := self._state.get_decode_modes()) is None:\n return None\n return [x.name for x in values]\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 52, "n_words": 20, "vocab_size": 18, "complexity": 3, "nloc": 5, "token_counts": 40, "n_ast_nodes": 64, "n_identifiers": 9, "random_cut": "def sound_mode_list(self) -> list[str] | None:\n \n if (values := self._state.get_decode_modes()) is None:\n return None\n return [x.name for x in values]\n" }, { "id": 65332, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/sales_payment_summary/sales_payment_summary.py", "file_name": "sales_payment_summary.py", "fun_name": "get_pos_sales_payment_data", "commit_message": "style: format code with black", "code": "def get_pos_sales_payment_data(filters):\n\tsales_invoice_data = get_pos_invoice_data(filters)\n\tdata = [\n\t\t[\n\t\t\trow[\"posting_date\"],\n\t\t\trow[\"owner\"],\n\t\t\trow[\"mode_of_payment\"],\n\t\t\trow[\"net_total\"],\n\t\t\trow[\"total_taxes\"],\n\t\t\trow[\"paid_amount\"],\n\t\t\trow[\"warehouse\"],\n\t\t\trow[\"cost_center\"],\n\t\t]\n\t\tfor row in sales_invoice_data\n\t]\n\n\treturn data\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 9, "n_words": 25, "vocab_size": 20, "complexity": 2, "nloc": 16, "token_counts": 63, "n_ast_nodes": 101, "n_identifiers": 6, "random_cut": "def get_pos_sales_payment_data(filters):\n\tsales_invoice_data = get_pos_invoice_data(filters)\n\tdata = [\n\t\t[\n\t\t\trow[\"posting_date\"],\n\t\t\trow[\"owner\"],\n\t\t\trow[\"" }, { "id": 115728, "commit_id": "f57105a050803eadc94fdb3fd435da5a98c19bdc", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/lightwood_handler/lightwood_handler/lightwood_handler.py", "file_name": "lightwood_handler.py", "fun_name": "default", "commit_message": "lw handler select", "code": "def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n elif isinstance(obj, (np.float, np.float32, np.float64)):\n return float(obj)\n else:\n return super().default(obj)\n\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 71, "n_words": 18, "vocab_size": 15, "complexity": 3, "nloc": 7, "token_counts": 59, "n_ast_nodes": 91, "n_identifiers": 11, "random_cut": "def default(self, obj):\n if isinstance(obj, np.ndarray" }, { "id": 286142, "commit_id": "4fa0b98fb8cce09ecfae008752b20f8d0bffe9a5", "repo": "OpenBBTerminal", "path": "openbb_terminal/forecast/forecast_model.py", "file_name": "forecast_model.py", "fun_name": "get_default_files", "commit_message": "Allowed for newly exported files to be loaded (#2894)", "code": "def get_default_files() -> Dict[str, Path]:\n \n default_files = {\n filepath.name: filepath\n for file_type in base_file_types\n for filepath in chain(\n USER_EXPORTS_DIRECTORY.rglob(f\"*.{file_type}\"),\n USER_CUSTOM_IMPORTS_DIRECTORY.rglob(f\"*.{file_type}\"),\n )\n if filepath.is_file()\n }\n return default_files\n\n\n@log_start_end(log=logger)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": 
"@log_start_end(log=logger)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 95, "n_words": 27, "vocab_size": 23, "complexity": 4, "nloc": 18, "token_counts": 55, "n_ast_nodes": 106, "n_identifiers": 17, "random_cut": "def get_default_files() -> Dict[str, Path]:\n \n defaul" }, { "id": 91702, "commit_id": "7f60db924ea37f34e0cfe6856777239e2a2ffe13", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_data.py", "file_name": "test_organization_metric_data.py", "fun_name": "test_crashed_user_sessions", "commit_message": "feat(metrics): make indexer more configurable (#35604)\n\nThis makes the sentry_metrics indexer more configurable in the following ways, to enable indexing on the ingest-performance-metrics topic:\r\n\r\n- configurable input Kafka topic\r\n- configurable output Kafka topic\r\n- configurable model from which to pull index results\r\n- tags for internal metrics to distinguish between the two modes operationally", "code": "def test_crashed_user_sessions(self):\n org_id = self.organization.id\n user_ts = time.time()\n self._send_buckets(\n [\n {\n \"org_id\": org_id,\n \"project_id\": self.project.id,\n \"metric_id\": self.session_user_metric,\n \"timestamp\": user_ts,\n \"tags\": {\n self.session_status_tag: _indexer_record(org_id, \"crashed\"),\n self.release_tag: _indexer_record(org_id, \"foo\"),\n },\n \"type\": \"s\",\n \"value\": [1, 2, 4],\n \"retention_days\": 90,\n },\n {\n \"org_id\": self.organization.id,\n \"project_id\": self.project.id,\n \"metric_id\": self.session_user_metric,\n \"timestamp\": user_ts,\n \"tags\": {\n self.session_status_tag: _indexer_record(org_id, \"crashed\"),\n self.release_tag: _indexer_record(org_id, \"bar\"),\n },\n \"type\": \"s\",\n \"value\": [1, 2, 4, 8, 9, 5],\n \"retention_days\": 90,\n },\n ],\n entity=\"metrics_sets\",\n )\n response = self.get_success_response(\n self.organization.slug,\n field=[\"session.crashed_user\"],\n statsPeriod=\"6m\",\n interval=\"1m\",\n groupBy=[\"release\"],\n orderBy=[\"-session.crashed_user\"],\n )\n foo_group, bar_group = response.data[\"groups\"][1], response.data[\"groups\"][0]\n assert foo_group[\"by\"][\"release\"] == \"foo\"\n assert foo_group[\"totals\"] == {\"session.crashed_user\": 3}\n assert foo_group[\"series\"] == {\"session.crashed_user\": [0, 0, 0, 0, 0, 3]}\n assert bar_group[\"by\"][\"release\"] == \"bar\"\n assert bar_group[\"totals\"] == {\"session.crashed_user\": 6}\n assert bar_group[\"series\"] == {\"session.crashed_user\": [0, 0, 0, 0, 0, 6]}\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 806, "n_words": 123, "vocab_size": 71, "complexity": 1, "nloc": 49, "token_counts": 331, "n_ast_nodes": 539, "n_identifiers": 25, "random_cut": "def test_crashed_user_sessions(self):\n org_id = self.organization.id\n user_ts = time.time()\n self._send_buckets(\n [\n {\n \"org_id\": org_id,\n \"project_id\": self.project.id,\n \"metric_id\": self.session_user_metric,\n \"timestamp\": user_ts,\n \"tags\": {\n self.session_status_tag: _indexer_record(org_id, \"crashed\"),\n self.release_tag: _indexer_record(org_id, \"foo\"),\n },\n \"type\": \"s\",\n \"value\": [1, 2, 4],\n \"retention_days\": 90,\n },\n {\n \"org_id\": self.organization.id,\n \"project_id\": self.project.id,\n \"metric_id\": self.session_user_metric,\n \"timestamp\": user_ts,\n \"tags\": {\n self.session_status_tag: _indexer_record(org_id, \"crashed\"),\n self.release_tag: _indexer_record(org_id, 
\"bar\"),\n },\n \"type\": \"s\",\n \"value\": [1, 2, 4, 8, 9, 5],\n \"retention_days\": 90,\n },\n ],\n entity=\"metrics_sets\",\n )\n response = self.get_success_response(\n self.organization.slug,\n field=[\"session.crashed_user\"],\n statsPeriod=\"6m\",\n interval=\"1m\",\n groupBy=[\"release\"],\n orderBy=[\"-session.crashed_user\"],\n )\n foo_group, bar_group = response.data[\"groups\"][1], response.data[\"groups\"][0]\n assert foo_group[\"by\"][\"release\"] == \"foo\"\n assert foo_group[\"totals\"] == {\"session.crashed_user\": 3}\n assert foo_group[\"series\"] == {\"session.crashed_user\": [0, 0, 0, 0, 0, 3]}\n assert bar_group[\"by\"][\"release\"] == \"bar\"\n assert bar_group[\"totals\"] == {\"session.crashed_user\": 6}\n " }, { "id": 321674, "commit_id": "232bea57e3d3517e2f4bc4ba454c8056ce9d4ead", "repo": "qutebrowser", "path": "tests/unit/keyinput/test_keyutils.py", "file_name": "test_keyutils.py", "fun_name": "test_key_info_repr", "commit_message": "Add a KeyInfo.__repr__", "code": "def test_key_info_repr():\n info = keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.ShiftModifier)\n expected = (\n \"\")\n assert repr(info) == expected\n\n\n@pytest.mark.parametrize('info1, info2, equal', [\n (keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.NoModifier),\n keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.NoModifier),\n True),\n (keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.NoModifier),\n keyutils.KeyInfo(Qt.Key.Key_B, Qt.KeyboardModifier.NoModifier),\n False),\n (keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.NoModifier),\n keyutils.KeyInfo(Qt.Key.Key_B, Qt.KeyboardModifier.ControlModifier),\n False),\n])", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize('info1, info2, equal', [\n (keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.NoModifier),\n keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.NoModifier),\n True),\n (keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.NoModifier),\n keyutils.KeyInfo(Qt.Key.Key_B, Qt.KeyboardModifier.NoModifier),\n False),\n (keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.NoModifier),\n keyutils.KeyInfo(Qt.Key.Key_B, Qt.KeyboardModifier.ControlModifier),\n False),\n])", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 91, "n_words": 38, "vocab_size": 27, "complexity": 1, "nloc": 6, "token_counts": 35, "n_ast_nodes": 240, "n_identifiers": 17, "random_cut": "def test_key_info_repr():\n info = keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.ShiftModifier)\n expected = (\n \"\")\n assert repr(info) == expected\n\n\n@pytest.mark.parametrize('info1, info2, equal', [\n (keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.NoModifier),\n keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.NoModifier),\n True),\n (keyutils.KeyInfo(Qt.Key.Key_A, Qt.KeyboardModifier.NoModifier),\n keyutils.KeyInfo(Qt.Key.Key_B, Qt.KeyboardModifier.NoModifier),\n False),\n (keyutils.KeyInfo(Qt.Key.Key_A, Qt" }, { "id": 177363, "commit_id": "73afc6768021f46279d05e6b8a8e722bffac1d79", "repo": "networkx", "path": "networkx/drawing/tests/test_pylab.py", "file_name": "test_pylab.py", "fun_name": "test_labels_and_colors", "commit_message": "Warn on unused visualization kwargs that only apply to FancyArrowPatch edges (#6098)\n\n* Add test for UserWarning with ignored vizopts.\r\n\r\n* Implement UserWarning for bad vizopts combos.\r\n\r\n* Fix instance in test suite caught by new warnings.\r\n\r\n* Update test to check warnings are *not* raised when using FAPs.", "code": "def 
test_labels_and_colors():\n G = nx.cubical_graph()\n pos = nx.spring_layout(G) # positions for all nodes\n # nodes\n nx.draw_networkx_nodes(\n G, pos, nodelist=[0, 1, 2, 3], node_color=\"r\", node_size=500, alpha=0.75\n )\n nx.draw_networkx_nodes(\n G,\n pos,\n nodelist=[4, 5, 6, 7],\n node_color=\"b\",\n node_size=500,\n alpha=[0.25, 0.5, 0.75, 1.0],\n )\n # edges\n nx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5)\n nx.draw_networkx_edges(\n G,\n pos,\n edgelist=[(0, 1), (1, 2), (2, 3), (3, 0)],\n width=8,\n alpha=0.5,\n edge_color=\"r\",\n )\n nx.draw_networkx_edges(\n G,\n pos,\n edgelist=[(4, 5), (5, 6), (6, 7), (7, 4)],\n width=8,\n alpha=0.5,\n edge_color=\"b\",\n )\n nx.draw_networkx_edges(\n G,\n pos,\n edgelist=[(4, 5), (5, 6), (6, 7), (7, 4)],\n arrows=True,\n min_source_margin=0.5,\n min_target_margin=0.75,\n width=8,\n edge_color=\"b\",\n )\n # some math labels\n labels = {}\n labels[0] = r\"$a$\"\n labels[1] = r\"$b$\"\n labels[2] = r\"$c$\"\n labels[3] = r\"$d$\"\n labels[4] = r\"$\\alpha$\"\n labels[5] = r\"$\\beta$\"\n labels[6] = r\"$\\gamma$\"\n labels[7] = r\"$\\delta$\"\n nx.draw_networkx_labels(G, pos, labels, font_size=16)\n nx.draw_networkx_edge_labels(G, pos, edge_labels=None, rotate=False)\n nx.draw_networkx_edge_labels(G, pos, edge_labels={(4, 5): \"4-5\"})\n # plt.show()\n\n\n@pytest.mark.mpl_image_compare", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@pytest.mark.mpl_image_compare", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 415, "n_words": 140, "vocab_size": 91, "complexity": 1, "nloc": 53, "token_counts": 395, "n_ast_nodes": 534, "n_identifiers": 27, "random_cut": "def test_labels_and_colors():\n G = nx.cubical_graph()\n pos = nx.spring_layout(G) # positions for all nodes\n # nodes\n nx.draw_networkx_nodes(\n G, pos, nodelist=[0, 1, 2, 3], node_color=\"r\", node_size=500, alpha=0.75\n )\n nx.draw_networkx_nodes(\n G,\n pos,\n nodelist=[4, 5, 6, 7],\n node_color=\"b\",\n node_size=500,\n alpha=[0.25, 0.5, 0.75, 1.0],\n )\n # edges\n nx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5)\n nx.draw_networkx_edges(\n G,\n pos,\n edgelist=[(0, 1), (1, 2), (2, 3), (3, 0)],\n width=8,\n alpha=0.5,\n edge_color=\"r\",\n )\n nx.draw_networkx_edges(\n G,\n pos,\n edgelist=[(4, 5), (5, 6), (6, 7), (7, 4)],\n width=8,\n alpha=0.5,\n edge_color=\"b\",\n )\n nx.draw_networkx_edges(\n G,\n pos,\n edgelist=[(4, 5), (5, 6), (6, 7), (7, 4)],\n arrows=True,\n m" }, { "id": 49520, "commit_id": "e0c027f34acd27794a483509cdddeadf1ac4a504", "repo": "PaddleHub", "path": "modules/image/keypoint_detection/pp-tinypose/keypoint_infer.py", "file_name": "keypoint_infer.py", "fun_name": "predict_image", "commit_message": "add pp_tinypose", "code": "def predict_image(self, image_list, run_benchmark=False, repeats=1, visual=True):\n results = []\n batch_loop_cnt = math.ceil(float(len(image_list)) / self.batch_size)\n for i in range(batch_loop_cnt):\n start_index = i * self.batch_size\n end_index = min((i + 1) * self.batch_size, len(image_list))\n batch_image_list = image_list[start_index:end_index]\n if run_benchmark:\n # preprocess\n inputs = self.preprocess(batch_image_list) # warmup\n self.det_times.preprocess_time_s.start()\n inputs = self.preprocess(batch_image_list)\n self.det_times.preprocess_time_s.end()\n\n # model prediction\n result_warmup = self.predict(repeats=repeats) # warmup\n self.det_times.inference_time_s.start()\n result = self.predict(repeats=repeats)\n self.det_times.inference_time_s.end(repeats=repeats)\n\n # 
postprocess\n result_warmup = self.postprocess(inputs, result) # warmup\n self.det_times.postprocess_time_s.start()\n result = self.postprocess(inputs, result)\n self.det_times.postprocess_time_s.end()\n self.det_times.img_num += len(batch_image_list)\n\n cm, gm, gu = get_current_memory_mb()\n self.cpu_mem += cm\n self.gpu_mem += gm\n self.gpu_util += gu\n\n else:\n # preprocess\n self.det_times.preprocess_time_s.start()\n inputs = self.preprocess(batch_image_list)\n self.det_times.preprocess_time_s.end()\n\n # model prediction\n self.det_times.inference_time_s.start()\n result = self.predict()\n self.det_times.inference_time_s.end()\n\n # postprocess\n self.det_times.postprocess_time_s.start()\n result = self.postprocess(inputs, result)\n self.det_times.postprocess_time_s.end()\n self.det_times.img_num += len(batch_image_list)\n\n if visual:\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n visualize(batch_image_list, result, visual_thresh=self.threshold, save_dir=self.output_dir)\n\n results.append(result)\n if visual:\n print('Test iter {}'.format(i))\n results = self.merge_batch_result(results)\n return results\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 840, "n_words": 140, "vocab_size": 78, "complexity": 6, "nloc": 45, "token_counts": 394, "n_ast_nodes": 638, "n_identifiers": 52, "random_cut": "def predict_image(self, image_list, run_benchmark=False, repeats=1, visual=True):\n results = []\n batch_loop_cnt = math.ceil(float(len(image_list)) / self.batch_size)\n for i in range(batch_loop_cnt):\n start_index = i * self.batch_size\n end_index = min((i + 1) * self.batch_size, len(image_list))\n batch_image_list = image_list[start_index:end_index]\n if run_benchmark:\n # preprocess\n inputs = self.preprocess(batch_image_list) # warmup\n self.det_times.preprocess_time_s.start()\n inputs = self.preprocess(batch_image_list)\n self.det_times.preprocess_time_s.end()\n\n # model prediction\n result_warmup = self.predict(repeats=repeats) # warmup\n self.det_times.inference_time_s.start()\n result = self.predict(repeats=repeats)\n self.det_times.inference_time_s.end(repeats=repeats)\n\n # postprocess\n result_warmup = self.postprocess(inputs, result) # warmup\n self.det_times.postprocess" }, { "id": 81820, "commit_id": "34e8087aeef0de19642e7dd9cd076adcdf5fbe9c", "repo": "awx", "path": "awx/main/access.py", "file_name": "access.py", "fun_name": "assure_relationship_exists", "commit_message": "DRY edits to access classes for new prompts\n\nRemove if-not-data conditional from WFJTnode.can_change\n these are cannonical for can_add, but this looks like a bug\n\nChange JTaccess.can_unattach to call same method in super()\n previously called can_attach, which is problematic\n\nBetter consolidate launch config m2m related checks\n\nTest and fix pre-existing WFJT node RBAC bug\n\nrecognize not-provided instance group list on launch, avoiding bug where it fell back to default\n\nfix bug where timeout field was saved on WFJT nodes after creating approval node\n\nremove labels from schedule serializer summary_fields\n\nremove unnecessary prefetch of credentials from WFJT node queryset", "code": "def assure_relationship_exists(self, obj, relationship):\n if '.' 
in relationship:\n return # not attempting validation for complex relationships now\n try:\n obj._meta.get_field(relationship)\n except FieldDoesNotExist:\n raise NotImplementedError(f'The relationship {relationship} does not exist for model {type(obj)}')\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 85, "n_words": 31, "vocab_size": 29, "complexity": 3, "nloc": 7, "token_counts": 34, "n_ast_nodes": 71, "n_identifiers": 9, "random_cut": "def assure_relationship_exists(self, obj, relationship):\n if '.' in relationship:\n return # not attempting validation for complex relationships now\n try:\n obj._meta.get_field(relationship)\n " }, { "id": 94532, "commit_id": "7bc55e1ccdfcdce5357c866213851e05d0cbb20e", "repo": "sentry", "path": "tests/sentry/utils/performance_issues/test_performance_detection.py", "file_name": "test_performance_detection.py", "fun_name": "test_calls_slow_span_threshold", "commit_message": "ref(perf): Split threshold for perf issues (#37758)\n\n* ref(perf): Split threshold for perf issues\r\n\r\nThis allows for multiple settings per detector class, which allows us to set different settings for different span ops and more.\r\n\r\nOther:\r\n- Also moved this class out of tasks for clarity to address an earlier comments in PRs\r\n- Cleaned up the tests a bit with some helper functions\r\n- Move the detected span issue object into it's own file for clarity\r\n- Added the directory to codeowners so we don't have to manually add for review", "code": "def test_calls_slow_span_threshold(self):\n http_span_event = create_event(\n [create_span(\"http.client\", 1001.0, \"http://example.com\")] * 1\n )\n db_span_event = create_event([create_span(\"db.query\", 1001.0)] * 1)\n\n sdk_span_mock = Mock()\n\n _detect_performance_issue(http_span_event, sdk_span_mock)\n assert sdk_span_mock.containing_transaction.set_tag.call_count == 0\n\n _detect_performance_issue(db_span_event, sdk_span_mock)\n assert sdk_span_mock.containing_transaction.set_tag.call_count == 3\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 98, "n_words": 32, "vocab_size": 25, "complexity": 1, "nloc": 10, "token_counts": 78, "n_ast_nodes": 120, "n_identifiers": 12, "random_cut": "def test_calls_slow_span_threshold(self):\n http_span_event = create_event(\n [create_span(\"http.client\", 1001.0, \"http://example.com\")] * 1\n )\n db" }, { "id": 205553, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/fields/__init__.py", "file_name": "__init__.py", "fun_name": "deconstruct", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def deconstruct(self):\n \n # Short-form way of fetching all the default parameters\n keywords = {}\n possibles = {\n \"verbose_name\": None,\n \"primary_key\": False,\n \"max_length\": None,\n \"unique\": False,\n \"blank\": False,\n \"null\": False,\n \"db_index\": False,\n \"default\": NOT_PROVIDED,\n \"editable\": True,\n \"serialize\": True,\n \"unique_for_date\": None,\n \"unique_for_month\": None,\n \"unique_for_year\": None,\n \"choices\": None,\n \"help_text\": \"\",\n \"db_column\": None,\n \"db_tablespace\": None,\n \"auto_created\": False,\n \"validators\": [],\n \"error_messages\": None,\n }\n attr_overrides = {\n \"unique\": \"_unique\",\n \"error_messages\": \"_error_messages\",\n \"validators\": \"_validators\",\n 
\"verbose_name\": \"_verbose_name\",\n \"db_tablespace\": \"_db_tablespace\",\n }\n equals_comparison = {\"choices\", \"validators\"}\n for name, default in possibles.items():\n value = getattr(self, attr_overrides.get(name, name))\n # Unroll anything iterable for choices into a concrete list\n if name == \"choices\" and isinstance(value, collections.abc.Iterable):\n value = list(value)\n # Do correct kind of comparison\n if name in equals_comparison:\n if value != default:\n keywords[name] = value\n else:\n if value is not default:\n keywords[name] = value\n # Work out path - we shorten it for known Django core fields\n path = \"%s.%s\" % (self.__class__.__module__, self.__class__.__qualname__)\n if path.startswith(\"django.db.models.fields.related\"):\n path = path.replace(\"django.db.models.fields.related\", \"django.db.models\")\n elif path.startswith(\"django.db.models.fields.files\"):\n path = path.replace(\"django.db.models.fields.files\", \"django.db.models\")\n elif path.startswith(\"django.db.models.fields.json\"):\n path = path.replace(\"django.db.models.fields.json\", \"django.db.models\")\n elif path.startswith(\"django.db.models.fields.proxy\"):\n path = path.replace(\"django.db.models.fields.proxy\", \"django.db.models\")\n elif path.startswith(\"django.db.models.fields\"):\n path = path.replace(\"django.db.models.fields\", \"django.db.models\")\n # Return basic info - other fields should override this.\n return (self.name, path, [], keywords)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 801, "n_words": 196, "vocab_size": 125, "complexity": 12, "nloc": 54, "token_counts": 324, "n_ast_nodes": 578, "n_identifiers": 24, "random_cut": "def deconstruct(self):\n \n # Short-form way of fetching all the default parameters\n keywords = {}\n possibles = {\n \"verbose_name\": None,\n \"primary_key\": False,\n \"max_length\": None,\n \"unique\": False,\n \"blank\": False,\n \"null\": False,\n \"db_index\": False,\n \"default\": NOT_PROVIDED,\n \"editable\": True,\n \"serialize\": True,\n \"unique_for_date\": None,\n \"unique_for_month\": None,\n \"unique_for_year\": None,\n \"choices\": None,\n \"help_text\": \"\",\n \"db_column\": None,\n \"db_tablespace\": None,\n \"auto_created\": False,\n \"validators\": [],\n \"error_messages\": None,\n }\n attr_overrides = {\n \"unique\": \"_unique\",\n \"error_messages\": \"_error_messages\",\n \"validators\": \"_validators\",\n \"verbose_name\": \"_verbose_name\",\n \"db_tablespace\": \"_db_tablespace\",\n }\n equals_comparison = {\"choices\", \"validators\"}\n for name, default in possibles.items():\n value = getattr(self, attr_overrides.get(name, name))\n # Unroll anything iterable for choices into a concrete list\n if name == \"choices\" and isinstance(value, collections.abc.Iterable):\n value = list(value)\n # Do correct kind of comparison\n if name in equals_comparison:\n if value != default:\n keywords[name] = value\n else:\n if value is not default:\n keywords[name] = value\n # Work out path - we shorten it for known Django core fields\n pa" }, { "id": 265106, "commit_id": "0c915f7de9612c7485da3713cc6d63f368698a5d", "repo": "netbox", "path": "netbox/dcim/svg.py", "file_name": "svg.py", "fun_name": "draw_border", "commit_message": "Clean up rack elevation rendering", "code": "def draw_border(self):\n \n border_width = RACK_ELEVATION_BORDER_WIDTH\n border_offset = RACK_ELEVATION_BORDER_WIDTH / 2\n frame = Rect(\n insert=(self.legend_width + 
border_offset, border_offset),\n size=(self.unit_width + border_width, self.rack.u_height * self.unit_height + border_width),\n class_='rack'\n )\n self.drawing.add(frame)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 103, "n_words": 28, "vocab_size": 23, "complexity": 1, "nloc": 9, "token_counts": 64, "n_ast_nodes": 101, "n_identifiers": 17, "random_cut": "def draw_border(self):\n \n border_width = RACK_ELEVATION_BORDER_WIDTH\n border_offset = RACK_ELEVATION_BORDER_WIDTH / 2\n frame = Rect(\n insert=(self.legend_width + border_offset, border_offset),\n size=(self.unit_width + border_width, self.rack.u_height * self.unit_height + border_width),\n " }, { "id": 103459, "commit_id": "061a0c8cb6bcd5e2b685dc6793dc95b488899b4c", "repo": "kitty", "path": "kitty_tests/file_transmission.py", "file_name": "file_transmission.py", "fun_name": "setUp", "commit_message": "Reset global options object in the tests", "code": "def setUp(self):\n super().setUp()\n self.tdir = os.path.realpath(tempfile.mkdtemp())\n self.responses = []\n self.orig_home = os.environ.get('HOME')\n", "url": "https://github.com/kovidgoyal/kitty.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 39, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 46, "n_ast_nodes": 79, "n_identifiers": 13, "random_cut": "def setUp(self):\n super().setUp()\n self.tdir = os" }, { "id": 77477, "commit_id": "25138aed92ba3a4ed95554be81615178ce50c2a9", "repo": "wagtail", "path": "wagtail/tests/test_page_model.py", "file_name": "test_page_model.py", "fun_name": "test_url_path_can_exceed_255_characters", "commit_message": "Check that page.save_revision() was given a specific page object", "code": "def test_url_path_can_exceed_255_characters(self):\n event_index = Page.objects.get(url_path=\"/home/events/\").specific\n christmas_event = EventPage.objects.get(\n url_path=\"/home/events/christmas/\"\n ).specific\n\n # Change the christmas_event slug first - this way, we test that the process for\n # updating child url paths also handles >255 character paths correctly\n new_christmas_slug = \"christmas-%s-christmas\" % (\"0123456789\" * 20)\n christmas_event.slug = new_christmas_slug\n christmas_event.save_revision().publish()\n\n # Change the event index slug and publish it\n new_event_index_slug = \"events-%s-events\" % (\"0123456789\" * 20)\n event_index.slug = new_event_index_slug\n event_index.save_revision().publish()\n\n # Check that the url path updated correctly\n new_christmas_event = EventPage.objects.get(id=christmas_event.id)\n expected_url_path = \"/home/%s/%s/\" % (new_event_index_slug, new_christmas_slug)\n self.assertEqual(new_christmas_event.url_path, expected_url_path)\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 208, "n_words": 86, "vocab_size": 59, "complexity": 1, "nloc": 14, "token_counts": 112, "n_ast_nodes": 197, "n_identifiers": 19, "random_cut": "def test_url_path_can_exceed_255_characters(self):\n event_index = Page.objects.get(url_path=\"/home/events/\").specific\n christmas_event = EventPage.objects.get(\n url_path=\"/home/events/christmas/\"\n ).specific\n\n # Change the christmas_event slug first - this way, we test that the process for\n # updating child " }, { "id": 39372, "commit_id": "3bc519622438ae8088690467c90205b1e5173a00", "repo": 
"recommenders", "path": "tests/unit/recommenders/evaluation/test_python_time.py", "file_name": "test_python_time.py", "fun_name": "test_python_exp_var", "commit_message": "add python time tests", "code": "def test_python_exp_var(rating_true, rating_pred):\n assert exp_var(\n rating_true=rating_true,\n rating_pred=rating_true,\n col_prediction=DEFAULT_RATING_COL,\n ) == pytest.approx(1.0, TOL)\n assert exp_var(rating_true, rating_pred) == pytest.approx(-6.4466, TOL)\n\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 47, "n_words": 18, "vocab_size": 15, "complexity": 1, "nloc": 7, "token_counts": 53, "n_ast_nodes": 71, "n_identifiers": 9, "random_cut": "def test_python_exp_var(rating_true, rating_pred):\n assert exp_var(\n rating_true=rating_true,\n " }, { "id": 306936, "commit_id": "56c4e0391dd4696ee52b20cf2660da8c9cac480b", "repo": "core", "path": "homeassistant/components/hdmi_cec/media_player.py", "file_name": "media_player.py", "fun_name": "media_stop", "commit_message": "Use new media player enums [e-h] (#78049)", "code": "def media_stop(self) -> None:\n \n self.send_keypress(KEY_STOP)\n self._state = MediaPlayerState.IDLE\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 21, "n_ast_nodes": 37, "n_identifiers": 7, "random_cut": "def media_stop(self) -> None:\n \n self.send_keypress(KEY_STO" }, { "id": 48166, "commit_id": "03718194f4fa509f16fcaf3d41ff186dbae5d427", "repo": "airflow", "path": "tests/always/test_project_structure.py", "file_name": "test_project_structure.py", "fun_name": "test_missing_examples", "commit_message": "Tests for provider code structure (#23351)\n\nImproved test for code structure that can be re-used among various providders.", "code": "def test_missing_examples(self):\n \n classes = self.list_of_classes()\n assert 0 != len(classes), \"Failed to retrieve operators, override class_paths if needed\"\n classes = set(classes.keys())\n for example in self.example_paths():\n classes -= get_imports_from_file(example)\n\n covered_but_omitted = self.MISSING_EXAMPLES_FOR_CLASSES - classes\n classes -= self.MISSING_EXAMPLES_FOR_CLASSES\n classes -= self.DEPRECATED_CLASSES\n classes -= self.BASE_CLASSES\n if set() != classes:\n print(\"Classes with missing examples:\")\n print_sorted(classes)\n pytest.fail(\n \"Not all classes are covered with example dags. 
Update self.MISSING_EXAMPLES_FOR_CLASSES \"\n \"if you want to skip this error\"\n )\n if set() != covered_but_omitted:\n print(\"Covered classes that are listed as missing:\")\n print_sorted(covered_but_omitted)\n pytest.fail(\"Operator listed in missing examples but is used in example dag\")\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 288, "n_words": 93, "vocab_size": 64, "complexity": 4, "nloc": 21, "token_counts": 112, "n_ast_nodes": 200, "n_identifiers": 18, "random_cut": "def test_missing_examples(self):\n \n classes = self.list_of_classes()\n assert 0 != len(classes), \"Failed to retrieve operators, override class_paths if needed\"\n classes = set(classes.keys())\n for example in self.example_paths():\n classes -= get_imports_from_file(example)\n\n covered_but_omitted = self.MISSING_EXAMPLES_FOR_CLASSES - classes\n classes -= self.MISSING_EXAMPLES_FOR_CLASSES\n classes -= self.DEPRECATED_CLASSE" }, { "id": 109213, "commit_id": "379d574bef88390aa575690a87ee80ce0980fd35", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_backend_qt.py", "file_name": "test_backend_qt.py", "fun_name": "_test_enums_impl", "commit_message": "Update flake8 ignores", "code": "def _test_enums_impl():\n import sys\n\n from matplotlib.backends.qt_compat import _enum, _to_int\n from matplotlib.backend_bases import cursors, MouseButton\n\n _enum(\"QtGui.QDoubleValidator.State\").Acceptable\n\n _enum(\"QtWidgets.QDialogButtonBox.StandardButton\").Ok\n _enum(\"QtWidgets.QDialogButtonBox.StandardButton\").Cancel\n _enum(\"QtWidgets.QDialogButtonBox.StandardButton\").Apply\n for btn_type in [\"Ok\", \"Cancel\"]:\n getattr(_enum(\"QtWidgets.QDialogButtonBox.StandardButton\"), btn_type)\n\n _enum(\"QtGui.QImage.Format\").Format_ARGB32_Premultiplied\n _enum(\"QtGui.QImage.Format\").Format_ARGB32_Premultiplied\n # SPECIAL_KEYS are Qt::Key that do *not* return their Unicode name instead\n # they have manually specified names.\n SPECIAL_KEYS = {\n _to_int(getattr(_enum(\"QtCore.Qt.Key\"), k)): v\n for k, v in [\n (\"Key_Escape\", \"escape\"),\n (\"Key_Tab\", \"tab\"),\n (\"Key_Backspace\", \"backspace\"),\n (\"Key_Return\", \"enter\"),\n (\"Key_Enter\", \"enter\"),\n (\"Key_Insert\", \"insert\"),\n (\"Key_Delete\", \"delete\"),\n (\"Key_Pause\", \"pause\"),\n (\"Key_SysReq\", \"sysreq\"),\n (\"Key_Clear\", \"clear\"),\n (\"Key_Home\", \"home\"),\n (\"Key_End\", \"end\"),\n (\"Key_Left\", \"left\"),\n (\"Key_Up\", \"up\"),\n (\"Key_Right\", \"right\"),\n (\"Key_Down\", \"down\"),\n (\"Key_PageUp\", \"pageup\"),\n (\"Key_PageDown\", \"pagedown\"),\n (\"Key_Shift\", \"shift\"),\n # In OSX, the control and super (aka cmd/apple) keys are switched.\n (\"Key_Control\", \"control\" if sys.platform != \"darwin\" else \"cmd\"),\n (\"Key_Meta\", \"meta\" if sys.platform != \"darwin\" else \"control\"),\n (\"Key_Alt\", \"alt\"),\n (\"Key_CapsLock\", \"caps_lock\"),\n (\"Key_F1\", \"f1\"),\n (\"Key_F2\", \"f2\"),\n (\"Key_F3\", \"f3\"),\n (\"Key_F4\", \"f4\"),\n (\"Key_F5\", \"f5\"),\n (\"Key_F6\", \"f6\"),\n (\"Key_F7\", \"f7\"),\n (\"Key_F8\", \"f8\"),\n (\"Key_F9\", \"f9\"),\n (\"Key_F10\", \"f10\"),\n (\"Key_F10\", \"f11\"),\n (\"Key_F12\", \"f12\"),\n (\"Key_Super_L\", \"super\"),\n (\"Key_Super_R\", \"super\"),\n ]\n }\n # Define which modifier keys are collected on keyboard events. Elements\n # are (Qt::KeyboardModifiers, Qt::Key) tuples. Order determines the\n # modifier order (ctrl+alt+...) 
reported by Matplotlib.\n _MODIFIER_KEYS = [\n (\n _to_int(getattr(_enum(\"QtCore.Qt.KeyboardModifier\"), mod)),\n _to_int(getattr(_enum(\"QtCore.Qt.Key\"), key)),\n )\n for mod, key in [\n (\"ControlModifier\", \"Key_Control\"),\n (\"AltModifier\", \"Key_Alt\"),\n (\"ShiftModifier\", \"Key_Shift\"),\n (\"MetaModifier\", \"Key_Meta\"),\n ]\n ]\n cursord = {\n k: getattr(_enum(\"QtCore.Qt.CursorShape\"), v)\n for k, v in [\n (cursors.MOVE, \"SizeAllCursor\"),\n (cursors.HAND, \"PointingHandCursor\"),\n (cursors.POINTER, \"ArrowCursor\"),\n (cursors.SELECT_REGION, \"CrossCursor\"),\n (cursors.WAIT, \"WaitCursor\"),\n ]\n }\n\n buttond = {\n getattr(_enum(\"QtCore.Qt.MouseButton\"), k): v\n for k, v in [\n (\"LeftButton\", MouseButton.LEFT),\n (\"RightButton\", MouseButton.RIGHT),\n (\"MiddleButton\", MouseButton.MIDDLE),\n (\"XButton1\", MouseButton.BACK),\n (\"XButton2\", MouseButton.FORWARD),\n ]\n }\n\n _enum(\"QtCore.Qt.WidgetAttribute\").WA_OpaquePaintEvent\n _enum(\"QtCore.Qt.FocusPolicy\").StrongFocus\n _enum(\"QtCore.Qt.ToolBarArea\").TopToolBarArea\n _enum(\"QtCore.Qt.ToolBarArea\").TopToolBarArea\n _enum(\"QtCore.Qt.AlignmentFlag\").AlignRight\n _enum(\"QtCore.Qt.AlignmentFlag\").AlignVCenter\n _enum(\"QtWidgets.QSizePolicy.Policy\").Expanding\n _enum(\"QtWidgets.QSizePolicy.Policy\").Ignored\n _enum(\"QtCore.Qt.MaskMode\").MaskOutColor\n _enum(\"QtCore.Qt.ToolBarArea\").TopToolBarArea\n _enum(\"QtCore.Qt.ToolBarArea\").TopToolBarArea\n _enum(\"QtCore.Qt.AlignmentFlag\").AlignRight\n _enum(\"QtCore.Qt.AlignmentFlag\").AlignVCenter\n _enum(\"QtWidgets.QSizePolicy.Policy\").Expanding\n _enum(\"QtWidgets.QSizePolicy.Policy\").Ignored\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 1074, "n_words": 267, "vocab_size": 206, "complexity": 8, "nloc": 101, "token_counts": 618, "n_ast_nodes": 1101, "n_identifiers": 44, "random_cut": "def _test_enums_impl():\n import sys\n\n from matplotlib.backends.qt_compat import _enum, _to_int\n from matplotlib.backend_bases import cursors, MouseButton\n\n _enum(\"QtGui.QDoubleValidator.State\").Acceptable\n\n _enum(\"QtWidgets.QDialogButtonBox.StandardButton\").Ok\n _enum(\"QtWidgets.QDialogButtonBox.StandardButton\").Cancel\n _enum(\"QtWidgets.QDialogButtonBox.StandardButton\").Apply\n for btn_type in [\"Ok\", \"Cancel\"]:\n getattr(_enum(\"QtWidgets.QDialogButtonBox.StandardButton\"), btn_type)\n\n _enum(\"QtGui.QImage.Format\").Format_ARGB32_Premultiplied\n _enum(\"QtGui.QImage.Format\").Format_ARGB32_Premultiplied\n # SPECIAL_KEYS are Qt::Key that do *not* return their Unicode name instead\n # they have manually specified names.\n SPECIAL_KEYS = {\n _to_int(getattr(_enum(\"QtCore.Qt.Key\"), k)): v\n for k, v in [\n (\"Key_Escape\", \"escape\"),\n (\"Key_Tab\", \"tab\"),\n (\"Key_Backspace\", \"backspace\"),\n (\"Key_Return\", \"enter\"),\n (\"Key_Enter\", \"enter\"),\n (\"Key_Insert\", \"insert\"),\n (\"Key_Delete\", \"delete\"),\n (\"Key_Pause\", \"pause\"),\n (\"Key_SysReq\", \"sysreq\"),\n (\"Key_Clear\", \"clear\"),\n (\"Key_Home\", \"home\"),\n (\"Key_End\", \"end\"),\n (\"Key_Left\", \"left\"),\n (\"Key_Up\", \"up\"),\n (\"Key_Right\", \"right\"),\n (\"Key_Down\", \"down\"),\n (\"Key_PageUp\", \"pageup\"),\n (\"Key_PageDown\", \"pagedown\"),\n (\"Key_Shift\", \"shift\"),\n # In OSX, the control and super (aka cmd/apple) keys are switched.\n (\"Key_Control\", \"control\" if sys.platform != \"darwin\" else \"cmd\"),\n (\"Key_Meta\", \"meta\" 
if sys.platform != \"darwin\" else \"control\"),\n (\"Key_Alt\", \"alt\"),\n (\"Key_CapsLock\", \"caps_lock\"),\n (\"Key_F1\", \"f1\"),\n (\"Key_F2\", \"f2\"),\n (\"Key_F3\", \"f3\"),\n (\"Key_F4\", \"f4\"),\n (\"Key_F5\", \"f5\"),\n (\"Key_F6\", \"f6\"),\n (\"Key_F7\", \"f7\"),\n (\"Key_F8\", \"f8\"),\n (\"Key_F9\", \"f9\"),\n (\"Key_F10\", \"f10\"),\n (\"Key_F10\", \"f11\"),\n (\"Key_F12\", \"f12\"),\n (\"Key_Super_L\", \"super\"),\n (\"Key_Super_R\", \"super\"),\n ]\n }\n # Define which modifier keys are collected on keyboard events. Elements\n # are (Qt::KeyboardModifiers, Qt::Key) tuples. Order determines the\n # modifier order (ctrl+alt+...) reported by Matplotlib.\n _MODIFIER_KEYS = [\n (\n _to_int(getattr(_enum(\"QtCore.Qt.KeyboardModifier\"), mod)),\n _to_int(getattr(_enum(\"QtCore.Qt.Key\"), key)),\n )\n for mod, key in [\n (\"ControlModifier\", \"Key_Control\"),\n (\"AltModifier\", \"Key_Alt\"),\n (\"ShiftModifier\", \"Key_Shift\"),\n (\"MetaModifier\", \"Key_Meta\"),\n ]\n ]\n cursord =" }, { "id": 136335, "commit_id": "76cb42c578adf19a70a6b4401098a7a21e0d3b29", "repo": "ray", "path": "rllib/evaluation/worker_set.py", "file_name": "worker_set.py", "fun_name": "num_in_flight_async_reqs", "commit_message": "[RLlib] Fault tolerant and elastic WorkerSets used across RLlib's algorithms (for sampling and evaluation). (#30118)", "code": "def num_in_flight_async_reqs(self) -> int:\n \n return self.__worker_manager.num_outstanding_async_reqs()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 16, "n_ast_nodes": 29, "n_identifiers": 5, "random_cut": "def num_in_flight_async_reqs(self) -> int:\n \n return self.__worker_ma" }, { "id": 172630, "commit_id": "0aac961cde83c22abd4985f720679aa588f79e05", "repo": "calibre-web", "path": "cps/converter.py", "file_name": "converter.py", "fun_name": "get_unrar_version", "commit_message": "Update readme\nBugfix debug logging during update\nunrar-free is now also recognized for displaying unrar version in about section, removed unused not configured string", "code": "def get_unrar_version():\n unrar_version = _get_command_version(config.config_rarfile_location, r'UNRAR.*\\d')\n if unrar_version == \"not installed\":\n unrar_version = _get_command_version(config.config_rarfile_location, r'unrar.*\\d','-V')\n return unrar_version\n", "url": "https://github.com/janeczku/calibre-web.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 32, "n_words": 17, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 35, "n_ast_nodes": 59, "n_identifiers": 5, "random_cut": "def get_unrar_version():\n unrar_version = _get_command_version(config.config_rarfile_location, r'UNRAR" }, { "id": 26096, "commit_id": "bf654a5f958fcf0611b61cf43ac13c886761b80a", "repo": "saleor", "path": "saleor/payment/gateways/np_atobarai/tests/test_api_helpers.py", "file_name": "test_api_helpers.py", "fun_name": "test_register_no_billing_address", "commit_message": "Port NP Atobarai gateway to 3.1 (#8684)\n\n* Port net protections (#8640) to 3.1\r\n\r\n* Add NP final code review feedback onto 3.1\r\n\r\n* Fix optional sku in NP payload & add docstrings\r\n\r\n* Refactor tracking_number_updated\r\n\r\n* Change NetProtections timeout value to 20\r\n\r\n* Do not use f-strings in logger warnings\r\n\r\n* Trace only http requests\r\n\r\n* Simplify code\r\n\r\n* Add 
comment about longer than usual timeout period\r\n\r\n* Remove order from process payment\r\n\r\n* Add comment for 400 status code\r\n\r\n* Reduce scope of Posuto context manager\r\n\r\n* Refactor voucher and shipping amount for payment lines data\r\n\r\n* Update PaymentResult.psp_reference type to Optional[str]\r\n\r\n* Add handler for report error in transaction reregistration\r\n\r\n* Add docstrings to goods functions\r\n\r\n* Add FOR_REREGISTRATION payment status\r\n\r\n* Refactor create_refund_data\r\n\r\n* Fix refund data\r\n\r\n* Add docstrings to goods functions\r\n\r\n* Add prefetch to _create_refund_manual_amount\r\n\r\n* Move refund logic to NP\r\n\r\n* Fix billing amount for partial refunds\r\n\r\n* Fix multiple shipping refunds\r\n\r\n* Set currency to JPY\r\n\r\n* WIP fix refunds\r\n\r\n* Clean up code\r\n\r\n* Refactor\r\n\r\n* Fix get_goods_with_refunds for all returned products\r\n\r\nCo-authored-by: Mateusz Grzyb ", "code": "def test_register_no_billing_address(config, np_payment_data):\n # given\n np_payment_data.billing = None\n\n # when\n np_response = api_helpers.register(config, np_payment_data)\n\n # then\n assert not np_response.result\n assert np_response.error_codes == [f\"{errors.NO_BILLING_ADDRESS}\"]\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 43, "n_words": 23, "vocab_size": 19, "complexity": 1, "nloc": 5, "token_counts": 36, "n_ast_nodes": 66, "n_identifiers": 11, "random_cut": "def test_register_no_billing_address(config, np_payment_data):\n # given\n np_payment_data.billing = None\n\n # when\n np_response = api_helpers.register(config, np_payment_data)\n\n # then\n assert not np_response.result\n assert np_response.error_codes == [f\"{errors.NO_BILLING_ADDRESS}\"]\n\n" }, { "id": 193816, "commit_id": "7eb5d7fcab73afec976907a855d9e63fa31f5579", "repo": "vision", "path": "test/test_prototype_datasets_builtin.py", "file_name": "test_prototype_datasets_builtin.py", "fun_name": "test_no_simple_tensors", "commit_message": "close streams in prototype datasets (#6647)\n\n* close streams in prototype datasets\r\n\r\n* refactor prototype SBD to avoid closing demux streams at construction time\r\n\r\n* mypy", "code": "def test_no_simple_tensors(self, dataset_mock, config):\n dataset, _ = dataset_mock.load(config)\n\n simple_tensors = {key for key, value in next_consume(iter(dataset)).items() if features.is_simple_tensor(value)}\n if simple_tensors:\n raise AssertionError(\n f\"The values of key(s) \"\n f\"{sequence_to_str(sorted(simple_tensors), separate_last='and ')} contained simple tensors.\"\n )\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 106, "n_words": 34, "vocab_size": 32, "complexity": 4, "nloc": 8, "token_counts": 58, "n_ast_nodes": 113, "n_identifiers": 19, "random_cut": "def test_no_simple_tensors(self, dataset_mock, config):\n dataset, _ = dataset_mock.load(config)\n\n simple_tensors = {key for key, value in next_consume(iter(dataset)).items() if features.is_simple_tensor(va" }, { "id": 167351, "commit_id": "64ad019fae42ea792cc961f903d3c0869025e530", "repo": "pandas", "path": "pandas/core/arrays/timedeltas.py", "file_name": "timedeltas.py", "fun_name": "_scalar_type", "commit_message": "TYP: ndim is consistently a property (#47378)\n\n* TYP: ndim is consistently a property\r\n\r\n* unused import\r\n\r\n* fix test\r\n\r\n* nicer fix", "code": "def 
_scalar_type(self) -> type[Timedelta]:\n return Timedelta\n\n __array_priority__ = 1000\n # define my properties & methods for delegation\n _other_ops: list[str] = []\n _bool_ops: list[str] = []\n _object_ops: list[str] = [\"freq\"]\n _field_ops: list[str] = [\"days\", \"seconds\", \"microseconds\", \"nanoseconds\"]\n _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops\n _datetimelike_methods: list[str] = [\n \"to_pytimedelta\",\n \"total_seconds\",\n \"round\",\n \"floor\",\n \"ceil\",\n ]\n\n # Note: ndim must be defined to ensure NaT.__richcmp__(TimedeltaArray)\n # operates pointwise.\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 141, "n_words": 66, "vocab_size": 51, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 153, "n_identifiers": 13, "random_cut": "def _scalar_type(self) -> type[Timedelta]:\n return Timedelta\n\n __ar" }, { "id": 111359, "commit_id": "a322d6d5f2f85c2da6cded4fcd6143d41b5a9e96", "repo": "spaCy", "path": "spacy/tests/pipeline/test_entity_ruler.py", "file_name": "test_entity_ruler.py", "fun_name": "test_entity_ruler_fix8216", "commit_message": "Add SpanRuler component (#9880)\n\n* Add SpanRuler component\r\n\r\nAdd a `SpanRuler` component similar to `EntityRuler` that saves a list\r\nof matched spans to `Doc.spans[spans_key]`. The matches from the token\r\nand phrase matchers are deduplicated and sorted before assignment but\r\nare not otherwise filtered.\r\n\r\n* Update spacy/pipeline/span_ruler.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Fix cast\r\n\r\n* Add self.key property\r\n\r\n* Use number of patterns as length\r\n\r\n* Remove patterns kwarg from init\r\n\r\n* Update spacy/tests/pipeline/test_span_ruler.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Add options for spans filter and setting to ents\r\n\r\n* Add `spans_filter` option as a registered function'\r\n* Make `spans_key` optional and if `None`, set to `doc.ents` instead of\r\n`doc.spans[spans_key]`.\r\n\r\n* Update and generalize tests\r\n\r\n* Add test for setting doc.ents, fix key property type\r\n\r\n* Fix typing\r\n\r\n* Allow independent doc.spans and doc.ents\r\n\r\n* If `spans_key` is set, set `doc.spans` with `spans_filter`.\r\n* If `annotate_ents` is set, set `doc.ents` with `ents_fitler`.\r\n * Use `util.filter_spans` by default as `ents_filter`.\r\n * Use a custom warning if the filter does not work for `doc.ents`.\r\n\r\n* Enable use of SpanC.id in Span\r\n\r\n* Support id in SpanRuler as Span.id\r\n\r\n* Update types\r\n\r\n* `id` can only be provided as string (already by `PatternType`\r\ndefinition)\r\n\r\n* Update all uses of Span.id/ent_id in Doc\r\n\r\n* Rename Span id kwarg to span_id\r\n\r\n* Update types and docs\r\n\r\n* Add ents filter to mimic EntityRuler overwrite_ents\r\n\r\n* Refactor `ents_filter` to take `entities, spans` args for more\r\n filtering options\r\n* Give registered filters more descriptive names\r\n* Allow registered `filter_spans` filter\r\n (`spacy.first_longest_spans_filter.v1`) to take any number of\r\n `Iterable[Span]` objects as args so it can be used for spans filter\r\n or ents filter\r\n\r\n* Implement future entity ruler as span ruler\r\n\r\nImplement a compatible `entity_ruler` as `future_entity_ruler` using\r\n`SpanRuler` as the underlying component:\r\n* Add `sort_key` and `sort_reverse` to allow the sorting behavior to be\r\n customized. 
(Necessary for the same sorting/filtering as in\r\n `EntityRuler`.)\r\n* Implement `overwrite_overlapping_ents_filter` and\r\n `preserve_existing_ents_filter` to support\r\n `EntityRuler.overwrite_ents` settings.\r\n* Add `remove_by_id` to support `EntityRuler.remove` functionality.\r\n* Refactor `entity_ruler` tests to parametrize all tests to test both\r\n `entity_ruler` and `future_entity_ruler`\r\n* Implement `SpanRuler.token_patterns` and `SpanRuler.phrase_patterns`\r\n properties.\r\n\r\nAdditional changes:\r\n\r\n* Move all config settings to top-level attributes to avoid duplicating\r\n settings in the config vs. `span_ruler/cfg`. (Also avoids a lot of\r\n casting.)\r\n\r\n* Format\r\n\r\n* Fix filter make method name\r\n\r\n* Refactor to use same error for removing by label or ID\r\n\r\n* Also provide existing spans to spans filter\r\n\r\n* Support ids property\r\n\r\n* Remove token_patterns and phrase_patterns\r\n\r\n* Update docstrings\r\n\r\n* Add span ruler docs\r\n\r\n* Fix types\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Move sorting into filters\r\n\r\n* Check for all tokens in seen tokens in entity ruler filters\r\n\r\n* Remove registered sort key\r\n\r\n* Set Token.ent_id in a backwards-compatible way in Doc.set_ents\r\n\r\n* Remove sort options from API docs\r\n\r\n* Update docstrings\r\n\r\n* Rename entity ruler filters\r\n\r\n* Fix and parameterize scoring\r\n\r\n* Add id to Span API docs\r\n\r\n* Fix typo in API docs\r\n\r\n* Include explicit labeled=True for scorer\r\n\r\nCo-authored-by: Sofie Van Landeghem ", "code": "def test_entity_ruler_fix8216(nlp, patterns, entity_ruler_factory):\n \n ruler = nlp.add_pipe(\n entity_ruler_factory, name=\"entity_ruler\", config={\"validate\": True}\n )\n ruler.add_patterns(patterns)\n pattern_count = sum(len(mm) for mm in ruler.matcher._patterns.values())\n assert pattern_count > 0\n ruler.add_patterns([])\n after_count = sum(len(mm) for mm in ruler.matcher._patterns.values())\n assert after_count == pattern_count\n\n\n@pytest.mark.parametrize(\"entity_ruler_factory\", ENTITY_RULERS)", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"entity_ruler_factory\", ENTITY_RULERS)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 71, "n_words": 38, "vocab_size": 27, "complexity": 3, "nloc": 10, "token_counts": 93, "n_ast_nodes": 169, "n_identifiers": 21, "random_cut": "def test_entity_ruler_fix8216(nlp, patterns, entity_ruler_factory):\n \n ruler = nlp.add_pipe(\n entity_ruler_factory, name=\"entity_ruler\", config={\"validate\": True}\n )\n " }, { "id": 57107, "commit_id": "972c832cb3c3eee177980feff58b0172104ef271", "repo": "prefect", "path": "tests/test_storage.py", "file_name": "test_storage.py", "fun_name": "test_write_to_different_scheme", "commit_message": "Add remote and local file systems", "code": "async def test_write_to_different_scheme(self):\n fs = RemoteFileSystem(basepath=\"memory://foo\")\n with pytest.raises(\n ValueError,\n match=\"with scheme 'file' must use the same scheme as the base path 'memory'\",\n ):\n await fs.write_path(\"file://foo/test.txt\", content=b\"hello\")\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 79, "n_words": 26, "vocab_size": 24, "complexity": 1, "nloc": 7, "token_counts": 38, "n_ast_nodes": 69, "n_identifiers": 11, "random_cut": "async def 
test_write_to_different_scheme(self):\n fs = RemoteFileSystem(basepath=\"memory://foo\")\n with pytest.raises(\n ValueError,\n match=\"with scheme 'file' must use the same scheme as the base path 'memory'\",\n ):\n await fs.write_path(\"file://foo/test.txt\", content=b\"hello\")\n" }, { "id": 176620, "commit_id": "de1d00f20e0bc14f1cc911b3486e50225a8fa168", "repo": "networkx", "path": "networkx/algorithms/bipartite/generators.py", "file_name": "generators.py", "fun_name": "complete_bipartite_graph", "commit_message": "Adjust the usage of nodes_or_number decorator (#5599)\n\n* recorrect typo in decorators.py\r\n\r\n* Update tests to show troubles in current code\r\n\r\n* fix troubles with usage of nodes_or_number\r\n\r\n* fix typo\r\n\r\n* remove nodes_or_number where that makes sense\r\n\r\n* Reinclude nodes_or_numbers and add some tests for nonstandard usage\r\n\r\n* fix typowq\r\n\r\n* hopefully final tweaks (no behavior changes\r\n\r\n* Update test_classic.py\r\n\r\nCo-authored-by: Jarrod Millman ", "code": "def complete_bipartite_graph(n1, n2, create_using=None):\n \n G = nx.empty_graph(0, create_using)\n if G.is_directed():\n raise nx.NetworkXError(\"Directed Graph not supported\")\n\n n1, top = n1\n n2, bottom = n2\n if isinstance(n1, numbers.Integral) and isinstance(n2, numbers.Integral):\n bottom = [n1 + i for i in bottom]\n G.add_nodes_from(top, bipartite=0)\n G.add_nodes_from(bottom, bipartite=1)\n if len(G) != len(top) + len(bottom):\n raise nx.NetworkXError(\"Inputs n1 and n2 must contain distinct nodes\")\n G.add_edges_from((u, v) for u in top for v in bottom)\n G.graph[\"name\"] = f\"complete_bipartite_graph({n1}, {n2})\"\n return G\n\n\n@py_random_state(3)", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@py_random_state(3)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 130, "n_words": 74, "vocab_size": 54, "complexity": 8, "nloc": 15, "token_counts": 148, "n_ast_nodes": 250, "n_identifiers": 23, "random_cut": "def complete_bipartite_graph(n1, n2, create_using=None):\n" }, { "id": 189988, "commit_id": "309c9d41eb734ca85a7aea5533f88a6d4ee7c944", "repo": "manim", "path": "manim/mobject/svg/svg_mobject.py", "file_name": "svg_mobject.py", "fun_name": "handle_commands", "commit_message": "Ported improved implementation of :class:`.SVGMobject` from 3b1b/manim (#2898)\n\n* port SVGMobject from 3b1b/manim\r\n\r\n* added svgelements as dependency\r\n\r\n* revert change of default values\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* set default stroke_width of svg elements to 0 if not set\r\n\r\n* fix handling of circles with different rx/ry\r\n\r\n* turn more methods into staticmethods\r\n\r\n* removed duplicated method\r\n\r\n* set/adapt stroke-width of some test SVGs\r\n\r\n* updated control data\r\n\r\n* forgot some control data\r\n\r\n* fixed init_colors in tex_mobject and text_mobject\r\n\r\n* minor changes, added docstrings\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* module docstring, removed import\r\n\r\n* vector_to_coords changed again\r\n\r\n* nail sphinx version to below 5.1 to fix rtd (?)\r\n\r\n* update test_text control data for science\r\n\r\n* changed Brace to use VMobjectFromSVGPath\r\n\r\n* remove unused classes and methods depending on old SVG path implementation\r\n\r\n* remove style_utils and svg_path modules\r\n\r\n* [pre-commit.ci] auto fixes from 
pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* change test_text to use monospace font\r\n\r\n* restore geometry.polygram\r\n\r\n* added get_mobject_type_class auxiliary method; changed polyline implementation to ad-hoc approach\r\n\r\n* restore test_text to previous version\r\n\r\n* skip Use tags as svgelements already populates them\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def handle_commands(self) -> None:\n segment_class_to_func_map = {\n se.Move: (self.start_new_path, (\"end\",)),\n se.Close: (self.close_path, ()),\n se.Line: (self.add_line_to, (\"end\",)),\n se.QuadraticBezier: (\n self.add_quadratic_bezier_curve_to,\n (\"control\", \"end\"),\n ),\n se.CubicBezier: (\n self.add_cubic_bezier_curve_to,\n (\"control1\", \"control2\", \"end\"),\n ),\n }\n for segment in self.path_obj:\n segment_class = segment.__class__\n func, attr_names = segment_class_to_func_map[segment_class]\n points = [\n _convert_point_to_3d(*segment.__getattribute__(attr_name))\n for attr_name in attr_names\n ]\n func(*points)\n\n # Get rid of the side effect of trailing \"Z M\" commands.\n if self.has_new_path_started():\n self.resize_points(self.get_num_points() - 1)\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 335, "n_words": 68, "vocab_size": 57, "complexity": 4, "nloc": 24, "token_counts": 152, "n_ast_nodes": 237, "n_identifiers": 27, "random_cut": "def handle_commands(self) -> None:\n segment_class_to_func_map = {\n se.Move: (self.start_new_path, (\"end\",)),\n se.Close: (self.close_path, ()),\n se.Line: (self.add_line_to, (\"end\",)),\n se.QuadraticBe" }, { "id": 297826, "commit_id": "b0cee0bc46cbd7efe0e6421da18d91595c7a25ad", "repo": "core", "path": "homeassistant/components/evohome/__init__.py", "file_name": "__init__.py", "fun_name": "_update_v1_api_temps", "commit_message": "String formatting and max line length - Part 1 (#84390)\n\nCo-authored-by: Erik Montnemery ", "code": "async def _update_v1_api_temps(self, *args, **kwargs) -> None:\n \n\n assert self.client_v1\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 5, "nloc": 34, "token_counts": 169, "n_ast_nodes": 31, "n_identifiers": 5, "random_cut": "async def _update_v1_api_temps(self, *args, **kwargs) -> None:\n \n\n assert self.client_v1\n" }, { "id": 154083, "commit_id": "02363589aa5105e091fa3d790b29cddf94cc8118", "repo": "modin", "path": "modin/config/envvars.py", "file_name": "envvars.py", "fun_name": "_get_raw_from_config", "commit_message": "REFACTOR-#4629: Add type annotations to `modin/config` (#4685)\n\nSigned-off-by: Karthik Velayutham ", "code": "def _get_raw_from_config(cls) -> str:\n \n if cls.varname is None:\n raise TypeError(\"varname should not be None\")\n return os.environ[cls.varname]\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 48, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 19, "token_counts": 29, "n_ast_nodes": 50, "n_identifiers": 7, "random_cut": "def _get_raw_from_config(cls) -> str:\n \n if cls.varname is None:\n raise TypeError(\"varname should not be None\")" }, { "id": 82899, "commit_id": "f45e418444b572ee5c782b90a0c2cae2489208bc", "repo": 
"examples", "path": "distributed/ddp-tutorial-series/multigpu.py", "file_name": "multigpu.py", "fun_name": "_save_checkpoint", "commit_message": "Add code for DDP tutorial series [PR 1 / 3] (#1067)", "code": "def _save_checkpoint(self, epoch):\n ckp = self.model.module.state_dict()\n PATH = \"checkpoint.pt\"\n torch.save(ckp, PATH)\n print(f\"Epoch {epoch} | Training checkpoint saved at {PATH}\")\n", "url": "https://github.com/pytorch/examples.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 46, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 5, "token_counts": 34, "n_ast_nodes": 65, "n_identifiers": 11, "random_cut": "def _save_checkpoint(self, epoch):\n ckp = self.model.module.state_dict()\n P" }, { "id": 175192, "commit_id": "43aac29cbbb8a963a22c334b5b795d1e43417d6b", "repo": "cpython", "path": "Lib/statistics.py", "file_name": "statistics.py", "fun_name": "mean", "commit_message": "bpo-46257: Convert statistics._ss() to a single pass algorithm (GH-30403)", "code": "def mean(data):\n \n T, total, n = _sum(data)\n if n < 1:\n raise StatisticsError('mean requires at least one data point')\n return _convert(total / n, T)\n\n", "url": "https://github.com/python/cpython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 43, "n_words": 24, "vocab_size": 23, "complexity": 2, "nloc": 5, "token_counts": 35, "n_ast_nodes": 59, "n_identifiers": 8, "random_cut": "def mean(data):\n \n T, total, n = _sum(data)\n if n < 1:\n raise St" }, { "id": 278277, "commit_id": "b0ffc0031e9c1964e7398ca47c6666bbfc0d5086", "repo": "keras", "path": "keras/saving/experimental/saving_lib_test.py", "file_name": "saving_lib_test.py", "fun_name": "test_saving_after_fit", "commit_message": "resolve line-too-long in saving", "code": "def test_saving_after_fit(self):\n temp_dir = os.path.join(self.get_temp_dir(), \"my_model\")\n subclassed_model = self._get_subclassed_model()\n\n x = np.random.random((100, 32))\n y = np.random.random((100, 1))\n subclassed_model.fit(x, y, epochs=1)\n subclassed_model._save_new(temp_dir)\n loaded_model = saving_lib.load(temp_dir)\n\n io_utils.enable_interactive_logging()\n # `tf.print` writes to stderr. 
This is to make sure the custom training\n # step is used.\n with self.captureWritesToStream(sys.stderr) as printed:\n loaded_model.fit(x, y, epochs=1)\n self.assertRegex(printed.contents(), train_step_message)\n\n # Check that the custom classes do get used.\n self.assertIsInstance(loaded_model, CustomModelX)\n self.assertIsInstance(loaded_model.dense1, MyDense)\n # Check that the custom method is available.\n self.assertEqual(loaded_model.one(), 1)\n self.assertEqual(loaded_model.dense1.two(), 2)\n\n # Everything should be the same class or function for the original model\n # and the loaded model.\n for model in [subclassed_model, loaded_model]:\n self.assertIs(\n model.optimizer.__class__,\n keras.optimizers.optimizer_v2.adam.Adam,\n )\n self.assertIs(\n model.compiled_loss.__class__,\n keras.engine.compile_utils.LossesContainer,\n )\n self.assertIs(\n model.compiled_loss._losses[0].__class__,\n keras.losses.LossFunctionWrapper,\n )\n self.assertIs(\n model.compiled_loss._losses[1].__class__,\n keras.losses.LossFunctionWrapper,\n )\n self.assertIs(\n model.compiled_loss._losses[2].__class__,\n keras.losses.MeanSquaredError,\n )\n self.assertIs(\n model.compiled_loss._losses[3].__class__,\n keras.losses.LossFunctionWrapper,\n )\n self.assertIs(\n model.compiled_loss._total_loss_mean.__class__,\n keras.metrics.base_metric.Mean,\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 652, "n_words": 127, "vocab_size": 86, "complexity": 2, "nloc": 45, "token_counts": 325, "n_ast_nodes": 501, "n_identifiers": 56, "random_cut": "def test_saving_after_fit(self):\n temp_dir = os.path.join(self.get_temp_dir(), \"my_model\")\n subclassed_model = self._get_subclassed_model()\n\n x = np.random.random((100, 32))\n y = np.random.random((100, 1))\n subclassed_model.fit(x, y, epochs=1)\n subclassed_model._save_new(temp_dir)\n loaded_model = saving_lib.load(temp_dir)\n\n io_utils.enable_interactive_logging()\n # `tf.print` writes to stderr. This is to make sure the custom training\n # step is used.\n with self.captureWritesToStream(sys.stderr) as printed:\n loaded_model.fit(x, y, epochs=1)\n self.assertRegex(printed.contents(), train_step_message)\n\n # Check " }, { "id": 205179, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/sqlite3/base.py", "file_name": "base.py", "fun_name": "disable_constraint_checking", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def disable_constraint_checking(self):\n with self.cursor() as cursor:\n cursor.execute(\"PRAGMA foreign_keys = OFF\")\n # Foreign key constraints cannot be turned off while in a multi-\n # statement transaction. 
Fetch the current state of the pragma\n # to determine if constraints are effectively disabled.\n enabled = cursor.execute(\"PRAGMA foreign_keys\").fetchone()[0]\n return not bool(enabled)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 115, "n_words": 47, "vocab_size": 41, "complexity": 1, "nloc": 5, "token_counts": 41, "n_ast_nodes": 78, "n_identifiers": 7, "random_cut": "def disable_constraint_checking(self):\n " }, { "id": 126145, "commit_id": "acf2bf9b2fa9f6cac8c599ec1eea6a9d5249905f", "repo": "ray", "path": "rllib/utils/replay_buffers/tests/test_multi_agent_replay_buffer.py", "file_name": "test_multi_agent_replay_buffer.py", "fun_name": "_add_sample_batch_to_buffer", "commit_message": "[RLlib] Get rid of all these deprecation warnings. (#27085)", "code": "def _add_sample_batch_to_buffer(self, buffer, batch_size, num_batches=5, **kwargs):\n self.eps_id = 0\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 15, "n_words": 9, "vocab_size": 9, "complexity": 3, "nloc": 8, "token_counts": 65, "n_ast_nodes": 30, "n_identifiers": 7, "random_cut": "def _add_sample_batch_to_buffer(self, buffer, batch_size, num_batches=5, **kwargs):\n " }, { "id": 186755, "commit_id": "fe0b637e4d5acf3d24ba0d5e3feca440c2a32717", "repo": "certbot", "path": "certbot/tests/display/internal_util_test.py", "file_name": "internal_util_test.py", "fun_name": "test_title_and_detail", "commit_message": "display acme.Errors less verbosely (#9255)\n\n* display acme.Errors less verbosely\r\n\r\n* remove superfluous import", "code": "def test_title_and_detail(self):\n self.assertEqual(\"Unacceptable CSR :: CSR contained unknown extensions\", self._call())\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 16, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 30, "n_identifiers": 4, "random_cut": "def test_title_and_detail(self):\n self.assertEqual(\"Unacceptable CSR :: CSR contained unkn" }, { "id": 19532, "commit_id": "3387881a6d4fc2d8bdc0f05c484cb2f7222acfb8", "repo": "pipenv", "path": "pipenv/utils/resolver.py", "file_name": "resolver.py", "fun_name": "_get_file_hash", "commit_message": "Code reorg utils into utils module reduces complexity (#4990)\n\n* Split apart the massive utils.py into a utils module", "code": "def _get_file_hash(self, link):\n from pipenv.vendor.pip_shims import shims\n\n h = hashlib.new(shims.FAVORITE_HASH)\n with open_file(link.url, self.session) as fp:\n for chunk in iter(lambda: fp.read(8096), b\"\"):\n h.update(chunk)\n return \":\".join([h.name, h.hexdigest()])\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 78, "n_words": 25, "vocab_size": 25, "complexity": 2, "nloc": 7, "token_counts": 80, "n_ast_nodes": 127, "n_identifiers": 22, "random_cut": "def _get_file_hash(self, link):\n from pipenv.vendor.pip_shims import shims\n\n h = hashlib.new(shims.FAVORITE_HASH)\n with open_file(link.url, self.session) as" }, { "id": 171849, "commit_id": "bc987e708b9856f5d5c8cf3096e1e2bcf23e1121", "repo": "pandas", "path": "pandas/core/indexes/base.py", "file_name": "base.py", "fun_name": "get_loc", "commit_message": "DEPR: Remove method and tolerance in Index.get_loc, bump xarray 
(#49630)\n\n* DEPR: Remove method and tolerance in Index.get_loc\r\n\r\n* note xarray bump\r\n\r\n* Fix tests\r\n\r\n* Fix refactor in period\r\n\r\n* Lighter parameterization\r\n\r\n* xfail xarray test\r\n\r\n* Just use get_indexer", "code": "def get_loc(self, key):\n \n casted_key = self._maybe_cast_indexer(key)\n try:\n return self._engine.get_loc(casted_key)\n except KeyError as err:\n raise KeyError(key) from err\n except TypeError:\n # If we have a listlike key, _check_indexing_error will raise\n # InvalidIndexError. Otherwise we fall through and re-raise\n # the TypeError.\n self._check_indexing_error(key)\n raise\n\n _index_shared_docs[\n \"get_indexer\"\n ] = \n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 174, "n_words": 46, "vocab_size": 39, "complexity": 3, "nloc": 9, "token_counts": 49, "n_ast_nodes": 98, "n_identifiers": 11, "random_cut": "def get_loc(self, key):\n \n casted_key = self._maybe_cast_indexer(key)\n try:\n return self._engine.get_loc(casted_key)\n except KeyError as err:\n raise KeyError(key) from err\n except TypeError:\n # If we have a listlike key, _check_indexing_error will raise\n # InvalidIndexError. Otherwise we fall through and re-raise\n # the TypeError.\n self._check_indexing_error(key)\n " }, { "id": 37525, "commit_id": "18df440709f1b19d1c5617c0d987c5ff8fd0915d", "repo": "transformers", "path": "src/transformers/modeling_tf_utils.py", "file_name": "modeling_tf_utils.py", "fun_name": "input_processing", "commit_message": "Replace dict/BatchEncoding instance checks by Mapping (#17014)\n\n* Replace dict/BatchEncoding instance checks by Mapping\r\n\r\n* Typo", "code": "def input_processing(func, config, input_ids, **kwargs):\n \n signature = dict(inspect.signature(func).parameters)\n has_kwargs = bool(signature.pop(\"kwargs\", None))\n signature.pop(\"self\", None)\n parameter_names = list(signature.keys())\n output = {}\n allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray, KerasTensor)\n\n if \"inputs\" in kwargs[\"kwargs_call\"]:\n warnings.warn(\n \"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.\",\n FutureWarning,\n )\n\n output[\"input_ids\"] = kwargs[\"kwargs_call\"].pop(\"inputs\")\n\n if \"decoder_cached_states\" in kwargs[\"kwargs_call\"]:\n warnings.warn(\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n output[\"past_key_values\"] = kwargs[\"kwargs_call\"].pop(\"decoder_cached_states\")\n\n if \"past\" in kwargs[\"kwargs_call\"] and \"past_key_values\" in parameter_names:\n warnings.warn(\n \"The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n kwargs[\"past_key_values\"] = kwargs[\"kwargs_call\"].pop(\"past\")\n elif \"past_key_values\" in kwargs[\"kwargs_call\"] and \"past\" in parameter_names:\n kwargs[\"past\"] = kwargs[\"kwargs_call\"].pop(\"past_key_values\")\n\n if has_kwargs:\n output[\"kwargs\"] = kwargs.pop(\"kwargs_call\", {})\n else:\n if len(kwargs[\"kwargs_call\"]) > 0:\n raise ValueError(\n f\"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}.\"\n )\n kwargs.pop(\"kwargs_call\")\n\n for k, v in kwargs.items():\n if isinstance(v, allowed_types) or v is None:\n output[k] = v\n else:\n raise ValueError(f\"Data of type 
{type(v)} is not allowed only {allowed_types} is accepted for {k}.\")\n\n if isinstance(input_ids, (tuple, list)):\n for i, input in enumerate(input_ids):\n # EagerTensors don't allow to use the .name property so we check for a real Tensor\n if type(input) == tf.Tensor:\n # Tensor names have always the pattern `name:id` then we check only the\n # `name` part\n tensor_name = input.name.split(\":\")[0]\n\n if tensor_name in parameter_names:\n output[tensor_name] = input\n else:\n output[parameter_names[i]] = input\n elif isinstance(input, allowed_types) or input is None:\n output[parameter_names[i]] = input\n else:\n raise ValueError(\n f\"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}.\"\n )\n elif isinstance(input_ids, Mapping):\n if \"inputs\" in input_ids:\n warnings.warn(\n \"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.\",\n FutureWarning,\n )\n\n output[\"input_ids\"] = input_ids.pop(\"inputs\")\n\n if \"decoder_cached_states\" in input_ids:\n warnings.warn(\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n output[\"past_key_values\"] = input_ids.pop(\"decoder_cached_states\")\n\n for k, v in dict(input_ids).items():\n if isinstance(v, allowed_types) or v is None:\n output[k] = v\n elif k not in parameter_names and \"args\" not in parameter_names:\n logger.warning(\n f\"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored.\"\n )\n continue\n else:\n raise ValueError(f\"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.\")\n else:\n if isinstance(input_ids, (tf.Tensor, KerasTensor)) or input_ids is None:\n output[parameter_names[0]] = input_ids\n else:\n raise ValueError(\n f\"Data of type {type(input_ids)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}.\"\n )\n\n # Populates any unspecified argument with their default value, according to the signature.\n for name in parameter_names:\n if name not in list(output.keys()) and name != \"args\":\n output[name] = kwargs.pop(name, signature[name].default)\n\n # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)\n # So to respect the proper output we have to add this exception\n if \"args\" in output:\n if output[\"args\"] is not None and type(output[\"args\"]) == tf.Tensor:\n tensor_name = output[\"args\"].name.split(\":\")[0]\n output[tensor_name] = output[\"args\"]\n else:\n # `args` in this case is always the first parameter, then `input_ids`\n output[\"input_ids\"] = output[\"args\"]\n\n del output[\"args\"]\n\n if \"kwargs\" in output:\n del output[\"kwargs\"]\n\n boolean_dict = {\n k: v\n for k, v in output.items()\n if k in [\"return_dict\", \"output_attentions\", \"output_hidden_states\", \"use_cache\"]\n }\n\n output.update(\n booleans_processing(\n config=config,\n **boolean_dict,\n )\n )\n\n return output\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 1557, "n_words": 532, "vocab_size": 219, "complexity": 37, "nloc": 108, "token_counts": 701, "n_ast_nodes": 1290, "n_identifiers": 48, "random_cut": "def input_processing(func, config, input_ids, **kwargs):\n \n signature = dict(inspect.signature(func).parameters)\n has_kwargs = bool(signature.pop(\"kwargs\", None))\n 
signature.pop(\"self\", None)\n parameter_names = list(signature.keys())\n output = {}\n allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray, KerasTensor)\n\n if \"inputs\" in kwargs[\"kwargs_call\"]:\n warnings.warn(\n \"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.\",\n FutureWarning,\n )\n\n output[\"input_ids\"] = kwargs[\"kwargs_call\"].pop(\"inputs\")\n\n if \"decoder_cached_states\" in kwargs[\"kwargs_call\"]:\n warnings.warn(\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n output[\"past_key_values\"] = kwargs[\"kwargs_call\"].pop(\"decoder_cached_states\")\n\n if \"past\" in kwargs[\"kwargs_call\"] and \"past_key_values\" in parameter_names:\n warnings.warn(\n \"The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n kwargs[\"past_key_values\"] = kwargs[\"kwargs_call\"].pop(\"past\")\n elif \"past_key_values\" in kwargs[\"kwargs_call\"] and \"past\" in parameter_names:\n kwargs[\"past\"] = kwargs[\"kwargs_call\"].pop(\"past_key_values\")\n\n if has_kwargs:\n output[\"kwargs\"] = kwargs.pop(\"kwargs_call\", {})\n else:\n if len(kwargs[\"kwargs_call\"]) > 0:\n raise ValueError(\n f\"The following keyword ar" }, { "id": 161357, "commit_id": "f220a5085bdbc2a6f1b697b2f3fb9ce3a402f2d6", "repo": "rich", "path": "tests/test_segment.py", "file_name": "test_segment.py", "fun_name": "test_align_bottom", "commit_message": "formatting", "code": "def test_align_bottom():\n lines = [[Segment(\"X\")]]\n assert Segment.align_bottom(lines, 3, 1, Style()) == lines\n assert Segment.align_bottom(lines, 3, 3, Style()) == [\n [Segment(\" \", Style())],\n [Segment(\" \", Style())],\n [Segment(\"X\")],\n ]\n", "url": "https://github.com/Textualize/rich.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 63, "n_words": 27, "vocab_size": 17, "complexity": 1, "nloc": 8, "token_counts": 78, "n_ast_nodes": 124, "n_identifiers": 5, "random_cut": "def test_align_bottom():\n lines = [[Segment(\"X\")]]\n assert Segment.align_bottom(lines, 3, 1, Style()) == lines\n assert Segment.align_bottom(lines, 3, 3, Style" }, { "id": 286662, "commit_id": "cca41d4f085dfc64b6cf88aa46f893960836bd0b", "repo": "OpenBBTerminal", "path": "openbb_terminal/stocks/screener/screener_controller.py", "file_name": "screener_controller.py", "fun_name": "call_ca", "commit_message": "Fixing various bugs (#3474)\n\n* Fixed warning message\r\n\r\n* Refactored screener menu\r\n\r\n* Pylint fix\r\n\r\n* Fixed bugs\r\n\r\n* With bug\r\n\r\n* Choices : update error message\r\n\r\n* Fixed issues\r\n\r\n* Fixed more bugs\r\n\r\n* Fixed more bugs\r\n\r\nCo-authored-by: Chavithra PARANA ", "code": "def call_ca(self, _):\n \n if self.screen_tickers:\n self.queue = self.load_class(\n ca_controller.ComparisonAnalysisController,\n self.screen_tickers,\n self.queue,\n )\n else:\n console.print(\n \"Please select a screener using 'set' and then run 'historical' \"\n \"before going to the CA menu.\\n\"\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 172, "n_words": 32, "vocab_size": 31, "complexity": 2, "nloc": 12, "token_counts": 43, "n_ast_nodes": 74, "n_identifiers": 10, "random_cut": "def call_ca(self, _):\n \n if 
self.screen_tickers:\n self.queue = self.load_class(\n ca_controller.ComparisonAnalysisController,\n self.screen_tickers,\n self.queue,\n )\n else:\n console.print(\n \"Please select a screener using 'set' and then run 'historic" }, { "id": 130885, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/serve/deployment_state.py", "file_name": "deployment_state.py", "fun_name": "actor_handle", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def actor_handle(self) -> Optional[ActorHandle]:\n if not self._actor_handle:\n try:\n self._actor_handle = ray.get_actor(\n self._actor_name, namespace=self._controller_namespace\n )\n except ValueError:\n self._actor_handle = None\n\n return self._actor_handle\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 120, "n_words": 21, "vocab_size": 18, "complexity": 3, "nloc": 9, "token_counts": 48, "n_ast_nodes": 76, "n_identifiers": 11, "random_cut": "def actor_handle(self) -> Optional[ActorHandle]:\n if not self._actor_handle:\n try:\n self._actor_handle = ray.get_actor(\n self._actor_name, namespace=self._controller_namespace\n )\n except ValueError:\n se" }, { "id": 265010, "commit_id": "cf7a0913191fa0379e4f11de9736c688c0627447", "repo": "netbox", "path": "netbox/dcim/svg/racks.py", "file_name": "racks.py", "fun_name": "_add_gradient", "commit_message": "Refactor SVG module", "code": "def _add_gradient(drawing, id_, color):\n gradient = drawing.linearGradient(\n start=(0, 0),\n end=(0, 25),\n spreadMethod='repeat',\n id_=id_,\n gradientTransform='rotate(45, 0, 0)',\n gradientUnits='userSpaceOnUse'\n )\n gradient.add_stop_color(offset='0%', color='#f7f7f7')\n gradient.add_stop_color(offset='50%', color='#f7f7f7')\n gradient.add_stop_color(offset='50%', color=color)\n gradient.add_stop_color(offset='100%', color=color)\n drawing.defs.add(gradient)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 141, "n_words": 27, "vocab_size": 24, "complexity": 1, "nloc": 14, "token_counts": 103, "n_ast_nodes": 167, "n_identifiers": 15, "random_cut": "def _add_gradient(drawing, id_, color):\n gradient = drawing.linearGradient(\n start=(0, 0),\n end=(0, 25),\n spreadMethod='repeat',\n id_=id_,\n gradientTransform='rotate(45, 0, 0)',\n gradientUnits='userSpaceOnUse'\n )\n gradient.add_stop_color(offset='0%', color='#f7f7f7')\n gradient.add_stop_color(offset='50%', color='#f7f7f7')\n gradient.add_stop_color(offset='50%', color=color)\n gradient.add_stop_color(offset='100%', color=color)\n drawing.def" }, { "id": 270137, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/datasets/imdb.py", "file_name": "imdb.py", "fun_name": "get_word_index", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_word_index(path=\"imdb_word_index.json\"):\n \n origin_folder = (\n \"https://storage.googleapis.com/tensorflow/tf-keras-datasets/\"\n )\n path = get_file(\n path,\n origin=origin_folder + \"imdb_word_index.json\",\n file_hash=\"bfafd718b763782e994055a2d397834f\",\n )\n with open(path) as f:\n return json.load(f)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 75, 
"n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 11, "token_counts": 45, "n_ast_nodes": 83, "n_identifiers": 10, "random_cut": "def get_word_index(path=\"imdb_word_index.json\"):\n \n origin_folder = (\n \"https://storage.googleapis.com/tensorflow/tf-keras-datasets/\"\n )\n path = get_file(\n path,\n origin=origin_folder + \"imdb_word_index.json\",\n file_hash=\"bfafd718b763782e994055a2d3978" }, { "id": 113995, "commit_id": "1552c3b72ed13e12e86be90506fa34504298695c", "repo": "mindsdb", "path": "mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", "file_name": "mysql_proxy.py", "fun_name": "answer_describe_predictor", "commit_message": "DESCRIBE to accept [predictor_name].[features, model, etc] syntax (#1938)\n\n* DESCRIBE to accept [predictor_name].[features, model, etc] syntax", "code": "def answer_describe_predictor(self, predictor_value):\n predictor_attr = None\n if isinstance(predictor_value, (list, tuple)):\n predictor_name = predictor_value[0]\n predictor_attr = predictor_value[1]\n else:\n predictor_name = predictor_value\n model_interface = self.session.model_interface\n models = model_interface.get_models()\n if predictor_name not in [x['name'] for x in models]:\n raise ErBadTableError(f\"Can't describe predictor. There is no predictor with name '{predictor_name}'\")\n description = model_interface.get_model_description(predictor_name)\n\n if predictor_attr is None:\n description = [\n description['accuracies'],\n description['column_importances'],\n description['outputs'],\n description['inputs'],\n description['datasource'],\n description['model']\n ]\n packages = self.get_tabel_packets(\n columns=[{\n 'table_name': '',\n 'name': 'accuracies',\n 'type': TYPES.MYSQL_TYPE_VAR_STRING\n }, {\n 'table_name': '',\n 'name': 'column_importances',\n 'type': TYPES.MYSQL_TYPE_VAR_STRING\n }, {\n 'table_name': '',\n 'name': \"outputs\",\n 'type': TYPES.MYSQL_TYPE_VAR_STRING\n }, {\n 'table_name': '',\n 'name': 'inputs',\n 'type': TYPES.MYSQL_TYPE_VAR_STRING\n }, {\n 'table_name': '',\n 'name': 'datasource',\n 'type': TYPES.MYSQL_TYPE_VAR_STRING\n }, {\n 'table_name': '',\n 'name': 'model',\n 'type': TYPES.MYSQL_TYPE_VAR_STRING\n }],\n data=[description]\n )\n else:\n data = model_interface.get_model_data(predictor_name)\n if predictor_attr == \"features\":\n data = self._get_features_info(data)\n packages = self.get_tabel_packets(\n columns=[{\n 'table_name': '',\n 'name': 'column',\n 'type': TYPES.MYSQL_TYPE_VAR_STRING\n }, {\n 'table_name': '',\n 'name': 'type',\n 'type': TYPES.MYSQL_TYPE_VAR_STRING\n }, {\n 'table_name': '',\n 'name': \"encoder\",\n 'type': TYPES.MYSQL_TYPE_VAR_STRING\n }, {\n 'table_name': '',\n 'name': 'role',\n 'type': TYPES.MYSQL_TYPE_VAR_STRING\n }],\n data=data\n )\n elif predictor_attr == \"model\":\n data = self._get_model_info(data)\n packages = self.get_tabel_packets(\n columns=[{\n 'table_name': '',\n 'name': 'name',\n 'type': TYPES.MYSQL_TYPE_VAR_STRING\n }, {\n 'table_name': '',\n 'name': 'performance',\n 'type': TYPES.MYSQL_TYPE_VAR_STRING\n }, {\n 'table_name': '',\n 'name': \"selected\",\n 'type': TYPES.MYSQL_TYPE_VAR_STRING\n }],\n data=data\n )\n else:\n raise ErNotSupportedYet(\"DESCRIBE '%s' predictor attribute is not supported yet\" % predictor_attr)\n packages.append(self.last_packet())\n self.send_package_group(packages)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1771, "n_words": 214, "vocab_size": 91, "complexity": 7, "nloc": 95, 
"token_counts": 433, "n_ast_nodes": 774, "n_identifiers": 29, "random_cut": "def answer_describe_predictor(self, predictor_value):\n predictor_attr" }, { "id": 337423, "commit_id": "2d7fbbdc73670b96dbc8b3f875cfe147db4d9241", "repo": "accelerate", "path": "src/accelerate/test_utils/testing.py", "file_name": "testing.py", "fun_name": "slow", "commit_message": "Create Cross-Validation example (#317)", "code": "def slow(test_case):\n \n if not _run_slow_tests:\n return unittest.skip(\"test is slow\")(test_case)\n else:\n return test_case\n\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 35, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 24, "n_ast_nodes": 45, "n_identifiers": 5, "random_cut": "def slow(test_case):\n \n if not _run_slow_tests:\n return unittest.skip(\"test is slow\")(test_case)\n else:\n return test_case\n\n" }, { "id": 133273, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/sgd/tf/examples/tensorflow_train_example.py", "file_name": "tensorflow_train_example.py", "fun_name": "train_example", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def train_example(num_replicas=1, batch_size=128, use_gpu=False):\n trainer = TFTrainer(\n model_creator=simple_model,\n data_creator=simple_dataset,\n num_replicas=num_replicas,\n use_gpu=use_gpu,\n verbose=True,\n config=create_config(batch_size),\n )\n\n # model baseline performance\n start_stats = trainer.validate()\n print(start_stats)\n\n # train for 2 epochs\n trainer.train()\n trainer.train()\n\n # model performance after training (should improve)\n end_stats = trainer.validate()\n print(end_stats)\n\n # sanity check that training worked\n dloss = end_stats[\"validation_loss\"] - start_stats[\"validation_loss\"]\n dmse = (\n end_stats[\"validation_mean_squared_error\"]\n - start_stats[\"validation_mean_squared_error\"]\n )\n print(f\"dLoss: {dloss}, dMSE: {dmse}\")\n\n if dloss > 0 or dmse > 0:\n print(\"training sanity check failed. 
loss increased!\")\n else:\n print(\"success!\")\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 201, "n_words": 78, "vocab_size": 59, "complexity": 3, "nloc": 25, "token_counts": 127, "n_ast_nodes": 222, "n_identifiers": 20, "random_cut": "def train_example(num_replicas=1, batch_size=128, use_gpu=False):\n trainer = TFTrainer(\n model_creator=simple_model,\n data_creator=simple_dataset,\n num_replicas=num_replicas,\n use_gpu=use_gpu,\n verbose=True,\n config=create_config(batch_size),\n )\n\n # model baseline performance\n start_stats = trainer.validate()\n print(start_stats)\n\n # train for 2 epochs\n trainer.train()\n trainer.train()\n\n # model performance after training (should improve)\n end_stats = trainer.validate()\n print(end_stats)\n\n # sanity check that training worked\n dloss = end_stats[\"validation_loss\"] - start_stats[\"validation_loss\"]\n dmse = (\n end_stats[\"validation_mean_squared_error\"]\n - start_stats[\"validation_" }, { "id": 171063, "commit_id": "ceebce6f4f074887ce2c27f2342d8d618b4037e0", "repo": "pandas", "path": "pandas/core/indexes/base.py", "file_name": "base.py", "fun_name": "_convert_can_do_setop", "commit_message": "CLN: assorted (#49590)", "code": "def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]:\n if not isinstance(other, Index):\n other = Index(other, name=self.name)\n result_name = self.name\n else:\n result_name = get_op_result_name(self, other)\n return other, result_name\n\n # --------------------------------------------------------------------\n # Indexing Methods\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 89, "n_words": 30, "vocab_size": 24, "complexity": 2, "nloc": 7, "token_counts": 54, "n_ast_nodes": 84, "n_identifiers": 10, "random_cut": "def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]:\n if not isinstance(other, Index):\n other = Index(other, name=self.name)\n result_name = self.name\n else:\n result_name = get_op_result_name(self, other)\n return other, result_name\n\n # --------------------------------------------------------------------\n # Indexing Methods\n" }, { "id": 50182, "commit_id": "ffcde21305c61d950a9f93e57e6180c9a9665b87", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/beam.py", "file_name": "beam.py", "fun_name": "_expand_to_beam_size", "commit_message": "add disco_diffusion_ernievil_base", "code": "def _expand_to_beam_size(self, x):\n r\n check_type(x, 'x', (Variable), 'BeamSearchDecoder._expand_to_beam_size')\n x = nn.unsqueeze(x, [1])\n expand_times = [1] * len(x.shape)\n expand_times[1] = self.beam_size\n x = paddle.tile(x, expand_times)\n return x\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 74, "n_words": 26, "vocab_size": 21, "complexity": 1, "nloc": 22, "token_counts": 65, "n_ast_nodes": 102, "n_identifiers": 13, "random_cut": "def _expand_to_beam_size(self, x):\n r\n check_type(x, 'x', (Variable), 'BeamSearchDecoder._expand_to_be" }, { "id": 194793, "commit_id": "82df52b4431f3573ca2c93dd4bb3098992968acc", "repo": "ParlAI", "path": "tests/nightly/gpu/test_style_gen.py", "file_name": "test_style_gen.py", "fun_name": "test_curr_only_accuracy", "commit_message": "[Style-Controlled Generation] Open-source 
a second style classifier (#4380)\n\n* Add model to model list\r\n\r\n* Curr only classifier download page\r\n\r\n* Add test case\r\n\r\n* Update version\r\n\r\n* Update with some results\r\n\r\n* Wording", "code": "def test_curr_only_accuracy(self):\n \n _, test = testing_utils.eval_model(\n opt={\n 'batchsize': 4,\n 'fp16': True,\n 'num_examples': 16,\n 'model_file': 'zoo:style_gen/curr_only_classifier/model',\n 'model': 'projects.style_gen.classifier:ClassifierAgent',\n 'classes_from_file': 'image_chat_personalities_file',\n 'task': 'style_gen:CurrUttOnlyStyle',\n 'wrapper_task': 'style_gen:LabeledBlendedSkillTalk',\n },\n skip_valid=True,\n )\n self.assertAlmostEqual(test['accuracy'], 0.4375, delta=0.0)\n\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 210, "n_words": 29, "vocab_size": 29, "complexity": 1, "nloc": 15, "token_counts": 75, "n_ast_nodes": 129, "n_identifiers": 10, "random_cut": "def test_curr_only_accuracy(self):\n \n _, test = testing_utils.eval_model(\n opt={\n 'batchsize': 4,\n 'fp16': True,\n 'num_examples': 16,\n 'model_file': 'zoo:style_gen/cu" }, { "id": 291864, "commit_id": "e2308fd15cec4dfdd25d843b72cd3071657fd5b8", "repo": "core", "path": "homeassistant/components/matter/config_flow.py", "file_name": "config_flow.py", "fun_name": "_async_start_addon", "commit_message": "Add matter integration BETA (#83064)\n\n* Add matter base (#79372)\r\n\r\nCo-authored-by: Marcel van der Veldt \r\n\r\n* Add matter server add-on flow (#82698)\r\n\r\n* Add matter server add-on flow\r\n\r\n* Fix stale error argument\r\n\r\n* Clean docstrings\r\n\r\n* Use localhost as default address\r\n\r\n* Add matter websocket api foundation (#82848)\r\n\r\n* Add matter config entry add-on management (#82865)\r\n\r\n* Use matter refactored server/client library (#83003)\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Bump python-matter-server to 1.0.6 (#83059)\r\n\r\n* Extend matter websocket api (#82948)\r\n\r\n* Extend matter websocket api\r\n\r\n* Finish docstring\r\n\r\n* Fix pin type\r\n\r\n* Adjust api after new client\r\n\r\n* Adjust api to frontend for now\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "async def _async_start_addon(self) -> None:\n \n addon_manager: AddonManager = get_addon_manager(self.hass)\n\n try:\n await addon_manager.async_schedule_start_addon()\n # Sleep some seconds to let the add-on start properly before connecting.\n for _ in range(ADDON_SETUP_TIMEOUT_ROUNDS):\n await asyncio.sleep(ADDON_SETUP_TIMEOUT)\n try:\n if not (ws_address := self.ws_address):\n discovery_info = await self._async_get_addon_discovery_info()\n ws_address = self.ws_address = build_ws_address(\n discovery_info[\"host\"], discovery_info[\"port\"]\n )\n await validate_input(self.hass, {CONF_URL: ws_address})\n except (AbortFlow, CannotConnect) as err:\n LOGGER.debug(\n \"Add-on not ready yet, waiting %s seconds: %s\",\n ADDON_SETUP_TIMEOUT,\n err,\n )\n else:\n break\n else:\n raise CannotConnect(\"Failed to start Matter Server add-on: timeout\")\n finally:\n # Continue the flow after show progress when the task is done.\n self.hass.async_create_task(\n self.hass.config_entries.flow.async_configure(flow_id=self.flow_id)\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 551, "n_words": 96, "vocab_size": 80, "complexity": 5, "nloc": 28, "token_counts": 147, "n_ast_nodes": 249, 
"n_identifiers": 29, "random_cut": "async def _async_start_addon(self) -> None:\n \n addon_manager: AddonManager = get_addon_manager(self.hass)\n\n try:\n await addon_manager.async_schedule_start_addon()\n # Sleep some seconds to let the add-on start properly before connecting.\n for _ in range(ADDON_SETUP_TIMEOUT_ROUN" }, { "id": 217637, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/hashlib.py", "file_name": "hashlib.py", "fun_name": "__py_new", "commit_message": "add python 3.10.4 for windows", "code": "def __py_new(name, data=b'', **kwargs):\n \n return __get_builtin_constructor(name)(data, **kwargs)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 25, "n_ast_nodes": 41, "n_identifiers": 5, "random_cut": "def __py_new(name, data=b'', **kwargs):\n \n return __get_builtin_constructor(name)(data, **kwa" }, { "id": 289777, "commit_id": "3759be09df09be61a4b880eaa58c7d9d8a099080", "repo": "core", "path": "homeassistant/components/jellyfin/media_player.py", "file_name": "media_player.py", "fun_name": "_handle_coordinator_update", "commit_message": "Add media_player platform to Jellyfin (#76801)", "code": "def _handle_coordinator_update(self) -> None:\n self.session_data = (\n self.coordinator.data.get(self.session_id)\n if self.coordinator.data is not None\n else None\n )\n\n if self.session_data is not None:\n self.now_playing = self.session_data.get(\"NowPlayingItem\")\n self.play_state = self.session_data.get(\"PlayState\")\n else:\n self.now_playing = None\n self.play_state = None\n\n self._update_from_session_data()\n super()._handle_coordinator_update()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 154, "n_words": 36, "vocab_size": 22, "complexity": 3, "nloc": 14, "token_counts": 92, "n_ast_nodes": 151, "n_identifiers": 11, "random_cut": "def _handle_coordinator_update(self) -> None:\n self.session_data = (\n self.coordinator.data.get(se" }, { "id": 280980, "commit_id": "000d1e93d7187299dce5653f781345031a9ad96f", "repo": "OpenBBTerminal", "path": "tests/gamestonk_terminal/stocks/options/test_yfinance_model.py", "file_name": "test_yfinance_model.py", "fun_name": "test_get_option_chain", "commit_message": "Tests : Stocks > Options (#1125)\n\n* Update tests : conftest\r\n\r\n* Updating tests : stocks/options\r\n\r\n* Updating tests : fix typing\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : pyupgrade\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : mock dates in cassettes\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : force single threading\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : conftest\r\n\r\n* Update tests : skip stocks/options/controller\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating 
tests : skip\r\n\r\n* Updating tests : fixing issue\r\n\r\n* Updating tests : add init\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip\r\n\r\n* Updating tests : skip", "code": "def test_get_option_chain(recorder):\n result_tuple = yfinance_model.get_option_chain(\n ticker=\"PM\",\n expiration=\"2022-01-07\",\n )\n result_tuple = (result_tuple.calls, result_tuple.puts)\n\n recorder.capture_list(result_tuple)\n\n\n@pytest.mark.vcr\n@pytest.mark.parametrize(\n \"func\",\n [\n \"option_expirations\",\n \"get_dividend\",\n \"get_price\",\n \"get_info\",\n \"get_closing\",\n ],\n)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr\n@pytest.mark.parametrize(\n \"func\",\n [\n \"option_expirations\",\n \"get_dividend\",\n \"get_price\",\n \"get_info\",\n \"get_closing\",\n ],\n)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 90, "n_words": 24, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 37, "n_ast_nodes": 112, "n_identifiers": 14, "random_cut": "def test_get_option_chain(recorder):\n result_tuple = yfinance_model.get_option_chain(\n ticker=\"PM\",\n expiration=\"2022-01-07\",\n )\n result_tuple = (result_tuple.calls, result_tuple.puts)\n\n recorder.capture_list(result_tuple)\n\n\n@pytest.mark.vcr\n@pytest.mark.parametri" }, { "id": 72469, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/views/pages/create.py", "file_name": "create.py", "fun_name": "get_context_data", "commit_message": "Reformat with black", "code": "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"content_type\": self.page_content_type,\n \"page_class\": self.page_class,\n \"parent_page\": self.parent_page,\n \"edit_handler\": self.edit_handler,\n \"action_menu\": PageActionMenu(\n self.request, view=\"create\", parent_page=self.parent_page\n ),\n \"preview_modes\": self.page.preview_modes,\n \"form\": self.form,\n \"next\": self.next_url,\n \"has_unsaved_changes\": self.has_unsaved_changes,\n \"locale\": None,\n \"translations\": [],\n }\n )\n\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n # Pages can be created in any language at the root level\n if self.parent_page.is_root():\n translations = [\n {\n \"locale\": locale,\n \"url\": reverse(\n \"wagtailadmin_pages:add\",\n args=[\n self.page_content_type.app_label,\n self.page_content_type.model,\n self.parent_page.id,\n ],\n )\n + \"?\"\n + urlencode({\"locale\": locale.language_code}),\n }\n for locale in Locale.objects.all()\n ]\n\n else:\n user_perms = UserPagePermissionsProxy(self.request.user)\n translations = [\n {\n \"locale\": translation.locale,\n \"url\": reverse(\n \"wagtailadmin_pages:add\",\n args=[\n self.page_content_type.app_label,\n self.page_content_type.model,\n translation.id,\n ],\n ),\n }\n for translation in self.parent_page.get_translations()\n .only(\"id\", \"locale\")\n .select_related(\"locale\")\n if user_perms.for_page(translation).can_add_subpage()\n and self.page_class\n in translation.specific_class.creatable_subpage_models()\n and self.page_class.can_create_at(translation)\n ]\n\n context.update(\n {\n \"locale\": self.locale,\n \"translations\": translations,\n }\n )\n\n return context\n", "url": 
"https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 1339, "n_words": 126, "vocab_size": 90, "complexity": 8, "nloc": 66, "token_counts": 313, "n_ast_nodes": 512, "n_identifiers": 45, "random_cut": "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"content_type\": self.page_content_type,\n \"page_class\": self.page_class,\n \"parent_page\": self.parent_page,\n \"edit_handler\": self.edit_handler,\n \"action_menu\": PageActionMenu(\n self.request, view=\"create\", parent_page=self.parent_page\n ),\n \"preview_modes\": self.page.preview_modes,\n \"form\": self.form,\n \"next\": self.next_url,\n \"has_unsaved_changes\": self.has_unsaved_changes,\n \"locale\": None,\n \"translations\": [],\n }\n )\n\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n # Pages can be created in any language at the root level\n if self.parent_page.is_root():\n translations = [\n {\n \"locale\": locale,\n \"url\": reverse(\n \"wagtailadmin_pages:add\",\n args=[\n self.page_content_type.app_label,\n self.page_content_type.model,\n self.parent_page.id,\n ],\n " }, { "id": 241760, "commit_id": "d2d284fd6e3e8f53e9a44ab233771850af1e4dab", "repo": "lightning", "path": "tests/checkpointing/test_model_checkpoint.py", "file_name": "test_model_checkpoint.py", "fun_name": "test_model_checkpoint_no_extraneous_invocations", "commit_message": "Update `tests/checkpointing/*.py` to use `devices` instead of `gpus` or `ipus` (#11408)\n\nCo-authored-by: Carlos Mocholí ", "code": "def test_model_checkpoint_no_extraneous_invocations(tmpdir):\n \n model = LogInTwoMethods()\n num_epochs = 4\n model_checkpoint = ModelCheckpointTestInvocations(monitor=\"early_stop_on\", expected_count=num_epochs, save_top_k=-1)\n trainer = Trainer(\n strategy=\"ddp_spawn\",\n accelerator=\"cpu\",\n devices=2,\n default_root_dir=tmpdir,\n callbacks=[model_checkpoint],\n max_epochs=num_epochs,\n )\n trainer.fit(model)\n assert trainer.state.finished, f\"Training failed with {trainer.state}\"\n\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 96, "n_words": 30, "vocab_size": 27, "complexity": 1, "nloc": 14, "token_counts": 77, "n_ast_nodes": 130, "n_identifiers": 21, "random_cut": "def test_model_checkpoint_no_extraneous_invocations(tmpdir):\n \n model = LogInTwoMethods()\n num_epochs = 4\n model_checkpoint = ModelCheckpointTestInvocations(monitor=\"early_stop_on\", expected_count=num_epochs, sa" }, { "id": 290833, "commit_id": "2453f95b2442036200a07d862d98bfd3a401e726", "repo": "core", "path": "homeassistant/components/humidifier/__init__.py", "file_name": "__init__.py", "fun_name": "supported_features", "commit_message": "Adjust HumidifierEntity type hints (#82248)", "code": "def supported_features(self) -> HumidifierEntityFeature | int:\n \n return self._attr_supported_features\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 25, "n_identifiers": 5, "random_cut": "def supported_features(self) -> HumidifierEntityFeature | int:\n \n return self._attr_supported_features\n" }, { "id": 29297, "commit_id": "d90be220d6b687d08153934a51354011a3cb5ca1", "repo": "saleor", "path": 
"saleor/graphql/product/tests/queries/test_product_type_query.py", "file_name": "test_product_type_query.py", "fun_name": "test_query_product_type_for_federation", "commit_message": "Split test_product.py and test_variant.py into multiple files (#11173)\n\n* Split test_product.py into multiple files\r\n\r\n* Split test_variant.py into multiple files", "code": "def test_query_product_type_for_federation(api_client, product, channel_USD):\n product_type = product.product_type\n product_type_id = graphene.Node.to_global_id(\"ProductType\", product_type.pk)\n variables = {\n \"representations\": [\n {\n \"__typename\": \"ProductType\",\n \"id\": product_type_id,\n },\n ],\n }\n query = \n\n response = api_client.post_graphql(query, variables)\n content = get_graphql_content(response)\n assert content[\"data\"][\"_entities\"] == [\n {\n \"__typename\": \"ProductType\",\n \"id\": product_type_id,\n \"name\": product_type.name,\n }\n ]\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 186, "n_words": 46, "vocab_size": 33, "complexity": 1, "nloc": 31, "token_counts": 94, "n_ast_nodes": 161, "n_identifiers": 17, "random_cut": "def test_query_product_type_for_federation(api_client, product, channel_USD):\n product_type = product.product_type\n product_type_id = graphene.Node.to_global_id(\"ProductType\", product_type.pk)\n variables = {\n \"representations\": [\n {\n \"__typename\": \"ProductType\",\n \"id\": product_type_id,\n },\n ],\n }\n query = \n\n response = api_client.post_graphql(query, variables)\n content = get_graphql_content(response)\n assert content[\"data\"][\"_entities\"] == [\n {\n \"__typename\": " }, { "id": 37533, "commit_id": "2c2a2169b6524f18b37d7b4b64c64fb6a29a35a2", "repo": "transformers", "path": "src/transformers/utils/fx.py", "file_name": "fx.py", "fun_name": "__contains__", "commit_message": "Fx with meta (#16836)\n\n* Add meta proxy\r\n\r\n* Uses meta data to trace data dependent control-flow\r\n\r\n* Remove commented class\r\n\r\n* Handles torch creating functions\r\n\r\n* Added type annotation to fix tracing\r\n\r\n* Tracing works for everything but T5 and GPT-J\r\n\r\n* Almost all previously supported models pass\r\n\r\n* All architectures can be traced except T5\r\n\r\n* Intermediate commit to have a trace of the comparison operators for HFProxy\r\n\r\n* Everything works, except loss computation\r\n\r\n* Everything works\r\n\r\n* Removed unused import\r\n\r\n* Overriden methods do not use underlying ops (linear and torch.matmul), and model attributes are copied to the traced version\r\n\r\n* Fix torch_matmul_override\r\n\r\n* Change attributes reference to deepcopy\r\n\r\n* Remove breakpoint and add torch_index_override\r\n\r\n* Small fix\r\n\r\n* Fix typo\r\n\r\n* Replace asserts by explicit exceptions", "code": "def __contains__(self, key):\n # To handle cases such as :\n # `\"some_key\" in kwargs`\n if self.node.op == \"placeholder\":\n return False\n return super().__contains__(key)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 60, "n_words": 22, "vocab_size": 20, "complexity": 2, "nloc": 4, "token_counts": 27, "n_ast_nodes": 47, "n_identifiers": 6, "random_cut": "def __contains__(self, key):\n # To handle cases such as :\n # `\"some_key\" in kwargs`\n if self.node.op == \"placeholder\":\n " }, { "id": 44419, "commit_id": 
"6fc6edf6af7f676bfa54ff3a2e6e6d2edb938f2e", "repo": "airflow", "path": "airflow/models/taskinstance.py", "file_name": "taskinstance.py", "fun_name": "key", "commit_message": "Make `airflow dags test` be able to execute Mapped Tasks (#21210)\n\n* Make `airflow dags test` be able to execute Mapped Tasks\r\n\r\nIn order to do this there were two steps required:\r\n\r\n- The BackfillJob needs to know about mapped tasks, both to expand them,\r\n and in order to update it's TI tracking\r\n- The DebugExecutor needed to \"unmap\" the mapped task to get the real\r\n operator back\r\n\r\nI was testing this with the following dag:\r\n\r\n```\r\nfrom airflow import DAG\r\nfrom airflow.decorators import task\r\nfrom airflow.operators.python import PythonOperator\r\nimport pendulum\r\n\r\n@task\r\ndef make_list():\r\n return list(map(lambda a: f'echo \"{a!r}\"', [1, 2, {'a': 'b'}]))\r\n\r\ndef consumer(*args):\r\n print(repr(args))\r\n\r\nwith DAG(dag_id='maptest', start_date=pendulum.DateTime(2022, 1, 18)) as dag:\r\n PythonOperator(task_id='consumer', python_callable=consumer).map(op_args=make_list())\r\n```\r\n\r\nIt can't \"unmap\" decorated operators successfully yet, so we're using\r\nold-school PythonOperator\r\n\r\nWe also just pass the whole value to the operator, not just the current\r\nmapping value(s)\r\n\r\n* Always have a `task_group` property on DAGNodes\r\n\r\nAnd since TaskGroup is a DAGNode, we don't need to store parent group\r\ndirectly anymore -- it'll already be stored\r\n\r\n* Add \"integation\" tests for running mapped tasks via BackfillJob\r\n\r\n* Only show \"Map Index\" in Backfill report when relevant\r\n\r\nCo-authored-by: Tzu-ping Chung ", "code": "def key(self) -> TaskInstanceKey:\n \n return TaskInstanceKey(self.dag_id, self.task_id, self.run_id, self.try_number, self.map_index)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 24, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 31, "n_ast_nodes": 47, "n_identifiers": 8, "random_cut": "def key(self) -> TaskInstanceKey:\n \n return TaskInstanceKey(self.dag_id, self.task_id, self.run_id, self.try_number, self.m" }, { "id": 4476, "commit_id": "a305e4913060b919f02f3db57b9e17f82f48c425", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-zendesk-support/unit_tests/unit_test.py", "file_name": "unit_test.py", "fun_name": "test_check_start_time_param", "commit_message": "🐛 Source Zendesk-Support: fixed bug when `Tickets` stream didn't return removed records (#11349)", "code": "def test_check_start_time_param():\n expected = 1626936955\n start_time = calendar.timegm(pendulum.parse(DATETIME_STR).utctimetuple())\n output = SourceZendeskTicketExportStream.check_start_time_param(start_time)\n assert output == expected\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 26, "n_words": 15, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 36, "n_ast_nodes": 60, "n_identifiers": 12, "random_cut": "def test_check_start_time_param():\n " }, { "id": 258814, "commit_id": "1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe", "repo": "scikit-learn", "path": "sklearn/cluster/tests/test_k_means.py", "file_name": "test_k_means.py", "fun_name": "test_euclidean_distance", "commit_message": "MNT Update black to stable version (#22474)", "code": "def test_euclidean_distance(dtype, squared):\n # Check that the 
_euclidean_(dense/sparse)_dense helpers produce correct\n # results\n rng = np.random.RandomState(0)\n a_sparse = sp.random(\n 1, 100, density=0.5, format=\"csr\", random_state=rng, dtype=dtype\n )\n a_dense = a_sparse.toarray().reshape(-1)\n b = rng.randn(100).astype(dtype, copy=False)\n b_squared_norm = (b**2).sum()\n\n expected = ((a_dense - b) ** 2).sum()\n expected = expected if squared else np.sqrt(expected)\n\n distance_dense_dense = _euclidean_dense_dense_wrapper(a_dense, b, squared)\n distance_sparse_dense = _euclidean_sparse_dense_wrapper(\n a_sparse.data, a_sparse.indices, b, b_squared_norm, squared\n )\n\n assert_allclose(distance_dense_dense, distance_sparse_dense, rtol=1e-6)\n assert_allclose(distance_dense_dense, expected, rtol=1e-6)\n assert_allclose(distance_sparse_dense, expected, rtol=1e-6)\n\n\n@pytest.mark.parametrize(\"dtype\", [np.float32, np.float64])", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"dtype\", [np.float32, np.float64])", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 136, "n_words": 76, "vocab_size": 58, "complexity": 2, "nloc": 17, "token_counts": 177, "n_ast_nodes": 284, "n_identifiers": 36, "random_cut": "def test_euclidean_distance(dtype, squared):\n # Check that the _euclidean_(dense/sparse)_dense helpers produce correct\n # results\n rng = np.random.RandomState(0)\n a_sparse = sp.random(\n 1, 100, density=0.5, format=\"csr\", random_state=rng, dtype=dtype\n )\n a_dense = a_sparse.toarray().reshape(-1)\n b = rng.randn(100).astype(dtype, copy=False)\n b_squared_norm = (b**2).sum()\n\n expected = ((a_dense - b) ** 2).sum()\n expected = expected if squared else np.sqrt(expected)\n\n distance_dense_dense = _euclidean_dense_dense_wrapper(a_dense, b, squared)\n distance_sparse_dense = _euclidean_sparse_dense_wrapper(\n a_sparse.data, a_sparse.indices, b, b_squared_norm, squared\n )\n\n assert_allclose(distance_dense_dense, distance_sparse_dense, rtol" }, { "id": 264990, "commit_id": "fe899d9d7cdb458298b92c2f46792adaf211851d", "repo": "netbox", "path": "netbox/extras/scripts.py", "file_name": "scripts.py", "fun_name": "_get_vars", "commit_message": "Iterate base classes when searching for ScriptVariables", "code": "def _get_vars(cls):\n vars = {}\n\n # Iterate all base classes looking for ScriptVariables\n for base_class in inspect.getmro(cls):\n # When object is reached there's no reason to continue\n if base_class is object:\n break\n\n for name, attr in base_class.__dict__.items():\n if name not in vars and issubclass(attr.__class__, ScriptVariable):\n vars[name] = attr\n\n # Order variables according to field_order\n field_order = getattr(cls.Meta, 'field_order', None)\n if not field_order:\n return vars\n ordered_vars = {\n field: vars.pop(field) for field in field_order if field in vars\n }\n ordered_vars.update(vars)\n\n return ordered_vars\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 254, "n_words": 81, "vocab_size": 53, "complexity": 9, "nloc": 16, "token_counts": 105, "n_ast_nodes": 167, "n_identifiers": 21, "random_cut": "def _get_vars(cls):\n vars = {}\n\n # Iterate all base classes looking for ScriptVariables\n for base_class in inspect.getmro(cls):\n # When object is reached there's no reason to continue\n if base_class is object:\n break\n\n for name, attr in base_class.__dict__.items():\n if name not in vars and issubclass(attr.__class__, 
ScriptVariable):\n vars[name] = attr\n\n # Order variables according to field_order\n field_order = getattr(cls.Meta, 'field_order', None)\n if not field_order:\n return vars\n ordered_vars = {\n field: vars.pop(field) for field in field_order if field in vars\n }\n ordered_vars.update(vars)\n\n r" }, { "id": 30491, "commit_id": "ea69e868ed95a335b362a3708628c0372cb7abb8", "repo": "OCRmyPDF", "path": "tests/test_unpaper.py", "file_name": "test_unpaper.py", "fun_name": "test_unpaper_image_too_big", "commit_message": "unpaper: issue warning if image too large to clean", "code": "def test_unpaper_image_too_big(resources, outdir, caplog):\n with patch('ocrmypdf._exec.unpaper.UNPAPER_IMAGE_PIXEL_LIMIT', 42):\n infile = resources / 'crom.png'\n unpaper.clean(infile, outdir / 'out.png', dpi=300) == infile\n\n assert any(\n 'too large for cleaning' in rec.message\n for rec in caplog.get_records('call')\n if rec.levelno == logging.WARNING\n )\n", "url": "https://github.com/ocrmypdf/OCRmyPDF.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 99, "n_words": 36, "vocab_size": 31, "complexity": 3, "nloc": 9, "token_counts": 64, "n_ast_nodes": 107, "n_identifiers": 16, "random_cut": "def test_unpaper_image_too_big(resources, outdir, caplog):\n with patch('ocrmypdf._exec.unpaper.UNPAPER_IMAGE_PIXEL_LIMIT', 42):\n infile = resources / 'crom.png'\n u" }, { "id": 35843, "commit_id": "d83d22f578276e9f201b0b3b0f8f9bd68e86c133", "repo": "transformers", "path": "tests/maskformer/test_feature_extraction_maskformer.py", "file_name": "test_feature_extraction_maskformer.py", "fun_name": "test_feat_extract_properties", "commit_message": "Maskformer (#15682)\n\n* maskformer\r\n\r\n* conflicts\r\n\r\n* conflicts\r\n\r\n* minor fixes\r\n\r\n* feature extractor test fix\r\n\r\nrefactor MaskFormerLoss following conversation\r\n\r\nMaskFormer related types should not trigger a module time import error\r\n\r\nmissed one\r\n\r\nremoved all the types that are not used\r\n\r\nupdate config mapping\r\n\r\nminor updates in the doc\r\n\r\nresolved conversation that doesn't need a discussion\r\n\r\nminor changes\r\n\r\nresolved conversations\r\n\r\nfixed DetrDecoder\r\n\r\n* minor changes\r\n\r\nminor changes\r\n\r\nfixed mdx file\r\n\r\ntest feature_extractor return types\r\n\r\nfunctional losses -> classes\r\n\r\nremoved the return type test for the feature extractor\r\n\r\nminor changes + style + quality\r\n\r\n* conflicts?\r\n\r\n* rebase master\r\n\r\n* readme\r\n\r\n* added missing files\r\n\r\n* deleded poolformers test that where in the wrong palce\r\n\r\n* CI\r\n\r\n* minor changes\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* resolved conversations\r\n\r\n* minor changes\r\n\r\n* conversations\r\n\r\n[Unispeech] Fix slow tests (#15818)\r\n\r\n* remove soundfile old way of loading audio\r\n\r\n* Adapt slow test\r\n\r\n[Barthez Tokenizer] Fix saving (#15815)\r\n\r\n[TFXLNet] Correct tf xlnet generate (#15822)\r\n\r\n* [TFXLNet] Correct tf xlnet\r\n\r\n* adapt test comment\r\n\r\nFix the push run (#15807)\r\n\r\nFix semantic segmentation pipeline test (#15826)\r\n\r\nFix dummy_inputs() to dummy_inputs in symbolic_trace doc (#15776)\r\n\r\nAdd model specific output classes to PoolFormer model docs (#15746)\r\n\r\n* Added model specific output classes to poolformer docs\r\n\r\n* Fixed Segformer typo in Poolformer docs\r\n\r\nAdding the option to return_timestamps on pure CTC ASR models. 
(#15792)\r\n\r\n* Adding the option to return_timestamps on pure CTC ASR models.\r\n\r\n* Remove `math.prod` which was introduced in Python 3.8\r\n\r\n* int are not floats.\r\n\r\n* Reworking the PR to support \"char\" vs \"word\" output.\r\n\r\n* Fixup!\r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Quality.\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\nHFTracer.trace should use/return self.graph to be compatible with torch.fx.Tracer (#15824)\r\n\r\nFix tf.concatenate + test past_key_values for TF models (#15774)\r\n\r\n* fix wrong method name tf.concatenate\r\n\r\n* add tests related to causal LM / decoder\r\n\r\n* make style and quality\r\n\r\n* clean-up\r\n\r\n* Fix TFBertModel's extended_attention_mask when past_key_values is provided\r\n\r\n* Fix tests\r\n\r\n* fix copies\r\n\r\n* More tf.int8 -> tf.int32 in TF test template\r\n\r\n* clean-up\r\n\r\n* Update TF test template\r\n\r\n* revert the previous commit + update the TF test template\r\n\r\n* Fix TF template extended_attention_mask when past_key_values is provided\r\n\r\n* Fix some styles manually\r\n\r\n* clean-up\r\n\r\n* Fix ValueError: too many values to unpack in the test\r\n\r\n* Fix more: too many values to unpack in the test\r\n\r\n* Add a comment for extended_attention_mask when there is past_key_values\r\n\r\n* Fix TFElectra extended_attention_mask when past_key_values is provided\r\n\r\n* Add tests to other TF models\r\n\r\n* Fix for TF Electra test: add prepare_config_and_inputs_for_decoder\r\n\r\n* Fix not passing training arg to lm_head in TFRobertaForCausalLM\r\n\r\n* Fix tests (with past) for TF Roberta\r\n\r\n* add testing for pask_key_values for TFElectra model\r\n\r\nCo-authored-by: ydshieh \r\n\r\n[examples/summarization and translation] fix readme (#15833)\r\n\r\nAdd ONNX Runtime quantization for text classification notebook (#15817)\r\n\r\nRe-enable doctests for the quicktour (#15828)\r\n\r\n* Re-enable doctests for the quicktour\r\n\r\n* Re-enable doctests for task_summary (#15830)\r\n\r\n* Remove &\r\n\r\nFramework split model report (#15825)\r\n\r\nAdd TFConvNextModel (#15750)\r\n\r\n* feat: initial implementation of convnext in tensorflow.\r\n\r\n* fix: sample code for the classification model.\r\n\r\n* chore: added checked for from the classification model.\r\n\r\n* chore: set bias initializer in the classification head.\r\n\r\n* chore: updated license terms.\r\n\r\n* chore: removed ununsed imports\r\n\r\n* feat: enabled argument during using drop_path.\r\n\r\n* chore: replaced tf.identity 
with layers.Activation(linear).\r\n\r\n* chore: edited default checkpoint.\r\n\r\n* fix: minor bugs in the initializations.\r\n\r\n* partial-fix: tf model errors for loading pretrained pt weights.\r\n\r\n* partial-fix: call method updated\r\n\r\n* partial-fix: cross loading of weights (4x3 variables to be matched)\r\n\r\n* chore: removed unneeded comment.\r\n\r\n* removed playground.py\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* fix: renaming TFConvNextStage conv and layer norm layers\r\n\r\n* chore: added initializers and other minor additions.\r\n\r\n* chore: added initializers and other minor additions.\r\n\r\n* add: tests for convnext.\r\n\r\n* fix: integration tester class.\r\n\r\n* fix: issues mentioned in pr feedback (round 1).\r\n\r\n* fix: how output_hidden_states arg is propoagated inside the network.\r\n\r\n* feat: handling of arg for pure cnn models.\r\n\r\n* chore: added a note on equal contribution in model docs.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* feat: encapsulation for the convnext trunk.\r\n\r\n* Fix variable naming; Test-related corrections; Run make fixup\r\n\r\n* chore: added Joao as a contributor to convnext.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* chore: corrected copyright year and added comment on NHWC.\r\n\r\n* chore: fixed the black version and ran formatting.\r\n\r\n* chore: ran make style.\r\n\r\n* chore: removed from_pt argument from test, ran make style.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* fix: tests in the convnext subclass, ran make style.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* chore: moved convnext test to the correct location\r\n\r\n* fix: locations for the test file of convnext.\r\n\r\n* fix: convnext tests.\r\n\r\n* chore: applied sgugger's suggestion for dealing w/ output_attentions.\r\n\r\n* chore: added comments.\r\n\r\n* chore: applied updated quality enviornment style.\r\n\r\n* chore: applied formatting with quality enviornment.\r\n\r\n* chore: revert to the previous tests/test_modeling_common.py.\r\n\r\n* chore: revert to the original test_modeling_common.py\r\n\r\n* chore: revert to previous states for test_modeling_tf_common.py and modeling_tf_utils.py\r\n\r\n* fix: tests for convnext.\r\n\r\n* chore: removed output_attentions argument from convnext config.\r\n\r\n* chore: revert to the earlier tf utils.\r\n\r\n* fix: output shapes of the hidden states\r\n\r\n* chore: removed unnecessary comment\r\n\r\n* chore: reverting to the right test_modeling_tf_common.py.\r\n\r\n* Styling nits\r\n\r\nCo-authored-by: ariG23498 \r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Sylvain Gugger \r\n\r\n* minor changes\r\n\r\n* doc fix in feature extractor\r\n\r\n* doc\r\n\r\n* typose\r\n\r\n* removed detr logic from config\r\n\r\n* removed detr logic from config\r\n\r\n* removed num_labels\r\n\r\n* small fix in the config\r\n\r\n* auxilary -> auxiliary\r\n\r\n* make style\r\n\r\n* some test is failing\r\n\r\n* fix a weird char in config prevending doc-builder\r\n\r\n* retry to fix the doc-builder issue\r\n\r\n* make style\r\n\r\n* new try to fix the doc builder\r\n\r\n* CI\r\n\r\n* change weights to facebook\r\n\r\nCo-authored-by: NielsRogge 
<48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: ariG23498 \r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Sylvain Gugger ", "code": "def test_feat_extract_properties(self):\n feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)\n self.assertTrue(hasattr(feature_extractor, \"image_mean\"))\n self.assertTrue(hasattr(feature_extractor, \"image_std\"))\n self.assertTrue(hasattr(feature_extractor, \"do_normalize\"))\n self.assertTrue(hasattr(feature_extractor, \"do_resize\"))\n self.assertTrue(hasattr(feature_extractor, \"size\"))\n self.assertTrue(hasattr(feature_extractor, \"max_size\"))\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 65, "n_words": 17, "vocab_size": 12, "complexity": 1, "nloc": 8, "token_counts": 82, "n_ast_nodes": 141, "n_identifiers": 7, "random_cut": "def test_feat_extract_properties(self):\n feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)\n self.assertTrue(hasattr(feature_extractor, \"image_mean\"))\n self.assertTrue(hasattr(feature_extractor, \"image_std\"))\n self.assertTrue(hasattr(feature_extractor, \"do_normalize\"))\n self.assertTrue(hasattr(feature_extractor, \"do_resize\"))\n self.assertTrue(hasattr(feature_extractor, \"size\"))\n self.assertTru" }, { "id": 284285, "commit_id": "a5b414bf1a91f05f370886748845077d4cec03e7", "repo": "OpenBBTerminal", "path": "openbb_terminal/settings_controller.py", "file_name": "settings_controller.py", "fun_name": "call_cls", "commit_message": "Default env for packaged apps (#1693)\n\n* Remove defaults json in favor of a .env in a cross platform specfile\r\n\r\n* Use ENV_FILE from obff across the app\r\n\r\n* Add venv packaging support to the specfile\r\n\r\n* Make silencing explicit\r\n\r\n* Fix bug in integration tests report printout\r\n\r\nCo-authored-by: piiq ", "code": "def call_cls(self, _):\n \n obbff.USE_CLEAR_AFTER_CMD = not obbff.USE_CLEAR_AFTER_CMD\n set_key(\n obbff.ENV_FILE,\n \"OPENBB_USE_CLEAR_AFTER_CMD\",\n str(obbff.USE_CLEAR_AFTER_CMD),\n )\n console.print(\"\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 81, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 8, "token_counts": 38, "n_ast_nodes": 65, "n_identifiers": 10, "random_cut": "def call_cls(self, _):\n \n obbff.USE_CLEAR_AFTER_CMD = not obbff.USE_CLEAR_AFTER_CMD\n set_key(" }, { "id": 100446, "commit_id": "aa39234538a8f83e6aa2b60b8275a570e8876ac2", "repo": "faceswap", "path": "plugins/extract/detect/s3fd.py", "file_name": "s3fd.py", "fun_name": "_post_process", "commit_message": "Update all Keras Imports to be conditional (#1214)\n\n* Remove custom keras importer\r\n\r\n* first round keras imports fix\r\n\r\n* launcher.py: Remove KerasFinder references\r\n\r\n* 2nd round keras imports update (lib and extract)\r\n\r\n* 3rd round keras imports update (train)\r\n\r\n* remove KerasFinder from tests\r\n\r\n* 4th round keras imports update (tests)", "code": "def _post_process(self, bboxlist):\n \n retval = []\n for i in range(len(bboxlist) // 2):\n bboxlist[i * 2] = self.softmax(bboxlist[i * 2], axis=3)\n for i in range(len(bboxlist) // 2):\n ocls, oreg = bboxlist[i * 2], bboxlist[i * 2 + 1]\n stride = 2 ** (i + 2) # 4,8,16,32,64,128\n poss = zip(*np.where(ocls[:, :, :, 1] > 0.05))\n for _, hindex, windex in poss:\n axc, ayc = stride / 2 + windex * stride, stride 
/ 2 + hindex * stride\n score = ocls[0, hindex, windex, 1]\n if score >= self.confidence:\n loc = np.ascontiguousarray(oreg[0, hindex, windex, :]).reshape((1, 4))\n priors = np.array([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]])\n box = self.decode(loc, priors)\n x_1, y_1, x_2, y_2 = box[0] * 1.0\n retval.append([x_1, y_1, x_2, y_2, score])\n return_numpy = np.array(retval) if len(retval) != 0 else np.zeros((1, 5))\n return return_numpy\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 381, "n_words": 141, "vocab_size": 79, "complexity": 6, "nloc": 19, "token_counts": 288, "n_ast_nodes": 417, "n_identifiers": 37, "random_cut": "def _post_process(self, bboxlist):\n " }, { "id": 118533, "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", "repo": "streamlit", "path": "lib/streamlit/app_session.py", "file_name": "app_session.py", "fun_name": "handle_stop_script_request", "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, primarily \"script run\", \"session\", and \"app\".", "code": "def handle_stop_script_request(self):\n \n self._enqueue_script_request(ScriptRequest.STOP)\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 17, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 26, "n_identifiers": 5, "random_cut": "def handle_stop_script_request(self):\n \n self._enqueue_script_request(ScriptRequest.STOP)\n" }, { "id": 155881, "commit_id": "c7e069947b9b720df03aca2e4f7682faa2d9876f", "repo": "dask", "path": "dask/array/tests/test_overlap.py", "file_name": "test_overlap.py", "fun_name": "test_map_overlap_no_depth", "commit_message": "Finish making ``map_overlap`` default boundary ``kwarg`` ``'none'`` (#8743)\n\nFollowup to PR https://github.com/dask/dask/pull/8397\r\n\r\nWe've had a FutureWarning up for a few months about an upcoming change to the default 'boundary' kwarg value in `map_overlap`, so now is the time to change it. Previous default was `\"reflect\"`, new default will be \"None\".\r\n\r\nThe reason for this change is that it makes the code run a lot faster, and for most people the overlap depth is sufficient and they should not require additional boundary handling. 
See https://github.com/dask/dask/issues/8391 for a full discussion.", "code": "def test_map_overlap_no_depth(boundary):\n x = da.arange(10, chunks=5)\n y = x.map_overlap(lambda i: i, depth=0, boundary=boundary, dtype=x.dtype)\n assert_eq(y, x)\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 24, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 48, "n_ast_nodes": 72, "n_identifiers": 12, "random_cut": "def test_map_overlap_no_depth(boundary):\n x = da.arange(10, chunks=5)\n y = x.map_overlap(lambda i: i, depth=0, boundary=bound" }, { "id": 292993, "commit_id": "d68ada74ccebaa0c1b6986b3be9cf4d73eca7cae", "repo": "core", "path": "homeassistant/components/media_player/browse_media.py", "file_name": "browse_media.py", "fun_name": "calculate_children_class", "commit_message": "Restore children media class (#67409)", "code": "def calculate_children_class(self) -> None:\n \n self.children_media_class = MEDIA_CLASS_DIRECTORY\n assert self.children is not None\n proposed_class = self.children[0].media_class\n if all(child.media_class == proposed_class for child in self.children):\n self.children_media_class = proposed_class\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 72, "n_words": 26, "vocab_size": 21, "complexity": 3, "nloc": 7, "token_counts": 51, "n_ast_nodes": 81, "n_identifiers": 9, "random_cut": "def calculate_children_class(self) -> None:\n \n self.children_media_class = MEDIA_CLASS" }, { "id": 282174, "commit_id": "ccfe98e19dd36702047fd8130e9b299e8f7cadcc", "repo": "OpenBBTerminal", "path": "tests/gamestonk_terminal/cryptocurrency/defi/test_coindix_model.py", "file_name": "test_coindix_model.py", "fun_name": "test_get_defi_vaults_value_error", "commit_message": "Tests + Fix : Cryptocurrency > Defi (#1284)\n\n* Updating tests : crypto/defi\r\n\r\n* Updating tests : stocks/prediction_techniques\r\n\r\n* Updating tests : economy\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : economy/wsj\r\n\r\n* Updating crypto : fixing defi/coindix_view\r\n\r\n* Updating crypto : fix crypto/defi/defipulse_model\r\n\r\n* Updating tests : crypto/defi\r\n\r\n* Updating tests : crypto/defi\r\n\r\n* Updating crypto : crypto/defi/graph_model\r\n\r\n* Updating tests : crypto/defi\r\n\r\n* Updating tests : crypto/defi\r\n\r\n* Updating tests : crypto/defi\r\n\r\n* Updating tests : black\r\n\r\n* Updating tests : economy/fred/prediction\r\n\r\n* Updating tests : crypto/defi/graph\r\n\r\n* Updating tests : linting", "code": "def test_get_defi_vaults_value_error(mocker):\n # MOCK GET\n attrs = {\n \"status_code\": 200,\n \"json.side_effect\": UnicodeDecodeError,\n }\n mock_response = mocker.Mock(**attrs)\n mocker.patch(target=\"requests.get\", new=mocker.Mock(return_value=mock_response))\n\n with pytest.raises(ValueError) as _:\n coindix_model.get_defi_vaults(\n chain=None,\n protocol=None,\n kind=None,\n )\n\n\n@pytest.mark.vcr(record_mode=\"none\")", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.vcr(record_mode=\"none\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 105, "n_words": 28, "vocab_size": 27, "complexity": 1, "nloc": 13, "token_counts": 72, "n_ast_nodes": 140, "n_identifiers": 22, "random_cut": "def test_get_defi_vaults_value_error(mocker):\n # MOCK GET\n attrs = {\n \"status_code\": 200,\n 
\"json.side_effect\": UnicodeDecodeError,\n }\n mock_response = mocker.Mock(**attrs)\n mocker.patch(target=\"requests.get\", new=mocker." }, { "id": 292632, "commit_id": "e1989e285896e07fb6f4a5f09dcf5039c722a16e", "repo": "core", "path": "homeassistant/components/powerwall/__init__.py", "file_name": "__init__.py", "fun_name": "async_update_data", "commit_message": "Enable strict typing for powerwall (#65577)", "code": "async def async_update_data(self) -> PowerwallData:\n \n # Check if we had an error before\n _LOGGER.debug(\"Checking if update failed\")\n if self.api_changed:\n raise UpdateFailed(\"The powerwall api has changed\")\n return await self.hass.async_add_executor_job(self._update_data)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 74, "n_words": 28, "vocab_size": 26, "complexity": 2, "nloc": 6, "token_counts": 36, "n_ast_nodes": 67, "n_identifiers": 10, "random_cut": "async def async_update_data(self) -> PowerwallData:\n \n # Check if we had an error before\n _LO" }, { "id": 306911, "commit_id": "823e7e8830118a8c500a0492c9cc8905bf5effb4", "repo": "core", "path": "homeassistant/components/itunes/media_player.py", "file_name": "media_player.py", "fun_name": "state", "commit_message": "Use new media player enums [i-l] (#78054)", "code": "def state(self):\n \n if self.player_state == \"offline\" or self.player_state is None:\n return \"offline\"\n\n if self.player_state == \"error\":\n return \"error\"\n\n if self.player_state == \"stopped\":\n return MediaPlayerState.IDLE\n\n if self.player_state == \"paused\":\n return MediaPlayerState.PAUSED\n\n return MediaPlayerState.PLAYING\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 118, "n_words": 32, "vocab_size": 17, "complexity": 6, "nloc": 10, "token_counts": 56, "n_ast_nodes": 102, "n_identifiers": 7, "random_cut": "def state(self):\n \n if self.player_state == \"offline\" or self.player_state is None:\n return \"offline\"\n\n if self.player_state == \"error\":\n return \"error\"\n\n if self.player_state == \"stopped\":\n return MediaPlayerState.IDLE\n\n if self.player_s" }, { "id": 164937, "commit_id": "03fef5f0e35200aa5828975b62782bcf11faa0d2", "repo": "pandas", "path": "pandas/tests/plotting/frame/test_frame.py", "file_name": "test_frame.py", "fun_name": "test_boxplot_vertical", "commit_message": "TST: Clean tests/plotting (#45992)", "code": "def test_boxplot_vertical(self, hist_df):\n df = hist_df\n numeric_cols = df._get_numeric_data().columns\n labels = [pprint_thing(c) for c in numeric_cols]\n\n # if horizontal, yticklabels are rotated\n ax = df.plot.box(rot=50, fontsize=8, vert=False)\n self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)\n self._check_text_labels(ax.get_yticklabels(), labels)\n assert len(ax.lines) == 7 * len(numeric_cols)\n\n axes = _check_plot_works(\n df.plot.box,\n default_axes=True,\n subplots=True,\n vert=False,\n logx=True,\n )\n self._check_axes_shape(axes, axes_num=3, layout=(1, 3))\n self._check_ax_scales(axes, xaxis=\"log\")\n for ax, label in zip(axes, labels):\n self._check_text_labels(ax.get_yticklabels(), [label])\n assert len(ax.lines) == 7\n\n positions = np.array([3, 2, 8])\n ax = df.plot.box(positions=positions, vert=False)\n self._check_text_labels(ax.get_yticklabels(), labels)\n tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)\n assert len(ax.lines) == 7 * 
len(numeric_cols)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 287, "n_words": 85, "vocab_size": 62, "complexity": 3, "nloc": 25, "token_counts": 255, "n_ast_nodes": 384, "n_identifiers": 43, "random_cut": "def test_boxplot_vertical(self, hist_df):\n df = hist_df\n numeric_cols = df._get_numeric_data().columns\n labels = [pprint_thing(c) for c in numeric_cols]\n\n # if horizontal, yticklabels are rotated\n ax = df.plot.box(rot=50, fontsize=8, vert=False)\n self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)\n self._check_text_labels(ax.get_yticklabels(), labels)\n assert len(ax.lines) == 7 * len(numeric_cols)\n\n axes = _check_p" }, { "id": 301002, "commit_id": "8ff0ced846e505a0c33a848e21b19820861e6884", "repo": "core", "path": "homeassistant/components/esphome/media_player.py", "file_name": "media_player.py", "fun_name": "supported_features", "commit_message": "Initial implementation of ESPHome media players (#72047)\n\nCo-authored-by: Paulus Schoutsen \r\nCo-authored-by: Franck Nijhof ", "code": "def supported_features(self) -> int:\n \n flags = (\n MediaPlayerEntityFeature.PLAY_MEDIA\n | MediaPlayerEntityFeature.BROWSE_MEDIA\n | MediaPlayerEntityFeature.STOP\n | MediaPlayerEntityFeature.VOLUME_SET\n | MediaPlayerEntityFeature.VOLUME_MUTE\n )\n if self._static_info.supports_pause:\n flags |= MediaPlayerEntityFeature.PAUSE | MediaPlayerEntityFeature.PLAY\n return flags\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 127, "n_words": 26, "vocab_size": 20, "complexity": 2, "nloc": 12, "token_counts": 49, "n_ast_nodes": 79, "n_identifiers": 14, "random_cut": "def supported_features(self) -> int:\n \n flags = (\n MediaPlayerEntityFeature.PLAY_MEDIA\n | MediaPlayerEn" }, { "id": 80601, "commit_id": "604cbc17376620dc67df35386421835d43732a4e", "repo": "awx", "path": "awx/main/scheduler/task_manager.py", "file_name": "task_manager.py", "fun_name": "task_needs_capacity", "commit_message": "Consume control capacity (#11665)\n\n* Select control node before start task\r\n\r\nConsume capacity on control nodes for controlling tasks and consider\r\nremainging capacity on control nodes before selecting them.\r\n\r\nThis depends on the requirement that control and hybrid nodes should all\r\nbe in the instance group named 'controlplane'. Many tests do not satisfy that\r\nrequirement. 
I'll update the tests in another commit.\r\n\r\n* update tests to use controlplane\r\n\r\nWe don't start any tasks if we don't have a controlplane instance group\r\n\r\nDue to updates to fixtures, update tests to set node type and capacity\r\nexplicitly so they get expected result.\r\n\r\n* Fixes for accounting of control capacity consumed\r\n\r\nUpdate method is used to account for currently consumed capacity for\r\ninstance groups in the in-memory capacity tracking data structure we initialize in\r\nafter_lock_init and then update via calculate_capacity_consumed (both in\r\ntask_manager.py)\r\n\r\nAlso update fit_task_to_instance to consider control impact on instances\r\n\r\nTrust that these functions do the right thing looking for a\r\nnode with capacity, and cut out redundant check for the whole group's\r\ncapacity per Alan's reccomendation.\r\n\r\n* Refactor now redundant code\r\n\r\nDeal with control type tasks before we loop over the preferred instance\r\ngroups, which cuts out the need for some redundant logic.\r\n\r\nAlso, fix a bug where I was missing assigning the execution node in one case!\r\n\r\n* set job explanation on tasks that need capacity\r\n\r\nmove the job explanation for jobs that need capacity to a function\r\nso we can re-use it in the three places we need it.\r\n\r\n* project updates always run on the controlplane\r\n\r\nInstance group ordering makes no sense on project updates because they\r\nalways need to run on the control plane.\r\n\r\nAlso, since hybrid nodes should always run the control processes for the\r\njobs running on them as execution nodes, account for this when looking for a\r\nexecution node.\r\n\r\n* fix misleading message\r\n\r\nthe variables and wording were both misleading, fix to be more accurate\r\ndescription in the two different cases where this log may be emitted.\r\n\r\n* use settings correctly\r\n\r\nuse settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME instead of a hardcoded\r\nname\r\ncache the controlplane_ig object during the after lock init to avoid\r\nan uneccesary query\r\neliminate mistakenly duplicated AWX_CONTROL_PLANE_TASK_IMPACT and use\r\nonly AWX_CONTROL_NODE_TASK_IMPACT\r\n\r\n* add test for control capacity consumption\r\n\r\nadd test to verify that when there are 2 jobs and only capacity for one\r\nthat one will move into waiting and the other stays in pending\r\n\r\n* add test for hybrid node capacity consumption\r\n\r\nassert that the hybrid node is used for both control and execution and\r\ncapacity is deducted correctly\r\n\r\n* add test for task.capacity_type = control\r\n\r\nTest that control type tasks have the right capacity consumed and\r\nget assigned to the right instance group\r\n\r\nAlso fix lint in the tests\r\n\r\n* jobs_running not accurate for control nodes\r\n\r\nWe can either NOT use \"idle instances\" for control nodes, or we need\r\nto update the jobs_running property on the Instance model to count\r\njobs where the node is the controller_node.\r\n\r\nI didn't do that because it may be an expensive query, and it would be\r\nhard to make it match with jobs_running on the InstanceGroup which\r\nfilters on tasks assigned to the instance group.\r\n\r\nThis change chooses to stop considering \"idle\" control nodes an option,\r\nsince we can't acurrately identify them.\r\n\r\nThe way things are without any change, is we are continuing to over consume capacity on control nodes\r\nbecause this method sees all control nodes as \"idle\" at the beginning\r\nof the task manager run, and then only counts jobs started 
in that run\r\nin the in-memory tracking. So jobs which last over a number of task\r\nmanager runs build up consuming capacity, which is accurately reported\r\nvia Instance.consumed_capacity\r\n\r\n* Reduce default task impact for control nodes\r\n\r\nThis is something we can experiment with as far as what users\r\nwant at install time, but start with just 1 for now.\r\n\r\n* update capacity docs\r\n\r\nDescribe usage of the new setting and the concept of control impact.\r\n\r\nCo-authored-by: Alan Rominger \r\nCo-authored-by: Rebeccah ", "code": "def task_needs_capacity(self, task, tasks_to_update_job_explanation):\n task.log_lifecycle(\"needs_capacity\")\n job_explanation = gettext_noop(\"This job is not ready to start because there is not enough available capacity.\")\n if task.job_explanation != job_explanation:\n if task.created < (tz_now() - self.time_delta_job_explanation):\n # Many launched jobs are immediately blocked, but most blocks will resolve in a few seconds.\n # Therefore we should only update the job_explanation after some time has elapsed to\n # prevent excessive task saves.\n task.job_explanation = job_explanation\n tasks_to_update_job_explanation.append(task)\n logger.debug(\"{} couldn't be scheduled on graph, waiting for next cycle\".format(task.log_format))\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 193, "n_words": 80, "vocab_size": 70, "complexity": 3, "nloc": 8, "token_counts": 67, "n_ast_nodes": 116, "n_identifiers": 15, "random_cut": "def task_needs_capacity(self, task, tasks_to_update_job_explanation):\n task.log_lifecycle(\"needs_capacity\")\n job_explanation = gettext_noop(\"This job is not ready to start because there is not enough available capacity.\")\n if task.job_explanation != job_explanation:\n if task.created < (tz_now() - self.time_delta_job_explanation):\n # Many launched jobs are immediately blocked, but most blocks will resolve in a few seconds.\n # Therefore we should only update the job_explanation after some time has elapsed to\n # prev" }, { "id": 88115, "commit_id": "b38f59d9f6d9eedd7ce0606805df7c072addb000", "repo": "sentry", "path": "tests/sentry/notifications/utils/test_participants.py", "file_name": "test_participants.py", "fun_name": "test_other_org_user", "commit_message": "ref(hybrid-cloud): Add user services. 
Start tagging some model tests as stable (#40614)\n\nNotifications uses new hybrid cloud APIUser\r\n\r\nCo-authored-by: Mike Ihbe \r\nCo-authored-by: Zachary Collins \r\nCo-authored-by: Zach Collins ", "code": "def test_other_org_user(self):\n org_2 = self.create_organization()\n user_2 = self.create_user()\n team_2 = self.create_team(org_2, members=[user_2])\n team_3 = self.create_team(org_2, members=[user_2])\n project_2 = self.create_project(organization=org_2, teams=[team_2, team_3])\n\n assert self.get_send_to_member(project_2, user_2.id) == {\n ExternalProviders.EMAIL: {user_service.serialize_user(user_2)},\n ExternalProviders.SLACK: {user_service.serialize_user(user_2)},\n }\n assert self.get_send_to_member(self.project, user_2.id) == {}\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 113, "n_words": 36, "vocab_size": 26, "complexity": 1, "nloc": 11, "token_counts": 121, "n_ast_nodes": 184, "n_identifiers": 22, "random_cut": "def test_other_org_user(self):\n org_2 = self.create_organizatio" }, { "id": 249234, "commit_id": "1595052b2681fb86c1c1b9a6028c1bc0d38a2e4b", "repo": "synapse", "path": "tests/rest/admin/test_device.py", "file_name": "test_device.py", "fun_name": "test_no_auth", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13479)\n\nReplace\r\n- `HTTPStatus.NOT_FOUND`\r\n- `HTTPStatus.FORBIDDEN`\r\n- `HTTPStatus.UNAUTHORIZED`\r\n- `HTTPStatus.CONFLICT`\r\n- `HTTPStatus.CREATED`\r\n\r\nSigned-off-by: Dirk Klimpel ", "code": "def test_no_auth(self) -> None:\n \n channel = self.make_request(\"GET\", self.url, b\"{}\")\n\n self.assertEqual(\n 401,\n channel.code,\n msg=channel.json_body,\n )\n self.assertEqual(Codes.MISSING_TOKEN, channel.json_body[\"errcode\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 84, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 11, "token_counts": 55, "n_ast_nodes": 88, "n_identifiers": 11, "random_cut": "def test_no_auth(self) -> None:\n \n channel = self.make_request(\"GET\", self.url, b\"{}\")\n\n self.assertEqual(\n 401,\n channel.code,\n msg=channel.json_body,\n )\n s" }, { "id": 13189, "commit_id": "cdaf7f87ececf9e13b517379ca183b17f0d7b007", "repo": "jina", "path": "tests/unit/serve/gateway/test_gateway.py", "file_name": "test_gateway.py", "fun_name": "_start_gateway_runtime", "commit_message": "feat: allow passing custom gateway in Flow (#5189)", "code": "def _start_gateway_runtime(uses, uses_with, worker_port):\n port = random_port()\n\n p = multiprocessing.Process(\n target=_create_gateway_runtime,\n args=(port, uses, uses_with, worker_port),\n daemon=True,\n )\n p.start()\n time.sleep(1)\n return port, p\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 60, "n_words": 22, "vocab_size": 19, "complexity": 1, "nloc": 10, "token_counts": 56, "n_ast_nodes": 83, "n_identifiers": 16, "random_cut": "def _start_gateway_runtime(uses, uses_with, worker_port):\n port = random_port()\n\n p = multiprocessing.Process(\n " }, { "id": 165192, "commit_id": "63616a622186068e487b3fd5304022c27f6aa6db", "repo": "pandas", "path": "pandas/tests/plotting/frame/test_frame_subplots.py", "file_name": "test_frame_subplots.py", "fun_name": "test_bar_barwidth_position_int", "commit_message": "TST: Don't mark all plotting tests as slow 
(#46003)", "code": "def test_bar_barwidth_position_int(self, w):\n # GH 12979\n df = DataFrame(np.random.randn(5, 5))\n ax = df.plot.bar(stacked=True, width=w)\n ticks = ax.xaxis.get_ticklocs()\n tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))\n assert ax.get_xlim() == (-0.75, 4.75)\n # check left-edge of bars\n assert ax.patches[0].get_x() == -0.5\n assert ax.patches[-1].get_x() == 3.5\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 103, "n_words": 41, "vocab_size": 34, "complexity": 1, "nloc": 8, "token_counts": 119, "n_ast_nodes": 170, "n_identifiers": 22, "random_cut": "def test_bar_barwidth_position_int(self, w):\n # GH 12979\n df = DataFrame(np.random.randn(5, 5))\n ax = df.plot.bar(stacked=True, width=w)\n ticks = ax.xaxis.get_ticklocs()\n tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))\n assert ax.get_xlim() == (-0.75, 4.75)\n # check left-edge of bars\n assert ax.patches[0].get_x() == -0.5\n assert ax.patches[-1].get_x() == 3.5\n" }, { "id": 163593, "commit_id": "b8cce91ee7bcc86877d4679cd8a9454b5995c2c6", "repo": "pandas", "path": "pandas/tests/indexes/test_base.py", "file_name": "test_base.py", "fun_name": "test_format_missing", "commit_message": "BUG: do not replace all nulls with \"NaN\"-string in Series index (#45283)", "code": "def test_format_missing(self, vals, nulls_fixture):\n # 2845\n vals = list(vals) # Copy for each iteration\n vals.append(nulls_fixture)\n index = Index(vals)\n\n formatted = index.format()\n null_repr = \"NaN\" if isinstance(nulls_fixture, float) else str(nulls_fixture)\n expected = [str(index[0]), str(index[1]), str(index[2]), null_repr]\n\n assert formatted == expected\n assert index[3] is nulls_fixture\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 106, "n_words": 43, "vocab_size": 35, "complexity": 2, "nloc": 9, "token_counts": 89, "n_ast_nodes": 138, "n_identifiers": 15, "random_cut": "def test_format_missing(self, vals, nulls_fixture):\n # 2845\n vals = list(vals) # Copy for each iteration\n vals.append(nulls_fixture)\n " }, { "id": 243790, "commit_id": "2ae55ccbdad9c842929fb238ea1eb81d1f999024", "repo": "Pillow", "path": "src/PIL/Jpeg2KImagePlugin.py", "file_name": "Jpeg2KImagePlugin.py", "fun_name": "_open", "commit_message": "Improve exception traceback readability", "code": "def _open(self):\n sig = self.fp.read(4)\n if sig == b\"\\xff\\x4f\\xff\\x51\":\n self.codec = \"j2k\"\n self._size, self.mode = _parse_codestream(self.fp)\n else:\n sig = sig + self.fp.read(8)\n\n if sig == b\"\\x00\\x00\\x00\\x0cjP \\x0d\\x0a\\x87\\x0a\":\n self.codec = \"jp2\"\n header = _parse_jp2_header(self.fp)\n self._size, self.mode, self.custom_mimetype, dpi = header\n if dpi is not None:\n self.info[\"dpi\"] = dpi\n else:\n msg = \"not a JPEG 2000 file\"\n raise SyntaxError(msg)\n\n if self.size is None or self.mode is None:\n msg = \"unable to determine size/mode\"\n raise SyntaxError(msg)\n\n self._reduce = 0\n self.layers = 0\n\n fd = -1\n length = -1\n\n try:\n fd = self.fp.fileno()\n length = os.fstat(fd).st_size\n except Exception:\n fd = -1\n try:\n pos = self.fp.tell()\n self.fp.seek(0, io.SEEK_END)\n length = self.fp.tell()\n self.fp.seek(pos)\n except Exception:\n length = -1\n\n self.tile = [\n (\n \"jpeg2k\",\n (0, 0) + self.size,\n 0,\n (self.codec, self._reduce, self.layers, fd, length),\n )\n ]\n", "url": 
"https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 611, "n_words": 129, "vocab_size": 74, "complexity": 8, "nloc": 43, "token_counts": 266, "n_ast_nodes": 441, "n_identifiers": 32, "random_cut": "def _open(self):\n sig = self.fp.read(4)\n if sig == b\"\\xff\\x4f\\xff\\x51\":\n self.codec = \"j2k\"\n self._size, self.mode = _parse_codestream(self.fp)\n else:\n sig = sig + self.fp.read(8)\n\n if sig == b\"\\x00\\x00\\x00\\x0cjP \\x0d\\x0a\\x87\\x0a\":\n self.codec = \"jp2\"\n header = _parse_jp2_header(self.fp)\n self._size, self.mode, self.custom_mimetype, dpi = header\n if dpi is not None:\n self.info[\"dpi\"] = dpi\n else:\n msg = \"not a JPEG 2000 file\"\n raise SyntaxError(msg)\n\n if self.size is None or self.mode is None:\n msg = \"unable to determine size/mode\"\n raise S" }, { "id": 84089, "commit_id": "a142fbff85302c5e3acb2e204eca2e9c75dbc74b", "repo": "zulip", "path": "zerver/tests/test_message_topics.py", "file_name": "test_message_topics.py", "fun_name": "test_get_topics_web_public_stream_web_public_request", "commit_message": "tests: Refactor away result.json() calls with helpers.\n\nSigned-off-by: Zixuan James Li ", "code": "def test_get_topics_web_public_stream_web_public_request(self) -> None:\n iago = self.example_user(\"iago\")\n stream = self.make_stream(\"web-public-stream\", is_web_public=True)\n self.subscribe(iago, stream.name)\n\n for i in range(3):\n self.send_stream_message(iago, stream.name, topic_name=\"topic\" + str(i))\n\n endpoint = f\"/json/users/me/{stream.id}/topics\"\n result = self.client_get(endpoint)\n history = self.assert_json_success(result)[\"topics\"]\n self.assertEqual(\n [topic[\"name\"] for topic in history],\n [\n \"topic2\",\n \"topic1\",\n \"topic0\",\n ],\n )\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 194, "n_words": 43, "vocab_size": 37, "complexity": 3, "nloc": 17, "token_counts": 112, "n_ast_nodes": 191, "n_identifiers": 22, "random_cut": "def test_get_topics_web_public_stream_web_public_request(self) -> None:\n iago = self.example_user(\"iago\")\n stream = self.make_stream(\"web-public-stream\", is_web_public=True)\n self.subscribe(iago, stream.name)\n\n for i in range(3):\n self.send_stream_message(iago, " }, { "id": 8067, "commit_id": "e4fc06f986e03919d9aef3ab55c05fee5a6b9d3a", "repo": "ludwig", "path": "ludwig/datasets/kaggle.py", "file_name": "kaggle.py", "fun_name": "create_kaggle_client", "commit_message": "Config-first Datasets API (ludwig.datasets refactor) (#2479)\n\n* Adds README and stub for reading dataset configs.\r\n\r\n* Adds __init__.py for configs, moves circular import into function scope in ludwig/datasets/__init__.py\r\n\r\n* Print config files in datasets folder.\r\n\r\n* First pass at automatic archive extraction.\r\n\r\n* Implemented downloading and extract.\r\n\r\n* Refactor DatasetConfig into its own file.\r\n\r\n* Fixed bugs downloading kaggle dataset.\r\n\r\n* Makes registry store dataset instances, not classes. 
Also comments out import_submodules for testing.\r\n\r\n* Typo fix.\r\n\r\n* Only pass data files on to load_unprocessed_dataframe, symlink directories.\r\n\r\n* Downloading dataset files into existing directory if exists.\r\n\r\n* Refactor: make datasets fully config-first, lazy load dataset loaders.\r\n\r\n* Implemented agnews custom loader.\r\n\r\n* Implements train/validation/test split by files, and globbing support\r\n\r\n* Adds _glob_multiple\r\n\r\n* Adds adult_census_income, agnews, allstate_claims_severity.\r\n\r\n* Implements sha256 verification, adds more datasets up to creditcard_fraud.\r\n\r\n* Adds checksums, dbpedia, electricity\r\n\r\n* Fixes gzip file name returned as string not list, adds up to forest_cover dataset.\r\n\r\n* Adds datasets up to reuters_r8\r\n\r\n* Adds all datasets which don't require a custom class.\r\n\r\n* Restore dataset import behavior by implementing module __getattr__\r\n\r\n* Adds KDD datasets.\r\n\r\n* Adds ieee_fraud.\r\n\r\n* Adds imbalanced_insurance, insurance_lite.\r\n\r\n* Adds mnist.\r\n\r\n* Completes implementation of all of the built-in datasets.\r\n\r\n* Made cache_dir optional, read from environment variable if set.\r\n\r\n* Upgrades datasets tests.\r\n\r\n* Adds test for new dataset config API. Also adds scripts for dataset link checking.\r\n\r\n* Fixes loading allstate claims severity dataset.\r\n\r\n* Use @lru_cache(1), @cache not supported in python < 3.9\r\n\r\n* Deletes dataset registry, updates automl test utils\r\n\r\n* Fix imports of datasets API.\r\n\r\n* Adds more detail to sha256: docstring and basic README\r\n\r\n* Copy-paste link oops.\r\n\r\n* Fixes handling of nested archive types like .tar.bz Also adds a LUDWIG_CACHE and export to the README\r\n\r\n* Adds link for twitter bots.\r\n\r\n* Fix order of splits in README.md\r\n\r\n* typo\r\n\r\n* Adds verify as a phase in doc string.\r\n\r\n* Support .pqt, .pq extensions for parquet.\r\n\r\n* Handle nested archives with longer file extensions like .csv.zip\r\n\r\n* Handle nested .gz types properly too. 
Check all extensions with .endswith\r\n\r\n* Handle all archive types with .endswith\r\n\r\n* Update ludwig/datasets/loaders/split_loaders.py\r\n\r\nCo-authored-by: Joppe Geluykens \r\n\r\n* Adds explanation for export, fixes preserve_paths (should be relative to processed_dataset_dir)\r\n\r\n* Resolve preserved paths relative to raw dataset dir before move.\r\n\r\n* Catch runtime exception from extracting sub-archives.\r\n\r\nCo-authored-by: Daniel Treiman \r\nCo-authored-by: Joppe Geluykens ", "code": "def create_kaggle_client():\n # Need to import here to prevent Kaggle from authenticating on import\n from kaggle import api\n\n return api\n\n\n@contextmanager", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "@contextmanager", "n_ast_errors": 1, "ast_levels": 6, "n_whitespaces": 28, "n_words": 21, "vocab_size": 16, "complexity": 1, "nloc": 3, "token_counts": 10, "n_ast_nodes": 23, "n_identifiers": 4, "random_cut": "def create_kaggle_client():\n # Need " }, { "id": 15000, "commit_id": "dbd6d1c306421a24581647dd50f82f3e11dadf4e", "repo": "ccxt", "path": "python/ccxt/async_support/huobi.py", "file_name": "huobi.py", "fun_name": "parse_trade", "commit_message": "1.66.21\n\n[ci skip]", "code": "def parse_trade(self, trade, market=None):\n #\n # spot fetchTrades(public)\n #\n # {\n # \"amount\": 0.010411000000000000,\n # \"trade-id\": 102090736910,\n # \"ts\": 1583497692182,\n # \"id\": 10500517034273194594947,\n # \"price\": 9096.050000000000000000,\n # \"direction\": \"sell\"\n # }\n #\n # spot fetchMyTrades(private)\n #\n # {\n # 'symbol': 'swftcbtc',\n # 'fee-currency': 'swftc',\n # 'filled-fees': '0',\n # 'source': 'spot-api',\n # 'id': 83789509854000,\n # 'type': 'buy-limit',\n # 'order-id': 83711103204909,\n # 'filled-points': '0.005826843283532154',\n # 'fee-deduct-currency': 'ht',\n # 'filled-amount': '45941.53',\n # 'price': '0.0000001401',\n # 'created-at': 1597933260729,\n # 'match-id': 100087455560,\n # 'role': 'maker',\n # 'trade-id': 100050305348\n # }\n #\n # linear swap isolated margin fetchOrder details\n #\n # {\n # \"trade_id\": 131560927,\n # \"trade_price\": 13059.800000000000000000,\n # \"trade_volume\": 1.000000000000000000,\n # \"trade_turnover\": 13.059800000000000000,\n # \"trade_fee\": -0.005223920000000000,\n # \"created_at\": 1603703614715,\n # \"role\": \"taker\",\n # \"fee_asset\": \"USDT\",\n # \"profit\": 0,\n # \"real_profit\": 0,\n # \"id\": \"131560927-770334322963152896-1\"\n # }\n #\n marketId = self.safe_string(trade, 'symbol')\n market = self.safe_market(marketId, market)\n symbol = market['symbol']\n timestamp = self.safe_integer_2(trade, 'ts', 'created-at')\n timestamp = self.safe_integer(trade, 'created_at', timestamp)\n order = self.safe_string(trade, 'order-id')\n side = self.safe_string(trade, 'direction')\n type = self.safe_string(trade, 'type')\n if type is not None:\n typeParts = type.split('-')\n side = typeParts[0]\n type = typeParts[1]\n takerOrMaker = self.safe_string(trade, 'role')\n priceString = self.safe_string_2(trade, 'price', 'trade_price')\n amountString = self.safe_string_2(trade, 'filled-amount', 'amount')\n amountString = self.safe_string(trade, 'trade_volume', amountString)\n costString = self.safe_string(trade, 'trade_turnover')\n fee = None\n feeCost = self.safe_string_2(trade, 'filled-fees', 'trade_fee')\n feeCurrencyId = self.safe_string_2(trade, 'fee-currency', 'fee_asset')\n feeCurrency = self.safe_currency_code(feeCurrencyId)\n filledPoints = self.safe_string(trade, 'filled-points')\n 
if filledPoints is not None:\n if (feeCost is None) or Precise.string_equals(feeCost, '0'):\n feeCost = filledPoints\n feeCurrency = self.safe_currency_code(self.safe_string(trade, 'fee-deduct-currency'))\n if feeCost is not None:\n fee = {\n 'cost': feeCost,\n 'currency': feeCurrency,\n }\n tradeId = self.safe_string_2(trade, 'trade-id', 'tradeId')\n id = self.safe_string_2(trade, 'trade_id', 'id', tradeId)\n return self.safe_trade({\n 'id': id,\n 'info': trade,\n 'order': order,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'symbol': symbol,\n 'type': type,\n 'side': side,\n 'takerOrMaker': takerOrMaker,\n 'price': priceString,\n 'amount': amountString,\n 'cost': costString,\n 'fee': fee,\n }, market)\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 1369, "n_words": 295, "vocab_size": 174, "complexity": 6, "nloc": 49, "token_counts": 369, "n_ast_nodes": 665, "n_identifiers": 33, "random_cut": "def parse_trade(self, trade, market=None):\n #\n # spot fetchTrades(public)\n #\n # {\n # \"amount\": 0.010411000000000000,\n # \"trade-id\": 102090736910,\n # \"ts\": 1583497692182,\n # \"id\": 10500517034273194594947,\n # \"price\": 9096.050000000000000000,\n # \"direction\": \"sell\"\n # }\n #\n # spot fetchMyTrades(private)\n #\n # {\n # 'symbol': 'swftcbtc',\n # 'fee-currency': 'swftc',\n # 'filled-fees': '0',\n # 'source': 'spot-api',\n # 'id': 83789509854000,\n # 'type': 'buy-limit',\n # 'order-id': 83711103204909,\n # 'filled-points': '0.005826843283532154',\n # 'fee-deduct-currency': 'ht',\n # 'filled-amount': '45941.53',\n " }, { "id": 168733, "commit_id": "786c28fe929ed65298bfc723aa1cdbe49a68ae0c", "repo": "pandas", "path": "pandas/tests/base/test_value_counts.py", "file_name": "test_value_counts.py", "fun_name": "test_value_counts_null", "commit_message": "TST: Filter/test pyarrow PerformanceWarnings (#48093)", "code": "def test_value_counts_null(null_obj, index_or_series_obj):\n orig = index_or_series_obj\n obj = orig.copy()\n\n if not allow_na_ops(obj):\n pytest.skip(\"type doesn't allow for NA operations\")\n elif len(obj) < 1:\n pytest.skip(\"Test doesn't make sense on empty data\")\n elif isinstance(orig, pd.MultiIndex):\n pytest.skip(f\"MultiIndex can't hold '{null_obj}'\")\n\n values = obj._values\n values[0:2] = null_obj\n\n klass = type(obj)\n repeated_values = np.repeat(values, range(1, len(values) + 1))\n obj = klass(repeated_values, dtype=obj.dtype)\n\n # because np.nan == np.nan is False, but None == None is True\n # np.nan would be duplicated, whereas None wouldn't\n counter = collections.Counter(obj.dropna())\n expected = Series(dict(counter.most_common()), dtype=np.int64)\n expected.index = expected.index.astype(obj.dtype)\n\n result = obj.value_counts()\n if obj.duplicated().any():\n # TODO(GH#32514):\n # Order of entries with the same count is inconsistent on CI (gh-32449)\n with tm.maybe_produces_warning(\n PerformanceWarning,\n pa_version_under7p0 and getattr(obj.dtype, \"storage\", \"\") == \"pyarrow\",\n ):\n expected = expected.sort_index()\n with tm.maybe_produces_warning(\n PerformanceWarning,\n pa_version_under7p0 and getattr(obj.dtype, \"storage\", \"\") == \"pyarrow\",\n ):\n result = result.sort_index()\n\n if not isinstance(result.dtype, np.dtype):\n # i.e IntegerDtype\n expected = expected.astype(\"Int64\")\n tm.assert_series_equal(result, expected)\n\n expected[null_obj] = 3\n\n result = obj.value_counts(dropna=False)\n if 
obj.duplicated().any():\n # TODO(GH#32514):\n # Order of entries with the same count is inconsistent on CI (gh-32449)\n with tm.maybe_produces_warning(\n PerformanceWarning,\n pa_version_under7p0 and getattr(obj.dtype, \"storage\", \"\") == \"pyarrow\",\n ):\n expected = expected.sort_index()\n with tm.maybe_produces_warning(\n PerformanceWarning,\n pa_version_under7p0 and getattr(obj.dtype, \"storage\", \"\") == \"pyarrow\",\n ):\n result = result.sort_index()\n tm.assert_series_equal(result, expected)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 521, "n_words": 200, "vocab_size": 103, "complexity": 11, "nloc": 46, "token_counts": 363, "n_ast_nodes": 615, "n_identifiers": 44, "random_cut": "def test_value_counts_null(null_obj, index_or_series_obj):\n orig = index_or_series_obj\n obj = orig.copy()\n\n if not allow_na_ops(obj):\n pytest.skip(\"type doesn't allow for NA operations\")\n elif len(obj) < 1:\n pytest.skip(\"Test doesn't make sense on empty data\")\n elif isinstance(orig, pd.MultiIndex):\n pytest.skip(f\"MultiIndex can't hold '{null_obj}'\")\n\n values = obj._values\n values[0:2] = null_obj\n\n klass = type(obj)\n repeated_values = np.repeat(values, range(1, len(values) + 1))\n obj = klass(repeated_values, dtype=obj.dtype)\n\n # because np.nan == np.nan is False, but None == None is True\n # np.nan would be duplicated, whereas None wouldn't\n counter = collections.Counter(obj.dropna())\n expected = Series(dict(counter.most_common()), dtype=np.int64)\n expected.index = expected.index.astype(obj.dtype)\n\n result = obj.value_counts()\n if obj.duplicated().any():\n # TODO(GH#32514):\n # Order of entries with the same count is inconsistent on CI (gh-32449)\n with tm.maybe_produces_warning(\n PerformanceWarning,\n pa_version_under7p0 and getattr(obj.dtype, \"storage\", \"\") == \"pyarrow\",\n ):\n expected = expected.sort_index()\n with tm.maybe_produces_warning(\n PerformanceWarning,\n pa_version_under7p0 and getattr(obj.dtype, \"storage\", \"\") == \"pyarrow\",\n " }, { "id": 72067, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_page_chooser.py", "file_name": "test_page_chooser.py", "fun_name": "test_type_missing", "commit_message": "Reformat with black", "code": "def test_type_missing(self):\n \n self.assertEqual(\n self.get_best_root({\"page_type\": \"tests.BusinessIndex\"}), self.tree_root\n )\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 39, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 4, "token_counts": 25, "n_ast_nodes": 46, "n_identifiers": 5, "random_cut": "def test_type_missing(self):\n \n self.assertEqual(\n self.get_best_root({\"page_type\": \"tests.B" }, { "id": 69348, "commit_id": "d59ed24e6ca2a1ff62963c282882a2d52691b7c6", "repo": "erpnext", "path": "erpnext/manufacturing/report/work_order_consumed_materials/work_order_consumed_materials.py", "file_name": "work_order_consumed_materials.py", "fun_name": "get_returned_materials", "commit_message": "feat: provision to return non consumed components against the work order", "code": "def get_returned_materials(work_orders):\n\traw_materials_qty = defaultdict(float)\n\n\traw_materials = frappe.get_all(\n\t\t\"Stock Entry\",\n\t\tfields=[\"`tabStock Entry Detail`.`item_code`\", \"`tabStock Entry Detail`.`qty`\"],\n\t\tfilters=[\n\t\t\t[\"Stock 
Entry\", \"is_return\", \"=\", 1],\n\t\t\t[\"Stock Entry Detail\", \"docstatus\", \"=\", 1],\n\t\t\t[\"Stock Entry\", \"work_order\", \"in\", [d.name for d in work_orders]],\n\t\t],\n\t)\n\n\tfor d in raw_materials:\n\t\traw_materials_qty[d.item_code] += d.qty\n\n\tfor row in work_orders:\n\t\trow.returned_qty = 0.0\n\t\tif raw_materials_qty.get(row.raw_material_item_code):\n\t\t\trow.returned_qty = raw_materials_qty.get(row.raw_material_item_code)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 41, "n_words": 58, "vocab_size": 41, "complexity": 5, "nloc": 17, "token_counts": 120, "n_ast_nodes": 190, "n_identifiers": 18, "random_cut": "def get_returned_materials(work_orders):\n\traw_materials_qty = defaultdict(float)\n\n\traw_materials = frappe.get_all(\n\t\t\"Stock Entry\",\n\t\tfields=[\"`tabStock Entry Detail`.`item_code`\", \"`tabStock Entry Detail`.`qty`\"],\n\t\tfilters=[\n\t\t\t[\"Stock Entry\", \"is_return\", \"=\", 1],\n\t\t\t[\"Stock Entry Detail\", \"docstatus\", \"=\", 1],\n\t\t\t[\"Stock Entry\", \"work_order\", \"in\", [d.name for d in work_orders]],\n\t\t],\n\t)\n\n\tfor d in raw_materials:\n\t\traw_materials_qty[d.item_code] += d.qty\n\n\tfor row in work_orders:\n\t\trow.returned_qty = 0.0\n\t\tif raw_materials_qty.get(row.raw_material_item_code):\n\t\t\trow.returned_qty = raw_materials_qty.get(row.raw_material_item_code)\n\n" }, { "id": 6229, "commit_id": "b77b6ca0afa3439103ab164d80be61652bee21dc", "repo": "ludwig", "path": "tests/integration_tests/utils.py", "file_name": "utils.py", "fun_name": "audio_feature", "commit_message": "Removes/renames some references to legacy config keys (#1775)\n\n* regularizer settings no longer supported for modules.\r\n\r\n* s/fc_size/output_size\r\n\r\n* Removes regularize parameter.\r\n\r\nCo-authored-by: Daniel Treiman ", "code": "def audio_feature(folder, **kwargs):\n feature = {\n \"name\": \"audio_\" + random_string(),\n \"type\": \"audio\",\n \"preprocessing\": {\n \"audio_feature\": {\n \"type\": \"fbank\",\n \"window_length_in_s\": 0.04,\n \"window_shift_in_s\": 0.02,\n \"num_filter_bands\": 80,\n },\n \"audio_file_length_limit_in_s\": 3.0,\n },\n \"encoder\": \"stacked_cnn\",\n \"should_embed\": False,\n \"conv_layers\": [\n {\"filter_size\": 400, \"pool_size\": 16, \"num_filters\": 32},\n {\"filter_size\": 40, \"pool_size\": 10, \"num_filters\": 64},\n ],\n \"output_size\": 16,\n \"destination_folder\": folder,\n }\n feature.update(kwargs)\n feature[COLUMN] = feature[NAME]\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n return feature\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 263, "n_words": 61, "vocab_size": 50, "complexity": 1, "nloc": 26, "token_counts": 135, "n_ast_nodes": 227, "n_identifiers": 10, "random_cut": "def audio_feature(folder, **kwargs):\n feature = {\n \"name\": \"audio_\" + random_string(),\n \"type\": \"audio\",\n \"preprocessing\": {\n \"audio_feature\": {\n \"type\": \"fbank\",\n \"window_length_in_s\": 0.04,\n \"window_shift_in_s\": 0.02,\n \"num_filter_bands\": 80,\n },\n \"audio_file_length_limit_in_s\": 3.0,\n },\n \"encoder\": \"s" }, { "id": 9518, "commit_id": "7375ee364e0df2a417f92593e09557f1b2a3575a", "repo": "insightface", "path": "reconstruction/ostec/external/stylegan2/training/misc.py", "file_name": "misc.py", "fun_name": "parse_config_for_previous_run", "commit_message": "initialize 
ostec", "code": "def parse_config_for_previous_run(run_dir):\n with open(os.path.join(run_dir, 'submit_config.pkl'), 'rb') as f:\n data = pickle.load(f)\n data = data.get('run_func_kwargs', {})\n return dict(train=data, dataset=data.get('dataset_args', {}))\n\n#----------------------------------------------------------------------------\n# Size and contents of the image snapshot grids that are exported\n# periodically during training.\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 48, "n_words": 36, "vocab_size": 33, "complexity": 1, "nloc": 5, "token_counts": 62, "n_ast_nodes": 109, "n_identifiers": 14, "random_cut": "def parse_config_for_previous_run(run_dir):\n with open(os.path.join(run_dir, 'submit_config.pkl'), " }, { "id": 159133, "commit_id": "36eb9c9a5fcca2160e54a6cde5076c93db5bd70b", "repo": "rasa", "path": "tests/core/training/test_story_conflict.py", "file_name": "test_story_conflict.py", "fun_name": "test_get_previous_event", "commit_message": "Update dependencies in 3.0 to align with rasa-sdk (#10667)\n\n* align dependencies\r\n* use black 21.7b0\r\n* apply black and docstring reformatting\r\n* add changelog", "code": "async def test_get_previous_event():\n assert _get_previous_event(\n {PREVIOUS_ACTION: {\"action_name\": \"utter_greet\"}, USER: {\"intent\": \"greet\"}}\n ) == (\"action\", \"utter_greet\")\n assert _get_previous_event(\n {PREVIOUS_ACTION: {\"action_text\": \"this is a test\"}, USER: {\"intent\": \"greet\"}}\n ) == (\"bot utterance\", \"this is a test\")\n assert (\n _get_previous_event(\n {\n PREVIOUS_ACTION: {\"action_name\": ACTION_LISTEN_NAME},\n USER: {\"intent\": \"greet\"},\n }\n )\n == (\"intent\", \"greet\")\n )\n\n", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 154, "n_words": 50, "vocab_size": 31, "complexity": 1, "nloc": 16, "token_counts": 88, "n_ast_nodes": 164, "n_identifiers": 5, "random_cut": "async def test_get_previous_event():\n assert _get_previous_event(\n {PREVIOUS_ACTION: {\"action_name\": \"utter_greet\"}, USER: {\"intent\": \"greet\"}}\n ) == (\"action\", \"utter_greet\")\n assert _get_previous_event(\n {PREVIOUS_ACTION: {\"action_text\": \"this is a test\"}, USER: {\"intent\": \"greet\"}}\n ) == (\"bot utterance\", \"this is a test\"" }, { "id": 8296, "commit_id": "c99cab3a674e31885e5608a4aed73a64b1901c55", "repo": "ludwig", "path": "tests/ludwig/backend/test_ray.py", "file_name": "test_ray.py", "fun_name": "test_get_trainer_kwargs", "commit_message": "Allow explicitly plumbing through nics (#2605)", "code": "def test_get_trainer_kwargs(trainer_config, cluster_resources, num_nodes, expected_kwargs):\n with patch(\"ludwig.backend.ray.ray.cluster_resources\", return_value=cluster_resources):\n with patch(\"ludwig.backend.ray._num_nodes\", return_value=num_nodes):\n trainer_config_copy = copy.deepcopy(trainer_config)\n actual_kwargs = get_trainer_kwargs(**trainer_config_copy)\n\n # Function should not modify the original input\n assert trainer_config_copy == trainer_config\n\n actual_backend = actual_kwargs.pop(\"backend\")\n expected_backend = expected_kwargs.pop(\"backend\")\n\n assert type(actual_backend) == type(expected_backend)\n assert actual_backend.nics == expected_backend.nics\n assert actual_kwargs == expected_kwargs\n\n\n@pytest.mark.parametrize(\n \"trainer_kwargs,current_env_value,expected_env_value\",\n [\n ({\"use_gpu\": 
False, \"num_workers\": 2}, None, \"1\"),\n ({\"use_gpu\": False, \"num_workers\": 1}, None, None),\n ({\"use_gpu\": True, \"num_workers\": 2}, None, None),\n ({\"use_gpu\": True, \"num_workers\": 2}, \"1\", \"1\"),\n ({\"use_gpu\": True, \"num_workers\": 2}, \"\", \"\"),\n ],\n)", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"trainer_kwargs,current_env_value,expected_env_value\",\n [\n ({\"use_gpu\": False, \"num_workers\": 2}, None, \"1\"),\n ({\"use_gpu\": False, \"num_workers\": 1}, None, None),\n ({\"use_gpu\": True, \"num_workers\": 2}, None, None),\n ({\"use_gpu\": True, \"num_workers\": 2}, \"1\", \"1\"),\n ({\"use_gpu\": True, \"num_workers\": 2}, \"\", \"\"),\n ],\n)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 232, "n_words": 82, "vocab_size": 52, "complexity": 1, "nloc": 11, "token_counts": 88, "n_ast_nodes": 301, "n_identifiers": 20, "random_cut": "def test_get_trainer_kwargs(trainer_config, cluster_resources, num_nodes, expected_kwargs):\n with patch(\"ludwig.backend.ray.ray.cluster_resources\", return_value=cluster_resources):\n with patch(\"ludwig.backend.ray._num_nodes\", return_value=num_nodes):\n trainer_config_copy = copy.deepcopy(trainer_config)\n actual_kwargs = get_trainer_kwargs(**trainer_config_copy)\n\n # Function should not modify the original input\n assert trainer_config_copy == trainer_config\n\n actual_backend = actual_kwargs.pop(\"backend\")\n expected_backend = expected_kwargs.pop(\"backend\")\n\n assert type(actual_backend) == type(expected_backend)\n assert actual_backend.nics == expected_backend.nics\n assert " }, { "id": 257547, "commit_id": "4d2a06989db0b8bff5570624b13c734dfc1e3d68", "repo": "haystack", "path": "test/pipelines/test_pipeline.py", "file_name": "test_pipeline.py", "fun_name": "test_validate_pipeline_config_component_with_json_input_invalid_value", "commit_message": "Fix YAML validation for `ElasticsearchDocumentStore.custom_query` (#2789)\n\n* Add exception for in the validation code\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Add tests\r\n\r\n* Update Documentation & Code Style\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def test_validate_pipeline_config_component_with_json_input_invalid_value():\n with pytest.raises(PipelineConfigError, match=\"does not contain valid JSON\"):\n validate_config_strings(\n {\n \"components\": [\n {\"name\": \"test\", \"type\": \"test\", \"params\": {\"custom_query\": \"this is surely not JSON! :)\"}}\n ]\n }\n )\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 115, "n_words": 28, "vocab_size": 26, "complexity": 1, "nloc": 9, "token_counts": 42, "n_ast_nodes": 84, "n_identifiers": 6, "random_cut": "def test_validate_pipeline_config_component_with_json_input_invalid_value():\n with pytest.raises(PipelineConfigError, match=\"does not contain valid JSON\"):\n validate_config_strings(\n {\n \"components\": [\n {\"name\": \"test\", \"type\": \"test\", \"params\": {\"custom_query\": \"this is surely not JSON! 
:)\"}}\n ]\n }\n " }, { "id": 101907, "commit_id": "dab823a3eb7a5257cb1e0818ee10ed234d3de97f", "repo": "faceswap", "path": "lib/gui/display_command.py", "file_name": "display_command.py", "fun_name": "display_item_set", "commit_message": "Typing - lib.gui.display_command", "code": "def display_item_set(self) -> None:\n \n logger.trace(\"Loading latest preview\") # type:ignore\n size = 256 if self.command == \"convert\" else 128\n get_images().load_latest_preview(thumbnail_size=int(size * get_config().scaling_factor),\n frame_dims=(self.winfo_width(), self.winfo_height()))\n self.display_item = get_images().previewoutput\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 102, "n_words": 26, "vocab_size": 25, "complexity": 2, "nloc": 7, "token_counts": 69, "n_ast_nodes": 118, "n_identifiers": 17, "random_cut": "def display_item_set(self) -> None:\n \n logger.trace(\"Loadin" }, { "id": 135515, "commit_id": "28a295968b445635efd1105b900cc624312fc49e", "repo": "ray", "path": "python/ray/data/tests/conftest.py", "file_name": "conftest.py", "fun_name": "ds_pandas_list_multi_column_format", "commit_message": "[AIR] Add `batch_size` arg for `BatchMapper`. (#29193)\n\nThe default batch_size of 4096 at the Datasets level doesn't suffice for all use cases: it can be too large for wide tables and large images, leading to DRAM/GRAM OOms; it can be too small for narrow tables, leading to unnecessary batch slicing overhead and suboptimal vectorized operations in their UDFs. We should allow users to configure the batch_size at the AIR level.\r\n\r\nCloses #29168\r\n\r\nSigned-off-by: Amog Kamsetty \r\nSigned-off-by: Amog Kamsetty \r\nCo-authored-by: Amog Kamsetty \r\nCo-authored-by: Amog Kamsetty ", "code": "def ds_pandas_list_multi_column_format():\n in_df = pd.DataFrame({\"column_1\": [1], \"column_2\": [1]})\n yield ray.data.from_pandas([in_df] * 4)\n\n\n# ===== Arrow dataset formats =====\n@pytest.fixture(scope=\"function\")", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.fixture(scope=\"function\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 22, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 3, "token_counts": 37, "n_ast_nodes": 81, "n_identifiers": 10, "random_cut": "def ds_pandas_list_multi_column_format():\n in_df = pd.DataFrame" }, { "id": 244474, "commit_id": "924c381a78eb70cede198e042ef34e038e05c15a", "repo": "mmdetection", "path": "mmdet/models/detectors/base.py", "file_name": "base.py", "fun_name": "preprocss_aug_testing_data", "commit_message": "Modify RetinaNet model interface", "code": "def preprocss_aug_testing_data(self, data):\n \n\n num_augs = len(data[0]['img'])\n batch_size = len(data)\n aug_batch_imgs = []\n aug_batch_data_samples = []\n\n # adjust `images` and `data_samples` to a list of list\n # outer list is test-time augmentation and inter list\n # is batch dimension\n for aug_index in range(num_augs):\n batch_imgs = []\n batch_data_samples = []\n for batch_index in range(batch_size):\n single_img = data[batch_index]['img'][aug_index]\n\n # to gpu and normalize\n single_img = single_img.to(self.device)\n if self.preprocess_cfg is None:\n # YOLOX does not need preprocess_cfg\n single_img = single_img.float()\n else:\n if self.to_rgb and single_img[0].size(0) == 3:\n single_img = single_img[[2, 1, 0], ...]\n single_img = (single_img -\n self.pixel_mean) / self.pixel_std\n\n batch_imgs.append(single_img)\n 
batch_data_samples.append(\n data[batch_index]['data_sample'][aug_index])\n aug_batch_imgs.append(\n stack_batch(batch_imgs, self.pad_size_divisor, self.pad_value))\n aug_batch_data_samples.append(batch_data_samples)\n\n return aug_batch_imgs, aug_batch_data_samples\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 503, "n_words": 107, "vocab_size": 73, "complexity": 6, "nloc": 25, "token_counts": 188, "n_ast_nodes": 303, "n_identifiers": 26, "random_cut": "def preprocss_aug_testing_data(self, data):\n \n\n num_augs = len(data[0]['img'])\n batch_size = len(data)\n aug_batch_imgs = []\n aug_batch_data_samples = []\n\n # adjust `images` and `data_samples` to a list of list\n # outer list is test-time augmentation and inter list\n # is batch dimension\n for aug_index in range(num_augs):\n batch_imgs = []\n batch_data_samples = []\n for batch_index in range(batch_size):\n single_img = data[batch_index]['img'][aug_index]\n\n # to gpu and normalize\n single_img = single_img.to(self.device)\n if self.preprocess_cfg is None:\n # YOLOX does not need preprocess_cfg\n single_img = single_img.float()\n else:\n if self.to_rgb and single_img[0].size(0) == 3:\n single_img = single_img[[2, 1, 0], .." }, { "id": 196836, "commit_id": "117f9554466e08aa4178137ad65fae1f2d49b340", "repo": "sympy", "path": "sympy/polys/numberfields/minpoly.py", "file_name": "minpoly.py", "fun_name": "_choose_factor", "commit_message": "Moved definition of illegal", "code": "def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):\n \n\n if isinstance(factors[0], tuple):\n factors = [f[0] for f in factors]\n if len(factors) == 1:\n return factors[0]\n\n prec1 = 10\n points = {}\n symbols = dom.symbols if hasattr(dom, 'symbols') else []\n while prec1 <= prec:\n # when dealing with non-Rational numbers we usually evaluate\n # with `subs` argument but we only need a ballpark evaluation\n xv = {x:v if not v.is_number else v.n(prec1)}\n fe = [f.as_expr().xreplace(xv) for f in factors]\n\n # assign integers [0, n) to symbols (if any)\n for n in subsets(range(bound), k=len(symbols), repetition=True):\n for s, i in zip(symbols, n):\n points[s] = i\n\n # evaluate the expression at these points\n candidates = [(abs(f.subs(points).n(prec1)), i)\n for i,f in enumerate(fe)]\n\n # if we get invalid numbers (e.g. 
from division by zero)\n # we try again\n if any(i in _illegal for i, _ in candidates):\n continue\n\n # find the smallest two -- if they differ significantly\n # then we assume we have found the factor that becomes\n # 0 when v is substituted into it\n can = sorted(candidates)\n (a, ix), (b, _) = can[:2]\n if b > a * 10**6: # XXX what to use?\n return factors[ix]\n\n prec1 *= 2\n\n raise NotImplementedError(\"multiple candidates for the minimal polynomial of %s\" % v)\n\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 485, "n_words": 205, "vocab_size": 144, "complexity": 14, "nloc": 24, "token_counts": 256, "n_ast_nodes": 397, "n_identifiers": 42, "random_cut": "def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):\n \n\n if isinstance(factors[0], tuple):\n factors = [f[0] for f in factors]\n if len(factors) == 1:\n return factors[0]\n\n prec1 = 10\n points = {}\n symbols = dom.symbols if hasattr(dom, 'symbols') else []\n while prec1 <= prec:\n # when dealing with non-Rational numbers we usually evaluate\n # with `subs` argument but we only need a ballpark evaluation\n xv = {x:v if not v.is_number else v.n(prec1)}\n fe = [f.as_expr().xreplace(xv) for f in factors]\n\n # assign integers [0, n) to symbols (if any)\n for n in subsets(range(bound), k=len(symbols), repetition=True):\n for s, i in zip(symbols, n):\n points[s] = i\n\n # evaluate the expression at these points\n candidates = [" }, { "id": 128244, "commit_id": "65d0c0aa48be8f9f7faae857d3ab71444997755a", "repo": "ray", "path": "python/ray/serve/controller.py", "file_name": "controller.py", "fun_name": "get_root_url", "commit_message": "[Serve] add alpha gRPC support (#28175)", "code": "def get_root_url(self):\n \n if self.http_state is None:\n return None\n http_config = self.get_http_config()\n if http_config.root_url == \"\":\n if SERVE_ROOT_URL_ENV_KEY in os.environ:\n return os.environ[SERVE_ROOT_URL_ENV_KEY]\n else:\n return (\n f\"http://{http_config.host}:{http_config.port}\"\n f\"{http_config.root_path}\"\n )\n return http_config.root_url\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 180, "n_words": 29, "vocab_size": 23, "complexity": 4, "nloc": 13, "token_counts": 56, "n_ast_nodes": 116, "n_identifiers": 12, "random_cut": "def get_root_url(self):\n \n if self.http_state is None:\n return None\n http_config" }, { "id": 331845, "commit_id": "372ad5fa0dbeb74dcec81db06e9ff69b3d5a2eb6", "repo": "pytorch-image-models", "path": "timm/models/convnext.py", "file_name": "convnext.py", "fun_name": "set_grad_checkpointing", "commit_message": "Significant model refactor and additions:\n* All models updated with revised foward_features / forward_head interface\n* Vision transformer and MLP based models consistently output sequence from forward_features (pooling or token selection considered part of 'head')\n* WIP param grouping interface to allow consistent grouping of parameters for layer-wise decay across all model types\n* Add gradient checkpointing support to a significant % of models, especially popular architectures\n* Formatting and interface consistency improvements across models\n* layer-wise LR decay impl part of optimizer factory w/ scale support in scheduler\n* Poolformer and Volo architectures added", "code": "def set_grad_checkpointing(self, enable=True):\n for s in self.stages:\n s.grad_checkpointing = enable\n", "url": 
"https://github.com/huggingface/pytorch-image-models.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 27, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 3, "token_counts": 21, "n_ast_nodes": 32, "n_identifiers": 6, "random_cut": "def set_grad_checkpointing(self, enable=True):\n for s in s" }, { "id": 241718, "commit_id": "6107ce8e0d2feaed0263c0a60fc6c031603fd9ea", "repo": "lightning", "path": "pytorch_lightning/loops/fit_loop.py", "file_name": "fit_loop.py", "fun_name": "advance", "commit_message": "Add DETAIL logs for batch use cases (#11008)", "code": "def advance(self) -> None: # type: ignore[override]\n \n log.detail(f\"{self.__class__.__name__}: advancing loop\")\n assert self.trainer.train_dataloader is not None\n dataloader = self.trainer.strategy.process_dataloader(self.trainer.train_dataloader)\n data_fetcher = self.trainer._data_connector.get_profiled_dataloader(dataloader)\n\n with self.trainer.profiler.profile(\"run_training_epoch\"):\n self._outputs = self.epoch_loop.run(data_fetcher)\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 80, "n_words": 26, "vocab_size": 24, "complexity": 1, "nloc": 8, "token_counts": 76, "n_ast_nodes": 138, "n_identifiers": 19, "random_cut": "def advance(self) -> None: # type: ignore[override]\n \n log.detail(f\"{self.__class__.__name__}: advancing loop\")\n assert self.trainer.train_dataloader is not None\n dataloader = self.trainer.strategy.process_dataloader(self.trainer.train_dataloader)\n data_fetcher = self.trainer._data_connector.get_profiled_dataloader(dataloader)\n\n with self.train" }, { "id": 218368, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "cleandoc", "commit_message": "add python 3.10.4 for windows", "code": "def cleandoc(doc):\n \n try:\n lines = doc.expandtabs().split('\\n')\n except UnicodeError:\n return None\n else:\n # Find minimum indentation of any non-blank lines after first line.\n margin = sys.maxsize\n for line in lines[1:]:\n content = len(line.lstrip())\n if content:\n indent = len(line) - content\n margin = min(margin, indent)\n # Remove indentation.\n if lines:\n lines[0] = lines[0].lstrip()\n if margin < sys.maxsize:\n for i in range(1, len(lines)): lines[i] = lines[i][margin:]\n # Remove any trailing or leading blank lines.\n while lines and not lines[-1]:\n lines.pop()\n while lines and not lines[0]:\n lines.pop(0)\n return '\\n'.join(lines)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 277, "n_words": 85, "vocab_size": 61, "complexity": 11, "nloc": 21, "token_counts": 156, "n_ast_nodes": 260, "n_identifiers": 19, "random_cut": "def cleandoc(doc):\n \n try:\n lines = doc.expandtabs().split('\\n')\n except UnicodeError:\n return None\n else:\n # Find minimum indentation of any non-blank lines after first line.\n margin = sys.maxsize\n for line in lines[1:]:\n content = len(line.lstrip())\n if content:\n indent = len(line) - content\n margin = min(margin, indent)\n # Remove indentation.\n if lines:\n lines[0] = lines[0].lstrip()\n if margin < sys.maxsize:\n for i in range(1, len(lines)): lines[i] = lines[i][margin:]\n # Remove any trailing or leading blank lines.\n while lines and not lines[-1]:\n lines.pop()\n while lines and not lines[0]:\n 
lines.pop(0)" }, { "id": 179381, "commit_id": "cc0cff893f9d7d472788adc2510c123967b384fe", "repo": "gradio", "path": "test/test_utils.py", "file_name": "test_utils.py", "fun_name": "test_should_fail_with_distribution_not_found", "commit_message": "Format The Codebase\n- black formatting\n- isort formatting", "code": "def test_should_fail_with_distribution_not_found(self, mock_require):\n\n mock_require.side_effect = pkg_resources.DistributionNotFound()\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n version_check()\n self.assertEqual(\n str(w[-1].message),\n \"gradio is not setup or installed properly. Unable to get version info.\",\n )\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 114, "n_words": 27, "vocab_size": 27, "complexity": 1, "nloc": 9, "token_counts": 55, "n_ast_nodes": 95, "n_identifiers": 15, "random_cut": "def test_should_fail_with_distribution_not_found(self, mock_require):\n\n mock_require.side_effect = pkg_resources.DistributionNotFound()\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n version_check()\n self.assertEqual(\n str(w[-1].message),\n \"gradio is not setup or installed properly. Unable to get" }, { "id": 180849, "commit_id": "bf1510165ddd8c0d5b29adf67dfed967995e8a5b", "repo": "gradio", "path": "test/test_components.py", "file_name": "test_components.py", "fun_name": "test_in_blocks", "commit_message": "Support for iterative outputs (#2189)\n\n* Support for iterative outputs (#2162) (#2188)\r\n\r\n* added generator demo\r\n\r\n* fixed demo structure\r\n\r\n* fixes\r\n\r\n* fix failing tests due to refactor\r\n\r\n* test components\r\n\r\n* adding generators\r\n\r\n* fixes\r\n\r\n* iterative\r\n\r\n* formatting\r\n\r\n* add all\r\n\r\n* added demo\r\n\r\n* demo\r\n\r\n* formatting\r\n\r\n* fixed frontend\r\n\r\n* 3.2.1b release\r\n\r\n* removed test queue\r\n\r\n* iterative\r\n\r\n* formatting\r\n\r\n* formatting\r\n\r\n* Support for iterative outputs (#2149)\r\n\r\n* added generator demo\r\n\r\n* fixed demo structure\r\n\r\n* fixes\r\n\r\n* fix failing tests due to refactor\r\n\r\n* test components\r\n\r\n* adding generators\r\n\r\n* fixes\r\n\r\n* iterative\r\n\r\n* formatting\r\n\r\n* add all\r\n\r\n* added demo\r\n\r\n* demo\r\n\r\n* formatting\r\n\r\n* fixed frontend\r\n\r\n* 3.2.1b release\r\n\r\n* iterative\r\n\r\n* formatting\r\n\r\n* formatting\r\n\r\n* reverted queue everywhere\r\n\r\n* added queue to demos\r\n\r\n* added fake diffusion with gif\r\n\r\n* add to demos\r\n\r\n* more complex counter\r\n\r\n* fixes\r\n\r\n* image gif\r\n\r\n* fixes\r\n\r\n* version\r\n\r\n* merged\r\n\r\n* added support for state\r\n\r\n* formatting\r\n\r\n* generating animation\r\n\r\n* fix\r\n\r\n* tests, iterator\r\n\r\n* tests\r\n\r\n* formatting\r\n\r\n* tests for queuing\r\n\r\n* version\r\n\r\n* generating orange border animation\r\n\r\n* testings\r\n\r\n* added to documentation\r\n\r\nCo-authored-by: Ali Abid ", "code": "async def test_in_blocks(self):\n with gr.Blocks() as demo:\n score = gr.State()\n btn = gr.Button()\n btn.click(lambda x: x + 1, score, score)\n\n result = await demo.call_function(0, [0])\n assert result[\"prediction\"] == 1\n result = await demo.call_function(0, [result[\"prediction\"]])\n assert result[\"prediction\"] == 2\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, 
"n_whitespaces": 105, "n_words": 38, "vocab_size": 29, "complexity": 1, "nloc": 9, "token_counts": 86, "n_ast_nodes": 144, "n_identifiers": 13, "random_cut": "async def test_in_blocks(self):\n with gr.Blocks() as demo:\n score = gr.State()\n " }, { "id": 16445, "commit_id": "599367bddf0348d9491990623efcf32c1158d48f", "repo": "ccxt", "path": "python/ccxt/stex.py", "file_name": "stex.py", "fun_name": "fetch_markets", "commit_message": "1.70.39\n\n[ci skip]", "code": "def fetch_markets(self, params={}):\n request = {\n 'code': 'ALL',\n }\n response = self.publicGetCurrencyPairsListCode(self.extend(request, params))\n #\n # {\n # \"success\":true,\n # \"data\":[\n # {\n # \"id\":935,\n # \"currency_id\":662,\n # \"currency_code\":\"ABET\",\n # \"currency_name\":\"Altbet\",\n # \"market_currency_id\":1,\n # \"market_code\":\"BTC\",\n # \"market_name\":\"Bitcoin\",\n # \"min_order_amount\":\"0.00000010\",\n # \"min_buy_price\":\"0.00000001\",\n # \"min_sell_price\":\"0.00000001\",\n # \"buy_fee_percent\":\"0.20000000\",\n # \"sell_fee_percent\":\"0.20000000\",\n # \"active\":true,\n # \"delisted\":false,\n # \"pair_message\":\"\",\n # \"currency_precision\":8,\n # \"market_precision\":8,\n # \"symbol\":\"ABET_BTC\",\n # \"group_name\":\"BTC\",\n # \"group_id\":1\n # }\n # ]\n # }\n #\n result = []\n markets = self.safe_value(response, 'data', [])\n for i in range(0, len(markets)):\n market = markets[i]\n id = self.safe_string(market, 'id')\n numericId = self.safe_integer(market, 'id')\n baseId = self.safe_string(market, 'currency_id')\n quoteId = self.safe_string(market, 'market_currency_id')\n baseNumericId = self.safe_integer(market, 'currency_id')\n quoteNumericId = self.safe_integer(market, 'market_currency_id')\n base = self.safe_currency_code(self.safe_string(market, 'currency_code'))\n quote = self.safe_currency_code(self.safe_string(market, 'market_code'))\n minBuyPrice = self.safe_string(market, 'min_buy_price')\n minSellPrice = self.safe_string(market, 'min_sell_price')\n minPrice = Precise.string_max(minBuyPrice, minSellPrice)\n buyFee = Precise.string_div(self.safe_string(market, 'buy_fee_percent'), '100')\n sellFee = Precise.string_div(self.safe_string(market, 'sell_fee_percent'), '100')\n fee = Precise.string_max(buyFee, sellFee)\n result.append({\n 'id': id,\n 'numericId': numericId,\n 'symbol': base + '/' + quote,\n 'base': base,\n 'quote': quote,\n 'settle': None,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': None,\n 'baseNumericId': baseNumericId,\n 'quoteNumericId': quoteNumericId,\n 'type': 'spot',\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'active': self.safe_value(market, 'active'),\n 'contract': False,\n 'linear': None,\n 'inverse': None,\n 'taker': fee,\n 'maker': fee,\n 'contractSize': None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'price': self.safe_integer(market, 'market_precision'),\n 'amount': self.safe_integer(market, 'currency_precision'),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': self.safe_number(market, 'min_order_amount'),\n 'max': None,\n },\n 'price': {\n 'min': minPrice,\n 'max': None,\n },\n 'cost': {\n 'min': None,\n 'max': None,\n },\n },\n 'info': market,\n })\n return result\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1945, "n_words": 251, "vocab_size": 145, "complexity": 2, "nloc": 77, 
"token_counts": 460, "n_ast_nodes": 814, "n_identifiers": 36, "random_cut": "def fetch_markets(self, params={}):\n request = {\n 'code': 'ALL',\n }\n response = self.publicGetCurrencyPairsListCode(self.extend(request, params))\n #\n # {\n # \"success\":true,\n # \"data\":[\n # {\n # \"id\":935,\n # \"currency_id\":662,\n # \"currency_code\":\"ABET\",\n # \"currency_name\":\"Altbet\",\n # \"market_currency_id\":1,\n # \"market_code\":\"BTC\",\n # \"market_name\":\"Bitcoin\",\n # \"min_order_amount\":\"0.00000010\",\n # \"min_buy_price\":\"0.00000001\",\n # \"min_sell_price\":\"0.00000001\",\n # \"buy_fee_percent\":\"0.20000000\",\n # \"sell_fee_percent\":\"0.20000000\",\n # \"active\":true,\n # \"delisted\":false,\n # \"pair_message\":\"\",\n # \"currency_precision\":8,\n # \"market_precision\":8,\n # \"symbol\":\"ABET_BTC\",\n # \"group_name\":\"BTC\",\n # \"group_id\":1\n # }\n # ]\n # }\n #\n result = []\n markets = self.safe_value(response, 'data', [])\n for i in range(0, len(markets)):\n market = markets[i]\n id = self.safe_string(market, 'id')\n numericId = self.safe_integer(market, 'id')\n baseId = self.safe_string(market, 'currency_id')\n quoteId = self.safe_string(market, 'market_currency_id')\n baseNumericId = self.safe_integer(market, 'currency_id')\n quoteNumericId = self.safe_integer(market, 'market_currency_id')\n base = self.safe_currency_code(self.safe_string(market, 'currency_code'))\n quote = self.safe_currency_code(self.safe_string(market, 'market_code'))\n minBuyPrice = self.safe_string(market, 'min_buy_price')\n minSellPrice = self.safe_string(market, 'min_sell_price')\n minPrice = Precise.string_max(minBuyPrice, minSellPrice)\n buyFee = Precise.string_div(self.safe_string(market, 'buy_fee_percent'), '100')\n sellFee = Precise.string_div(self.safe_string(market, 'sell_fee_percent'), '100')\n fee = Precise.string_max(buyFee, sellFee)\n result.append({\n 'id': id,\n 'numericId': numericId,\n 'symbol': base + '/' + quote,\n 'base': base,\n 'quote': quote,\n 'settle': None,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': None,\n 'baseNumericId': baseNumericId,\n 'quoteNumericId': quoteNumericId,\n 'type': 'spot',\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'active': self.safe_value(market, 'active'),\n 'contract': False,\n 'linear': None,\n 'inverse': None,\n 'taker': fee,\n 'maker': fee,\n 'contractSize': None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'price': self.safe_integer(market, 'market_precision'),\n 'amount': self.safe_integer(market, 'currency_precision'),\n },\n 'limits': {\n 'leverage': {\n " }, { "id": 168510, "commit_id": "252ae0555abf488522f947107dcdee684be6ac8a", "repo": "pandas", "path": "pandas/core/indexes/interval.py", "file_name": "interval.py", "fun_name": "__reduce__", "commit_message": "Revert Interval/IntervalIndex/interval_range.inclusive deprecation (#48116)\n\n* Revert \"Cln tests interval wrt inclusive (#47775)\"\r\n\r\nThis reverts commit 2d6e0b251955d3a2c0c88f7e6ddb57b335ed09b7.\r\n\r\n* Revert \"CLN: Rename private variables to inclusive (#47655)\"\r\n\r\nThis reverts commit 102b3ca2119df822e2b0f346fa936d0fe9f17501.\r\n\r\n* Revert \"TYP: Improve typing interval inclusive (#47646)\"\r\n\r\nThis reverts commit 55064763e8ba55f6ff5370a8dd083767a189d7a4.\r\n\r\n* Revert \"DEPR: Deprecate set_closed and add set_incluive (#47636)\"\r\n\r\nThis reverts commit bd4ff395cbbf4cbde1fc8f1f746cae064a401638.\r\n\r\n* Revert \"DEPR: Remove 
deprecation from private class IntervalTree (#47637)\"\r\n\r\nThis reverts commit f6658ef9fdef5972214fdc338e2c6b5ee308dbf4.\r\n\r\n* Revert \"Revert inclusive default change of IntervalDtype (#47367)\"\r\n\r\nThis reverts commit d9dd1289e07d86928d144e53beb3d5b8ab3c2215.\r\n\r\n* Revert \"ENH: consistency of input args for boundaries - Interval (#46522)\"\r\n\r\nThis reverts commit 7e23a37e1c5bda81234801a6584563e2880769eb.\r\n\r\n* Revert \"ENH: consistency of input args for boundaries - pd.interval_range (#46355)\"\r\n\r\nThis reverts commit 073b3535d7a5171102e5915c38b57c21d13795ae.\r\n\r\n* Fix ArrowIntervalType manually\r\n\r\n* Remove unused import\r\n\r\n* Fix doctest and leftover usage\r\n\r\n* Fix remaining tests\r\n\r\n* Fix wording in doctoring\r\n\r\nCo-authored-by: Patrick Hoefler <61934744+phofl@users.noreply.github.com>", "code": "def __reduce__(self):\n d = {\n \"left\": self.left,\n \"right\": self.right,\n \"closed\": self.closed,\n \"name\": self.name,\n }\n return _new_IntervalIndex, (type(self), d), None\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 83, "n_words": 19, "vocab_size": 19, "complexity": 1, "nloc": 8, "token_counts": 46, "n_ast_nodes": 74, "n_identifiers": 9, "random_cut": "def __reduce__(self):\n d = {\n \"left\": self.left,\n \"right\": self" }, { "id": 7439, "commit_id": "5069f19bc289592c3d57969531e56271cb0bc538", "repo": "ludwig", "path": "ludwig/serve.py", "file_name": "serve.py", "fun_name": "server", "commit_message": "Serve json numpy encoding (#2316)", "code": "def server(model, allowed_origins=None):\n middleware = [Middleware(CORSMiddleware, allow_origins=allowed_origins)] if allowed_origins else None\n app = FastAPI(middleware=middleware)\n\n input_features = {f[COLUMN] for f in model.config[\"input_features\"]}\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 29, "n_words": 21, "vocab_size": 19, "complexity": 3, "nloc": 11, "token_counts": 81, "n_ast_nodes": 76, "n_identifiers": 13, "random_cut": "def server(model, allowed_origins=None):\n middleware = [Middleware(CORSMiddleware, allow_o" }, { "id": 71025, "commit_id": "de3fcba9e95818e9634ab7de6bfcb1f4221f2775", "repo": "wagtail", "path": "wagtail/search/tests/test_backends.py", "file_name": "test_backends.py", "fun_name": "assertUnsortedListEqual", "commit_message": "Fix warnings from flake8-comprehensions.", "code": "def assertUnsortedListEqual(self, a, b):\n \n self.assertListEqual(sorted(a), sorted(b))\n\n # SEARCH TESTS\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 26, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 24, "n_ast_nodes": 40, "n_identifiers": 6, "random_cut": "def assertUnsortedListEqual(self, a, b):\n \n " }, { "id": 71301, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/api/test_images.py", "file_name": "test_images.py", "fun_name": "test_thumbnail", "commit_message": "Reformat with black", "code": "def test_thumbnail(self):\n # Add a new image with source file\n image = get_image_model().objects.create(\n title=\"Test image\",\n file=get_test_image_file(),\n )\n\n response = self.get_response(image.id)\n content = json.loads(response.content.decode(\"UTF-8\"))\n\n self.assertIn(\"thumbnail\", 
content)\n self.assertEqual(content[\"thumbnail\"][\"width\"], 165)\n self.assertEqual(content[\"thumbnail\"][\"height\"], 123)\n self.assertTrue(content[\"thumbnail\"][\"url\"].startswith(\"/media/images/test\"))\n\n # Check that source_image_error didn't appear\n self.assertNotIn(\"source_image_error\", content[\"meta\"])\n\n\n# Overwrite imported test cases do Django doesn't run them\nTestImageDetail = None\nTestImageListing = None\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 149, "n_words": 54, "vocab_size": 46, "complexity": 1, "nloc": 12, "token_counts": 115, "n_ast_nodes": 213, "n_identifiers": 23, "random_cut": "def test_thumbnail(self):\n # Add a new image with source file\n image = get_image_model().objects.create(\n " }, { "id": 222539, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/_msvccompiler.py", "file_name": "_msvccompiler.py", "fun_name": "_find_vc2017", "commit_message": "add python 3.10.4 for windows", "code": "def _find_vc2017():\n \n root = os.environ.get(\"ProgramFiles(x86)\") or os.environ.get(\"ProgramFiles\")\n if not root:\n return None, None\n\n try:\n path = subprocess.check_output([\n os.path.join(root, \"Microsoft Visual Studio\", \"Installer\", \"vswhere.exe\"),\n \"-latest\",\n \"-prerelease\",\n \"-requires\", \"Microsoft.VisualStudio.Component.VC.Tools.x86.x64\",\n \"-property\", \"installationPath\",\n \"-products\", \"*\",\n ], encoding=\"mbcs\", errors=\"strict\").strip()\n except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):\n return None, None\n\n path = os.path.join(path, \"VC\", \"Auxiliary\", \"Build\")\n if os.path.isdir(path):\n return 15, path\n\n return None, None\n\nPLAT_SPEC_TO_RUNTIME = {\n 'x86' : 'x86',\n 'x86_amd64' : 'x64',\n 'x86_arm' : 'arm',\n 'x86_arm64' : 'arm64'\n}\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 206, "n_words": 71, "vocab_size": 55, "complexity": 5, "nloc": 19, "token_counts": 135, "n_ast_nodes": 275, "n_identifiers": 17, "random_cut": "def _find_vc2017():\n \n root = os.environ.get(\"ProgramFiles(x86)\") or os.environ.get(\"ProgramFiles\")\n if not root:\n return None, None\n\n try:\n path = subprocess.check_output([\n os.path.join(root, \"Microsoft Visual Studio\", \"Installer\", \"vswhere.exe\"),\n \"-latest\",\n \"-prerelease\",\n \"-requires\", \"Microsoft.VisualStudio.Component.VC.Tools.x86.x64\",\n \"-property\", \"installationPath\",\n \"-products\", \"*\",\n ], encoding=\"m" }, { "id": 40840, "commit_id": "6f3077f12b7837106ba0a79740fbfd547628291b", "repo": "seaborn", "path": "seaborn/tests/_core/test_mappings.py", "file_name": "test_mappings.py", "fun_name": "test_categorical_multi_lookup_categorical", "commit_message": "Thoroughly update scaling logic and internal API", "code": "def test_categorical_multi_lookup_categorical(self):\n\n x = pd.Series([\"a\", \"b\", \"c\"]).astype(\"category\")\n colors = color_palette(n_colors=len(x))\n scale = get_default_scale(x)\n m = ColorSemantic().setup(x, scale)\n assert_series_equal(m(x), pd.Series(colors))\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 53, "n_words": 19, "vocab_size": 16, "complexity": 1, "nloc": 6, "token_counts": 67, "n_ast_nodes": 114, "n_identifiers": 16, "random_cut": 
"def test_categorical_multi_lookup_categorical(self):\n\n x = pd.Series([\"a\", \"b\", \"c\"]).astype(\"category\")\n colors = color_palette(n_colors=len(x))\n scale = get_default_scale(x)\n m = ColorSemantic().setup(x, scale)\n assert_series_equal(m" }, { "id": 224742, "commit_id": "13b9c0dbd18d5a1b9705ca171a9e3b383a8e7d97", "repo": "mkdocs", "path": "mkdocs/tests/config/config_options_tests.py", "file_name": "config_options_tests.py", "fun_name": "test_deprecated_option_move", "commit_message": "Refactor tests for ConfigOption errors", "code": "def test_deprecated_option_move(self):\n option = config_options.Deprecated(moved_to='new')\n config = {'old': 'value'}\n option.pre_validation(config, 'old')\n self.assertEqual(\n option.warnings,\n [\n \"The configuration option 'old' has been deprecated and will be removed in a \"\n \"future release of MkDocs. Use 'new' instead.\"\n ],\n )\n self.assertEqual(config, {'new': 'value'})\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 144, "n_words": 40, "vocab_size": 38, "complexity": 1, "nloc": 12, "token_counts": 56, "n_ast_nodes": 103, "n_identifiers": 10, "random_cut": "def test_deprecated_option_move(self):\n option = config_options.Deprecated(moved_to='new')\n config = {'old': 'value'}\n option.pre_validation(config, 'old')\n self.assertEqual(\n option.warnings,\n [\n \"The configuration option 'old' has been deprecated and will be removed in a \"\n \"future release of MkD" }, { "id": 287963, "commit_id": "499c3410d1177eeec478af366e275a41b3e6ea60", "repo": "core", "path": "homeassistant/components/forked_daapd/media_player.py", "file_name": "media_player.py", "fun_name": "async_turn_on", "commit_message": "Add browse media to forked-daapd (#79009)\n\n* Add browse media to forked-daapd\r\n\r\n* Use elif in async_browse_image\r\n\r\n* Add tests\r\n\r\n* Add tests\r\n\r\n* Add test\r\n\r\n* Fix test", "code": "async def async_turn_on(self) -> None:\n \n # restore state\n await self.api.set_volume(volume=self._last_volume * 100)\n if self._last_outputs:\n futures: list[asyncio.Task[int]] = []\n for output in self._last_outputs:\n futures.append(\n asyncio.create_task(\n self.api.change_output(\n output[\"id\"],\n selected=output[\"selected\"],\n volume=output[\"volume\"],\n )\n )\n )\n await asyncio.wait(futures)\n else: # enable all outputs\n await self.api.set_enabled_outputs(\n [output[\"id\"] for output in self._outputs]\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 347, "n_words": 46, "vocab_size": 36, "complexity": 4, "nloc": 20, "token_counts": 114, "n_ast_nodes": 188, "n_identifiers": 20, "random_cut": "async def async_turn_on(self) -> None:\n \n # restore state\n await self.api.set_volume(volume=self._last_volume * 100)\n if self._last_outputs:\n futures: list[asyncio.Task[int]] = []\n for output in self._last_outputs:\n futures.append(\n asyncio.create_task(\n self.api.change_output(\n output[\"id\"],\n selected=output[\"selected\"],\n volume=output[\"volume\"],\n )\n )\n )\n await asyncio.w" }, { "id": 178970, "commit_id": "033d29fee17fbf13a53bf89f89ca5c444ff3dd0b", "repo": "Nuitka", "path": "nuitka/tools/quality/Git.py", "file_name": "Git.py", "fun_name": "updateWorkingFile", "commit_message": "Quality: Fix formatting when adding files on Windows\n\n* These have the wrong newlines potentially, so try again in case of\n failure 
after cleaning the newlines in checkout.", "code": "def updateWorkingFile(path, orig_object_hash, new_object_hash):\n patch = check_output(\n [\"git\", \"diff\", \"--no-color\", orig_object_hash, new_object_hash]\n )\n\n git_path = path.replace(os.path.sep, \"/\").encode(\"utf8\")\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 32, "n_words": 17, "vocab_size": 15, "complexity": 7, "nloc": 25, "token_counts": 166, "n_ast_nodes": 73, "n_identifiers": 11, "random_cut": "def updateWorkingFile(path, orig_object_hash, new_object_hash):\n patch = check_output(\n [\"git\", \"diff\", \"--no-color\", orig_object_hash, new_object_hash]\n )\n\n git_path = path.replace(os.path.sep, \"/\").encode(\"utf8\")\n" }, { "id": 111049, "commit_id": "4f4459f6f8d37d3b687f7844e63abb0f672d8a98", "repo": "pyxel", "path": "pyxel/editor/music_editor.py", "file_name": "music_editor.py", "fun_name": "get_field", "commit_message": "Renamed the sounds property of Music", "code": "def get_field(self, index):\n if index >= pyxel.NUM_CHANNELS:\n return\n music = pyxel.music(self.music_no_var)\n return music.snds_list[index]\n", "url": "https://github.com/kitao/pyxel.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 44, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 32, "n_ast_nodes": 50, "n_identifiers": 8, "random_cut": "def get_field(self, index):\n if index >= pyxel.NUM_CHANNELS:\n return\n music = pyxel.music(self.music_no_var)\n return music.snds_list[index]\n" }, { "id": 180603, "commit_id": "3ef4d4da4c1d39818d8bde82701f5e75b4b2cbe8", "repo": "gradio", "path": "gradio/documentation.py", "file_name": "documentation.py", "fun_name": "document_fn", "commit_message": "Fix default value in docs for objects (#1900)", "code": "def document_fn(fn):\n doc_str = inspect.getdoc(fn)\n doc_lines = doc_str.split(\"\\n\")\n signature = inspect.signature(fn)\n description, parameters, returns, examples = [], {}, [], []\n mode = \"description\"\n for line in doc_lines:\n line = line.rstrip()\n if line == \"Parameters:\":\n mode = \"parameter\"\n elif line == \"Example:\":\n mode = \"example\"\n elif line == \"Returns:\":\n mode = \"return\"\n else:\n if mode == \"description\":\n description.append(line if line.strip() else \"
    \")\n continue\n assert line.startswith(\n \" \"\n ), f\"Documentation format for {fn.__name__} has format error in line: {line}\"\n line = line[4:]\n if mode == \"parameter\":\n colon_index = line.index(\": \")\n assert (\n colon_index > -1\n ), f\"Documentation format for {fn.__name__} has format error in line: {line}\"\n parameter = line[:colon_index]\n parameter_doc = line[colon_index + 2 :]\n parameters[parameter] = parameter_doc\n elif mode == \"return\":\n returns.append(line)\n elif mode == \"example\":\n examples.append(line)\n description_doc = \" \".join(description)\n parameter_docs = []\n for param_name, param in signature.parameters.items():\n if param_name.startswith(\"_\"):\n continue\n if param_name == \"kwargs\" and param_name not in parameters:\n continue\n parameter_doc = {\n \"name\": param_name,\n \"annotation\": param.annotation,\n \"kind\": param.kind.description,\n \"doc\": parameters.get(param_name),\n }\n if param_name in parameters:\n del parameters[param_name]\n if param.default != inspect.Parameter.empty:\n default = param.default\n if type(default) == str:\n default = '\"' + default + '\"'\n if default.__class__.__module__ != \"builtins\":\n default = f\"{default.__class__.__name__}()\"\n parameter_doc[\"default\"] = default\n elif parameter_doc[\"doc\"] is not None and \"kwargs\" in parameter_doc[\"doc\"]:\n parameter_doc[\"kwargs\"] = True\n parameter_docs.append(parameter_doc)\n assert (\n len(parameters) == 0\n ), f\"Documentation format for {fn.__name__} documents nonexistent parameters: {''.join(parameters.keys())}\"\n if len(returns) == 0:\n return_docs = {}\n elif len(returns) == 1:\n return_docs = {\"annotation\": signature.return_annotation, \"doc\": returns[0]}\n else:\n return_docs = {}\n # raise ValueError(\"Does not support multiple returns yet.\")\n examples_doc = \"\\n\".join(examples) if len(examples) > 0 else None\n return description_doc, parameter_docs, return_docs, examples_doc\n\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 899, "n_words": 267, "vocab_size": 138, "complexity": 23, "nloc": 70, "token_counts": 436, "n_ast_nodes": 808, "n_identifiers": 44, "random_cut": "def document_fn(fn):\n doc_str = inspect.getdoc(fn)\n doc_lines = doc_str.split(\"\\n\")\n signature = inspect.signature(fn)\n description, parameters, returns, examples = [], {}, [], []\n mode = \"description\"\n for line in doc_lines:\n line = line.rstrip()\n if line == \"Parameters:\":\n mode = \"parameter\"\n elif line == \"Example:\":\n mode = \"example\"\n elif line == \"Returns:\":\n mode = \"return\"\n else:\n if mode == \"description\":\n description.append(line if line.strip() else \"
    \")\n continue\n assert line.startswith(\n \" \"\n ), f\"Documentation format for {fn.__name__} has format error in line: {line}\"\n line = line[4:]\n if mode == \"parameter\":\n colon_index = line.index(\": \")\n assert (\n colon_index > -1\n ), f\"Documentation format for {fn.__name__} has format error in line: {line}\"\n parameter = line[:colon_index]\n parameter_doc = line[colon_index + 2 :]\n parameters[parameter] = parameter_doc\n elif mode == \"return\":\n returns.append(line)\n elif mode == \"example\":\n examples.append(line)\n description_doc = \" \".join(description)\n parameter_docs = []\n for param_name, param in signature.parameters.items():\n if param_name.startswith(\"_\"):\n continue\n if param_name == \"kwargs\" and param_name not in parameters:\n continue\n parameter_doc = {\n \"name\": param_name,\n \"annotation\": param.annotation,\n \"kind\": param.kind.description,\n \"doc\": parameters.get(param_name),\n }\n if param_name in parameters:\n " }, { "id": 166554, "commit_id": "46bcf3740b38339f62b94e66ec29537a28a17140", "repo": "pandas", "path": "pandas/tests/indexing/test_indexing.py", "file_name": "test_indexing.py", "fun_name": "test_astype_assignment", "commit_message": "DEPR: df.iloc[:, foo] = bar attempt to set inplace (#45333)", "code": "def test_astype_assignment(self):\n\n # GH4312 (iloc)\n df_orig = DataFrame(\n [[\"1\", \"2\", \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n\n df = df_orig.copy()\n msg = \"will attempt to set the values inplace instead\"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)\n expected = DataFrame(\n [[1, 2, \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n tm.assert_frame_equal(df, expected)\n\n df = df_orig.copy()\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)\n expected = DataFrame(\n [[1, 2, \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n tm.assert_frame_equal(df, expected)\n\n # GH5702 (loc)\n df = df_orig.copy()\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.loc[:, \"A\"] = df.loc[:, \"A\"].astype(np.int64)\n expected = DataFrame(\n [[1, \"2\", \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n tm.assert_frame_equal(df, expected)\n\n df = df_orig.copy()\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.loc[:, [\"B\", \"C\"]] = df.loc[:, [\"B\", \"C\"]].astype(np.int64)\n expected = DataFrame(\n [[\"1\", 2, 3, \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n tm.assert_frame_equal(df, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 406, "n_words": 133, "vocab_size": 50, "complexity": 1, "nloc": 33, "token_counts": 387, "n_ast_nodes": 613, "n_identifiers": 23, "random_cut": "def test_astype_assignment(self):\n\n # GH4312 (iloc)\n df_orig = DataFrame(\n [[\"1\", \"2\", \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n\n df = df_orig.copy()\n msg = \"will attempt to set the values inplace instead\"\n with tm.assert_produces_warning(Futu" }, { "id": 75262, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/test_jinja2.py", "file_name": "test_jinja2.py", "fun_name": "render", "commit_message": "Reformat with black", "code": "def render(self, string, context=None, request_context=True):\n if 
context is None:\n context = {}\n\n # Add a request to the template, to simulate a RequestContext\n if request_context:\n site = Site.objects.get(is_default_site=True)\n request = self.client.get(\"/test/\", HTTP_HOST=site.hostname)\n context[\"request\"] = request\n\n template = self.engine.from_string(string)\n return template.render(context)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 118, "n_words": 40, "vocab_size": 30, "complexity": 3, "nloc": 9, "token_counts": 78, "n_ast_nodes": 125, "n_identifiers": 17, "random_cut": "def render(self, string, context=None, request_context=True):\n if context is None:\n context = {}\n\n # Add a request to the template, to simulate a RequestContext\n if request_context:\n " }, { "id": 86354, "commit_id": "5d8a666bebd4d4b0b0200af5ed37ba504e0895be", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events.py", "file_name": "test_organization_events.py", "fun_name": "test_has_performance_issue_ids", "commit_message": "test(perf-issues): Improve how performance issues are created in tests (#39293)\n\nThis PR updates `Factories.store_event` and `load_data` to support\r\ncreation of performance groups for transactions. Once this PR is merged,\r\nI will update all instances of `hack_pull_out_data`.\r\n\r\nResolved ISP-16", "code": "def test_has_performance_issue_ids(self):\n data = load_data(\n platform=\"transaction\",\n fingerprint=[f\"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1\"],\n )\n self.store_event(data=data, project_id=self.project.id)\n\n query = {\n \"field\": [\"count()\"],\n \"statsPeriod\": \"1h\",\n \"query\": \"has:performance.issue_ids\",\n }\n response = self.do_request(query)\n assert response.status_code == 200, response.content\n assert response.data[\"data\"][0][\"count()\"] == 1\n\n query = {\n \"field\": [\"count()\"],\n \"statsPeriod\": \"1h\",\n \"query\": \"!has:performance.issue_ids\",\n }\n response = self.do_request(query)\n assert response.status_code == 200, response.content\n assert response.data[\"data\"][0][\"count()\"] == 0\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 232, "n_words": 54, "vocab_size": 30, "complexity": 1, "nloc": 22, "token_counts": 139, "n_ast_nodes": 247, "n_identifiers": 18, "random_cut": "def test_has_performance_issue_ids(self):\n data = load_data(\n platform=\"transaction\",\n fingerprint=[f\"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1\"],\n )\n self.store_event(data=data, project_id=self.project.id)\n\n query = {\n \"field\": [\"c" }, { "id": 39206, "commit_id": "87970de68431d511a1ea28f838be1f9eba9b4c02", "repo": "recommenders", "path": "recommenders/models/ncf/dataset.py", "file_name": "dataset.py", "fun_name": "__next__", "commit_message": "fix docstrings", "code": "def __next__(self):\n if self.next_row:\n self.row = self.next_row\n elif self.line_num == 0:\n self.row = self._extract_row_data(next(self.reader, None))\n if self.row is None:\n raise EmptyFileException(\"{} is empty.\".format(self.filename))\n else:\n raise StopIteration # end of file\n self.next_row = self._extract_row_data(next(self.reader, None))\n self.line_num += 1\n\n return self.row\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 140, "n_words": 39, "vocab_size": 27, 
"complexity": 4, "nloc": 12, "token_counts": 90, "n_ast_nodes": 145, "n_identifiers": 12, "random_cut": "def __next__(self):\n if self.next_row:\n self.row = self.next_row\n elif self.line_num == 0:\n self.row = self._extract_row_data(next(self.reader, None))\n if self.row is None:\n raise EmptyFileException(\"{} is empty.\".format(self.filename))\n else:\n raise StopIteration # end of file\n self.next_row = self._extract_" }, { "id": 108893, "commit_id": "4e21912d2938b0e8812c4d1f7cd902c080062ff2", "repo": "matplotlib", "path": "lib/matplotlib/backends/_backend_tk.py", "file_name": "_backend_tk.py", "fun_name": "scroll_event_windows", "commit_message": "Make it easier to improve UI event metadata.\n\nCurrently, UI events (MouseEvent, KeyEvent, etc.) are generated by\nletting the GUI-specific backends massage the native event objects into\na list of args/kwargs and then call\n`FigureCanvasBase.motion_notify_event`/`.key_press_event`/etc. This\nmakes it a bit tricky to improve the metadata on the events, because one\nneeds to change the signature on both the `FigureCanvasBase` method and\nthe event class. Moreover, the `motion_notify_event`/etc. methods are\ndirectly bound as event handlers in the gtk3 and tk backends, and thus\nhave incompatible signatures there.\n\nInstead, the native GUI handlers can directly construct the relevant\nevent objects and trigger the events themselves; a new `Event._process`\nhelper method makes this even shorter (and allows to keep factoring some\ncommon functionality e.g. for tracking the last pressed button or key).\n\nAs an example, this PR also updates figure_leave_event to always\ncorrectly set the event location based on the *current* cursor position,\ninstead of the last triggered location event (which may be outdated);\nthis can now easily be done on a backend-by-backend basis, instead of\ncoordinating the change with FigureCanvasBase.figure_leave_event.\n\nThis also exposed another (minor) issue, in that resize events\noften trigger *two* calls to draw_idle -- one in the GUI-specific\nhandler, and one in FigureCanvasBase.draw_idle (now moved to\nResizeEvent._process, but should perhaps instead be a callback\nautoconnected to \"resize_event\") -- could probably be fixed later.", "code": "def scroll_event_windows(self, event):\n \n # need to find the window that contains the mouse\n w = event.widget.winfo_containing(event.x_root, event.y_root)\n if w != self._tkcanvas:\n return\n x = self._tkcanvas.canvasx(event.x_root - w.winfo_rootx())\n y = (self.figure.bbox.height\n - self._tkcanvas.canvasy(event.y_root - w.winfo_rooty()))\n step = event.delta / 120\n MouseEvent(\"scroll_event\", self,\n x, y, step=step, guiEvent=event)._process()\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 142, "n_words": 45, "vocab_size": 38, "complexity": 2, "nloc": 10, "token_counts": 107, "n_ast_nodes": 169, "n_identifiers": 23, "random_cut": "def scroll_event_windows(self, event):\n \n # need to find the window that co" }, { "id": 129834, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "dashboard/datacenter.py", "file_name": "datacenter.py", "fun_name": "_get_actor", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "async def _get_actor(actor):\n actor = dict(actor)\n worker_id = actor[\"address\"][\"workerId\"]\n core_worker_stats = 
DataSource.core_worker_stats.get(worker_id, {})\n actor_constructor = core_worker_stats.get(\n \"actorTitle\", \"Unknown actor constructor\"\n )\n actor[\"actorConstructor\"] = actor_constructor\n actor.update(core_worker_stats)\n\n # TODO(fyrestone): remove this, give a link from actor\n # info to worker info in front-end.\n node_id = actor[\"address\"][\"rayletId\"]\n pid = core_worker_stats.get(\"pid\")\n node_physical_stats = DataSource.node_physical_stats.get(node_id, {})\n actor_process_stats = None\n actor_process_gpu_stats = []\n if pid:\n for process_stats in node_physical_stats.get(\"workers\", []):\n if process_stats[\"pid\"] == pid:\n actor_process_stats = process_stats\n break\n\n for gpu_stats in node_physical_stats.get(\"gpus\", []):\n for process in gpu_stats.get(\"processes\", []):\n if process[\"pid\"] == pid:\n actor_process_gpu_stats.append(gpu_stats)\n break\n\n actor[\"gpus\"] = actor_process_gpu_stats\n actor[\"processStats\"] = actor_process_stats\n return actor\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 387, "n_words": 96, "vocab_size": 60, "complexity": 7, "nloc": 27, "token_counts": 175, "n_ast_nodes": 303, "n_identifiers": 18, "random_cut": "async def _get_actor(actor):\n actor = dict(actor)\n worker_id = actor[\"address\"][\"workerId\"]\n core_w" }, { "id": 12054, "commit_id": "984e743734b18c1117bbbc2eda49d7eceaa9343f", "repo": "jina", "path": "jina/orchestrate/deployments/config/docker_compose.py", "file_name": "docker_compose.py", "fun_name": "get_runtime_config", "commit_message": "feat: add default volume to dockererized executors (#4554)", "code": "def get_runtime_config(self) -> List[Dict]:\n # One Dict for replica\n replica_configs = []\n for i_rep in range(self.service_args.replicas):\n cargs = copy.copy(self.service_args)\n cargs.name = (\n f'{cargs.name}/rep-{i_rep}'\n if self.service_args.replicas > 1\n else cargs.name\n )\n\n env = cargs.env\n image_name = self._get_image_name(cargs.uses)\n container_args = self._get_container_args(cargs)\n config = {\n 'image': image_name,\n 'entrypoint': ['jina'],\n 'command': container_args,\n 'healthcheck': {\n 'test': f'python -m jina.resources.health_check.pod localhost:{cargs.port}',\n 'interval': '2s',\n },\n 'environment': [\n f'JINA_LOG_LEVEL={os.getenv(\"JINA_LOG_LEVEL\", \"INFO\")}'\n ],\n }\n if env is not None:\n config['environment'] = [f'{k}={v}' for k, v in env.items()]\n\n if self.service_args.pod_role == PodRoleType.WORKER:\n config = self._update_config_with_volumes(\n config, auto_volume=not self.common_args.disable_auto_volume\n )\n\n replica_configs.append(config)\n return replica_configs\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 638, "n_words": 91, "vocab_size": 72, "complexity": 6, "nloc": 32, "token_counts": 179, "n_ast_nodes": 337, "n_identifiers": 33, "random_cut": "def get_runtime_config(self) -> List[Dict]:\n # One Dict for replica\n replica_configs = []\n for i_rep in range(self.service_args.replicas):\n cargs = copy.copy(self.service_args)\n cargs.name = (\n f'{cargs.name}/rep-{i_rep" }, { "id": 274372, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/legacy_tf_layers/core_test.py", "file_name": "core_test.py", "fun_name": "testDropoutProperties", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", 
"code": "def testDropoutProperties(self):\n dp = core_layers.Dropout(0.5, name=\"dropout\")\n self.assertEqual(dp.rate, 0.5)\n self.assertEqual(dp.noise_shape, None)\n dp(tf.ones(()))\n self.assertEqual(dp.name, \"dropout\")\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 47, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 6, "token_counts": 61, "n_ast_nodes": 93, "n_identifiers": 11, "random_cut": "def testDropoutProperties(self):\n dp = core_layers" }, { "id": 124558, "commit_id": "a68c02a15d041f987359c73781fb38202041a16f", "repo": "ray", "path": "dashboard/modules/healthz/tests/test_healthz.py", "file_name": "test_healthz.py", "fun_name": "test_healthz_head", "commit_message": "[dashboard][2/2] Add endpoints to dashboard and dashboard_agent for liveness check of raylet and gcs (#26408)\n\n## Why are these changes needed?\r\nAs in this https://github.com/ray-project/ray/pull/26405 we added the health check for gcs and raylets.\r\n\r\nThis PR expose them in the endpoint in dashboard and dashboard agent.\r\n\r\nFor dashboard, we added `http://host:port/api/gcs_healthz` and it'll send RPC to GCS directly to see whether the GCS is alive or not.\r\n\r\nFor agent, we added `http://host:port/api/local_raylet_healthz` and it'll send RPC to GCS to check whether raylet is alive or not.\r\n\r\nWe think raylet is live if\r\n- GCS is dead\r\n- GCS is alive but GCS think the raylet is dead\r\n\r\nIf GCS is dead for more than X seconds (60 by default), raylet will just crash itself, so KubeRay can still catch it.", "code": "def test_healthz_head(ray_start_cluster):\n dashboard_port = find_free_port()\n h = ray_start_cluster.add_node(dashboard_port=dashboard_port)\n uri = f\"http://localhost:{dashboard_port}/api/gcs_healthz\"\n wait_for_condition(lambda: requests.get(uri).status_code == 200)\n h.all_processes[ray_constants.PROCESS_TYPE_GCS_SERVER][0].process.kill()\n # It'll either timeout or just return an error\n try:\n wait_for_condition(lambda: requests.get(uri, timeout=1) != 200, timeout=4)\n except RuntimeError as e:\n assert \"Read timed out\" in str(e)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 79, "n_words": 42, "vocab_size": 39, "complexity": 2, "nloc": 10, "token_counts": 91, "n_ast_nodes": 153, "n_identifiers": 20, "random_cut": "def test_healthz_head(ray_start_cluster):\n dashboard_port = find_free_port()\n h = ray_start_cluster.add_node(dashboard_port=dashboard_port)\n uri = f\"http://localhost:{dashboard_port}/api/gcs_healthz\"\n wait_for_condition(lambda: requests.get(uri).status_code == 200)\n h.all_processes[ray_constants.PROCESS_TYPE_GCS_SERVER][0].process.kill()\n # It'll either timeout or just return an error\n try:\n wait_for_condition(lambda: requests.get(uri, timeout=1) != 200, timeout=4)\n except RuntimeError as e:\n assert \"Read timed out\" in str(e)\n\n" }, { "id": 86558, "commit_id": "ff8ef470d2fdb80df0a57890ead1e4a792ac99a2", "repo": "sentry", "path": "tests/sentry/api_gateway/test_proxy.py", "file_name": "test_proxy.py", "fun_name": "test_query_params", "commit_message": "feat(api-gateway): Proxy GET requests (#39595)\n\nThis change introduces a proxy manager that proxies requests to a region silo given an org slug Currently, this only handles GETs and JSON response bodies. 
Later PRs will handle other methods and body types.", "code": "def test_query_params(self, region_fnc_patch):\n\n query_param_dict = dict(foo=\"bar\", numlist=[\"1\", \"2\", \"3\"])\n query_param_str = urlencode(query_param_dict, doseq=True)\n request = RequestFactory().get(f\"http://sentry.io/echo?{query_param_str}\")\n region_fnc_patch.return_value = SENTRY_REGION_CONFIG[0]\n\n resp = proxy_request(request, self.organization.slug)\n resp_json = json.loads(b\"\".join(resp.streaming_content))\n\n assert resp.status_code == 200\n # parse_qs returns everything in a list, including single arguments\n assert query_param_dict[\"foo\"] == resp_json[\"foo\"][0]\n assert query_param_dict[\"numlist\"] == resp_json[\"numlist\"]\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 117, "n_words": 48, "vocab_size": 39, "complexity": 1, "nloc": 10, "token_counts": 111, "n_ast_nodes": 188, "n_identifiers": 25, "random_cut": "def test_query_params(self, region_fnc_patch):\n\n query_param_dict = dict(foo=\"bar\", numlist=[\"1\", \"2\", \"3\"])\n query_param_str = url" }, { "id": 152809, "commit_id": "875ddfeecfaffad9eee24813301637cba310337d", "repo": "stable-diffusion-webui", "path": "modules/safe.py", "file_name": "safe.py", "fun_name": "find_class", "commit_message": "added guard for torch.load to prevent loading pickles with unknown content", "code": "def find_class(self, module, name):\r\n if module == 'collections' and name == 'OrderedDict':\r\n return getattr(collections, name)\r\n if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter']:\r\n return getattr(torch._utils, name)\r\n if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage']:\r\n return getattr(torch, name)\r\n if module == 'torch.nn.modules.container' and name in ['ParameterDict']:\r\n return getattr(torch.nn.modules.container, name)\r\n if module == 'numpy.core.multiarray' and name == 'scalar':\r\n return numpy.core.multiarray.scalar\r\n if module == 'numpy' and name == 'dtype':\r\n return numpy.dtype\r\n if module == '_codecs' and name == 'encode':\r\n return encode\r\n if module == \"pytorch_lightning.callbacks\" and name == 'model_checkpoint':\r\n import pytorch_lightning.callbacks\r\n return pytorch_lightning.callbacks.model_checkpoint\r\n if module == \"pytorch_lightning.callbacks.model_checkpoint\" and name == 'ModelCheckpoint':\r\n import pytorch_lightning.callbacks.model_checkpoint\r\n return pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint\r\n if module == \"__builtin__\" and name == 'set':\r\n return set\r\n\r\n # Forbid everything else.\r\n raise pickle.UnpicklingError(f\"global '{module}/{name}' is forbidden\")\r", "url": "https://github.com/AUTOMATIC1111/stable-diffusion-webui.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 340, "n_words": 125, "vocab_size": 57, "complexity": 21, "nloc": 24, "token_counts": 197, "n_ast_nodes": 351, "n_identifiers": 24, "random_cut": "def find_class(self, module, name):\r\n if module == 'collections' and name == 'OrderedDict':\r\n return getattr(collections, name)\r\n if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter']:\r\n return getattr(torch._utils, name)\r\n if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage']:\r\n return getattr(torch, name)\r\n if module == 'torch.nn.modules.container' and 
name in ['ParameterDict']:\r\n return getattr(torch.nn.modules.container, name)\r\n if module == 'numpy.core.multiarray' and name == 'scalar':\r\n return numpy.core.multiarray.scalar\r\n if module == 'numpy' and name == 'dtype':\r\n return numpy.dtype\r\n if module == '_codecs' and name == 'encode':\r\n return encode\r\n if module == \"pytorch_lightning.callbacks\" and name == 'model_checkpoint':\r\n import pytorch_lightning.callbacks\r\n return pytorch_lightning.callbacks.model_checkpoint\r\n if module == \"pytorch_lightning.callbacks.model_checkpoint\" and name == 'ModelCheckpoint':\r\n import pytorch_lightning.callbacks.model_checkpoint\r\n return pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint\r\n " }, { "id": 30338, "commit_id": "a96db2512e1533287684d5563d0a6b7dd065a8b7", "repo": "spotify-downloader", "path": "tests/utils/test_metadata.py", "file_name": "test_metadata.py", "fun_name": "test_embed_metadata", "commit_message": "Create test_metadata.py", "code": "def test_embed_metadata(tmpdir, monkeypatch, output_format):\n \n\n monkeypatch.chdir(tmpdir)\n monkeypatch.setattr(spotdl.utils.ffmpeg, \"get_spotdl_path\", lambda *_: tmpdir)\n\n yt = YoutubeDL(\n {\n \"format\": \"bestaudio\",\n \"encoding\": \"UTF-8\",\n }\n )\n\n download_info = yt.extract_info(\n \"https://www.youtube.com/watch?v=h-nHdqC3pPs\", download=False\n )\n\n song = Song.from_data_dump(\n \n )\n\n output_file = Path(tmpdir / f\"test.{output_format}\")\n\n assert convert(\n input_file=(download_info[\"url\"], download_info[\"ext\"]),\n output_file=output_file,\n output_format=output_format,\n ) == (True, None)\n\n embed_metadata(output_file, song, output_format)\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 159, "n_words": 48, "vocab_size": 42, "complexity": 1, "nloc": 46, "token_counts": 118, "n_ast_nodes": 198, "n_identifiers": 23, "random_cut": "def test_embed_metadata(tmpdir, monkeypatch, output_format):\n \n\n monkeypatch.chdir(tmpdir)\n monkeypatch.setattr(spotdl.utils.ffmpeg, \"get_spotdl_path\", lambda *_: tmpdir)\n\n yt = YoutubeDL(\n {\n \"format\": \"bestaudio\",\n \"encoding\": \"UTF-8\",\n }\n )\n\n download_info = yt.extract_info(\n \"https://www.youtube.com/watch?v=h-nHdqC3pPs\", download=False\n )\n\n song = Song.from_data_dump(\n \n )\n\n output_file = Path(tmpdir / f\"test.{output_format}\")\n\n assert convert(\n input_file=(download_info[\"url\"], download_info[\"ext\"]),\n output_file=output_file,\n output_format=output_format,\n ) == (True, None)" }, { "id": 200373, "commit_id": "24f1e7730119fe958cc8e28411f790c9a5ec04eb", "repo": "sympy", "path": "sympy/assumptions/assume.py", "file_name": "assume.py", "fun_name": "function", "commit_message": "Fix various typos\n\nFound via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`", "code": "def function(self):\n \n # Will be changed to self.args[0] after args overriding is removed\n return self._args[0]\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 36, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 24, "n_identifiers": 3, "random_cut": "def 
function(self):\n \n # Will be changed to self.args[0" }, { "id": 57466, "commit_id": "d238c7b16097895006eff9e3f081958af15cd3e5", "repo": "prefect", "path": "src/prefect/utilities/importtools.py", "file_name": "importtools.py", "fun_name": "__getattr__", "commit_message": "Fix attribute getter support", "code": "def __getattr__(self, attr):\n if attr in (\"__class__\", \"__file__\", \"__frame_data\", \"__help_message\"):\n super().__getattr__(attr)\n else:\n fd = self.__frame_data\n raise ModuleNotFoundError(\n f\"No module named '{fd['spec']}'\\n\\n\"\n \"This module was originally imported at:\\n\"\n f' File \"{fd[\"filename\"]}\", line {fd[\"lineno\"]}, in {fd[\"function\"]}\\n\\n'\n f' {\"\".join(fd[\"code_context\"]).strip()}\\n' + self.__help_message\n )\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 160, "n_words": 39, "vocab_size": 36, "complexity": 2, "nloc": 11, "token_counts": 50, "n_ast_nodes": 161, "n_identifiers": 10, "random_cut": "def __getattr__(self, attr):\n if attr in (\"__class__\", \"__file_" }, { "id": 335804, "commit_id": "53a42d0a0cab99e9a905b117b9893052c6849e10", "repo": "diffusers", "path": "src/diffusers/models/resnet.py", "file_name": "resnet.py", "fun_name": "forward", "commit_message": "Simplify FirUp/down, unet sde (#71)\n\n* refactor fir up/down sample\r\n\r\n* remove variance scaling\r\n\r\n* remove variance scaling from unet sde\r\n\r\n* refactor Linear\r\n\r\n* style\r\n\r\n* actually remove variance scaling\r\n\r\n* add back upsample_2d, downsample_2d\r\n\r\n* style\r\n\r\n* fix FirUpsample2D", "code": "def forward(self, x):\n if self.use_conv:\n h = self._upsample_2d(x, self.Conv2d_0.weight, k=self.fir_kernel)\n h = h + self.Conv2d_0.bias.reshape(1, -1, 1, 1)\n else:\n h = self._upsample_2d(x, k=self.fir_kernel, factor=2)\n\n return h\n\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 79, "n_words": 26, "vocab_size": 19, "complexity": 2, "nloc": 7, "token_counts": 75, "n_ast_nodes": 111, "n_identifiers": 13, "random_cut": "def forward(self, x):\n if self.use_conv:\n h = self._upsample_2d(x, self.Conv2d_0.weight, k=self.fir_kernel)\n h = h + self.Conv2d_0.bias.reshape(1, -1, 1, 1)\n else:\n h = self._upsample_2d(x, k=self.fir_kernel, factor=2)\n\n " }, { "id": 138558, "commit_id": "cc08c01adedad6cd89f3ab310ed58100ed6dbc26", "repo": "ray", "path": "python/ray/ml/preprocessors/scaler.py", "file_name": "scaler.py", "fun_name": "__repr__", "commit_message": "[ml] add more preprocessors (#23904)\n\nAdding some more common preprocessors:\r\n* MaxAbsScaler\r\n* RobustScaler\r\n* PowerTransformer\r\n* Normalizer\r\n* FeatureHasher\r\n* Tokenizer\r\n* HashingVectorizer\r\n* CountVectorizer\r\n\r\nAPI docs: https://ray--23904.org.readthedocs.build/en/23904/ray-air/getting-started.html\r\n\r\nCo-authored-by: Kai Fricke ", "code": "def __repr__(self):\n stats = getattr(self, \"stats_\", None)\n return (\n f\"StandardScaler(columns={self.columns}, ddof={self.ddof}, stats={stats})\"\n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 44, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 20, "n_ast_nodes": 51, "n_identifiers": 6, "random_cut": "def __repr__(self):\n " }, { "id": 63420, "commit_id": 
"f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "addCondition", "commit_message": "upd; format", "code": "def addCondition(self, *fns, **kwargs):\n \n for fn in fns:\n self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'),\n fatal=kwargs.get('fatal', False)))\n\n self.callDuringTry = self.callDuringTry or kwargs.get(\"callDuringTry\", False)\n return self\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 117, "n_words": 20, "vocab_size": 19, "complexity": 3, "nloc": 6, "token_counts": 66, "n_ast_nodes": 107, "n_identifiers": 12, "random_cut": "def addCondition(self, *fns, **kwargs):\n \n for fn in fns:\n self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'),\n fa" }, { "id": 41855, "commit_id": "de1ecf0e0d0064982ebf4f13e1b1afddd27c80ff", "repo": "seaborn", "path": "tests/test_categorical.py", "file_name": "test_categorical.py", "fun_name": "test_plot_elements", "commit_message": "Revert unnecessary (and broken) backwards compat in catplot (#2839)", "code": "def test_plot_elements(self):\n\n g = cat.catplot(x=\"g\", y=\"y\", data=self.df, kind=\"point\")\n assert len(g.ax.collections) == 1\n want_lines = self.g.unique().size + 1\n assert len(g.ax.lines) == want_lines\n\n g = cat.catplot(x=\"g\", y=\"y\", hue=\"h\", data=self.df, kind=\"point\")\n want_collections = self.h.unique().size\n assert len(g.ax.collections) == want_collections\n want_lines = (self.g.unique().size + 1) * self.h.unique().size\n assert len(g.ax.lines) == want_lines\n\n g = cat.catplot(x=\"g\", y=\"y\", data=self.df, kind=\"bar\")\n want_elements = self.g.unique().size\n assert len(g.ax.patches) == want_elements\n assert len(g.ax.lines) == want_elements\n\n g = cat.catplot(x=\"g\", y=\"y\", hue=\"h\", data=self.df, kind=\"bar\")\n want_elements = self.g.unique().size * self.h.unique().size\n assert len(g.ax.patches) == want_elements\n assert len(g.ax.lines) == want_elements\n\n g = cat.catplot(x=\"g\", data=self.df, kind=\"count\")\n want_elements = self.g.unique().size\n assert len(g.ax.patches) == want_elements\n assert len(g.ax.lines) == 0\n\n g = cat.catplot(x=\"g\", hue=\"h\", data=self.df, kind=\"count\")\n want_elements = self.g.unique().size * self.h.unique().size\n assert len(g.ax.patches) == want_elements\n assert len(g.ax.lines) == 0\n\n g = cat.catplot(y=\"y\", data=self.df, kind=\"box\")\n want_artists = 1\n assert len(self.get_box_artists(g.ax)) == want_artists\n\n g = cat.catplot(x=\"g\", y=\"y\", data=self.df, kind=\"box\")\n want_artists = self.g.unique().size\n assert len(self.get_box_artists(g.ax)) == want_artists\n\n g = cat.catplot(x=\"g\", y=\"y\", hue=\"h\", data=self.df, kind=\"box\")\n want_artists = self.g.unique().size * self.h.unique().size\n assert len(self.get_box_artists(g.ax)) == want_artists\n\n g = cat.catplot(x=\"g\", y=\"y\", data=self.df,\n kind=\"violin\", inner=None)\n want_elements = self.g.unique().size\n assert len(g.ax.collections) == want_elements\n\n g = cat.catplot(x=\"g\", y=\"y\", hue=\"h\", data=self.df,\n kind=\"violin\", inner=None)\n want_elements = self.g.unique().size * self.h.unique().size\n assert len(g.ax.collections) == want_elements\n\n g = cat.catplot(x=\"g\", y=\"y\", data=self.df, kind=\"strip\")\n want_elements = self.g.unique().size\n assert len(g.ax.collections) 
== want_elements\n\n g = cat.catplot(x=\"g\", y=\"y\", hue=\"h\", data=self.df, kind=\"strip\")\n want_elements = self.g.unique().size + self.h.unique().size\n assert len(g.ax.collections) == want_elements\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 586, "n_words": 219, "vocab_size": 34, "complexity": 1, "nloc": 49, "token_counts": 767, "n_ast_nodes": 1242, "n_identifiers": 25, "random_cut": "def test_plot_elements(self):\n\n g = cat.catplot(x=\"g\", y=\"y\", data=self.df, kind=\"point\")\n assert len(g.ax.collections) == 1\n want_lines = self.g.unique().size + 1\n assert len(g.ax.lines) == want_lines\n\n g = cat.catplot(x=\"g\", y=\"y\", hue=\"h\", data=self.df, kind=\"point\")\n want_collections = self.h.unique().size\n assert len(g.ax.collections) == want_collections\n want_lines = (self.g.unique().size + 1) * self.h.unique().size\n assert len(g.ax.lines) == want_lines\n\n g = cat.catplot(x=\"g\", y=\"y\", data=self.df, kind=\"bar\")\n want_elements = self.g.unique().size\n assert len(g.ax.patches) == want_elements\n assert len(g.ax.lines) == want_elements\n\n g = cat.catplot(x=\"g\", y=\"y\", hue=\"h\", data=self.df, kind=\"bar\")\n want_elements = self.g.unique().size * self.h.unique().size\n assert " }, { "id": 144365, "commit_id": "8806b2d5c43f256188632c245dd741774776dad0", "repo": "ray", "path": "dashboard/modules/job/common.py", "file_name": "common.py", "fun_name": "get_all_jobs", "commit_message": "[jobs] Monitor jobs in the background to avoid requiring clients to poll (#22180)", "code": "def get_all_jobs(self) -> Dict[str, JobStatusInfo]:\n raw_job_ids = _internal_kv_list(self.JOB_STATUS_KEY_PREFIX)\n job_ids = [job_id.decode() for job_id in raw_job_ids]\n return {job_id: self.get_status(job_id) for job_id in job_ids}\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 42, "n_words": 22, "vocab_size": 18, "complexity": 3, "nloc": 4, "token_counts": 48, "n_ast_nodes": 73, "n_identifiers": 12, "random_cut": "def get_all_jobs(self) -> Dict[str, JobStatusInfo]:\n raw_job_ids = _internal_kv_list(self.JOB_STATUS_KEY_PREFIX)\n job_ids = [job_id.decode() for job_id in raw_job_ids]\n return {job_id: self.get_status(job_id) for job_id in job_ids}\n\n" }, { "id": 117106, "commit_id": "ae4fa77a2c0a9fa57cc9c8bc7e8961dd01e4067e", "repo": "mindsdb", "path": "tests/integration_tests/flows/test_mysql_api_pytest_based.py", "file_name": "test_mysql_api_pytest_based.py", "fun_name": "get_record", "commit_message": "It mysql api test pytest (#3694)\n\n* migration to pytest\r\n\r\n* Tests start passing\r\n\r\n* Fully working tests\r\n\r\n* Increase timeout for mindsdb start\r\n\r\n* reduce amount of logs\r\n\r\n* show logs only for failed tests", "code": "def get_record(self, key, value):\n if key in self:\n for x in self:\n if x[key] == value:\n return x\n return None\n\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 78, "n_words": 20, "vocab_size": 15, "complexity": 4, "nloc": 6, "token_counts": 31, "n_ast_nodes": 46, "n_identifiers": 5, "random_cut": "def get_record(self, key, value):\n if key in self:\n for x in self:\n if x[key] == value:\n return x\n r" }, { "id": 97911, "commit_id": "3ba27f5b5845de0a5a89d5cbf2e5df752915d9d7", "repo": "sentry", "path": 
"tests/sentry/api/endpoints/test_organization_sdk_updates.py", "file_name": "test_organization_sdk_updates.py", "fun_name": "test_unknown_version", "commit_message": "fix(sdk): Do not error if a project is using an unknown version (#32206)", "code": "def test_unknown_version(self, mock_index_state):\n min_ago = iso_format(before_now(minutes=1))\n self.store_event(\n data={\n \"event_id\": \"a\" * 32,\n \"message\": \"oh no\",\n \"timestamp\": min_ago,\n \"fingerprint\": [\"group-1\"],\n \"sdk\": {\"name\": \"example.sdk\", \"version\": \"dev-master@32e5415\"},\n },\n project_id=self.project.id,\n assert_no_errors=False,\n )\n self.store_event(\n data={\n \"event_id\": \"b\" * 32,\n \"message\": \"b\",\n \"timestamp\": min_ago,\n \"fingerprint\": [\"group-2\"],\n \"sdk\": {\"name\": \"example.sdk\", \"version\": \"2.0.0\"},\n },\n project_id=self.project.id,\n assert_no_errors=False,\n )\n\n with self.feature(self.features):\n response = self.client.get(self.url)\n\n update_suggestions = response.data\n assert len(update_suggestions) == 0\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 365, "n_words": 61, "vocab_size": 42, "complexity": 1, "nloc": 28, "token_counts": 160, "n_ast_nodes": 281, "n_identifiers": 21, "random_cut": "def test_unknown_version(self, mock_index_state):\n min_ago = iso_format(before_now(minutes=1))\n self.store_event(\n data={\n " }, { "id": 160198, "commit_id": "730f3154f48e33f22b2ea8814eb10a45aa273e17", "repo": "numpy", "path": "numpy/lib/shape_base.py", "file_name": "shape_base.py", "fun_name": "kron", "commit_message": "ENH: Maintain subclass info for `np.kron`\n\n* Replace `*` call with `multiply`\n* Handle `mat` cases to perform reshape\n* Remove use result wrapping to maintain consistency with ufuncs", "code": "def kron(a, b):\n \n b = asanyarray(b)\n a = array(a, copy=False, subok=True, ndmin=b.ndim)\n ndb, nda = b.ndim, a.ndim\n nd = max(ndb, nda)\n\n if (nda == 0 or ndb == 0):\n return _nx.multiply(a, b)\n\n as_ = a.shape\n bs = b.shape\n if not a.flags.contiguous:\n a = reshape(a, as_)\n if not b.flags.contiguous:\n b = reshape(b, bs)\n\n # Equalise the shapes by prepending smaller one with 1s\n as_ = (1,)*max(0, ndb-nda) + as_\n bs = (1,)*max(0, nda-ndb) + bs\n\n # Compute the product\n a_arr = a.reshape(a.size, 1)\n b_arr = b.reshape(1, b.size)\n is_any_mat = isinstance(a_arr, matrix) or isinstance(b_arr, matrix)\n # In case of `mat`, convert result to `array`\n result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat))\n\n # Reshape back\n result = result.reshape(as_+bs)\n transposer = _nx.arange(nd*2).reshape([2, nd]).ravel(order='f')\n result = result.transpose(transposer)\n result = result.reshape(_nx.multiply(as_, bs))\n\n return result if not is_any_mat else matrix(result, copy=False)\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 229, "n_words": 133, "vocab_size": 89, "complexity": 7, "nloc": 24, "token_counts": 278, "n_ast_nodes": 429, "n_identifiers": 33, "random_cut": "def kron(a, b):\n \n b = asanyarray(b)\n a = array(a, copy=False, subok=True, ndmin=b.ndim)\n ndb, nda = b.ndim, a.ndim\n nd = max(ndb, nda)\n\n if (nda == 0 or ndb == 0):\n return _nx.multiply(a, b)\n\n as_ = a.shape\n bs = b.shape\n if not a.flags.contiguous:\n a = reshape(a, as_)\n if not b.flags.contiguous:\n b = reshape(b, bs)\n\n # Equalise the shapes by prepending smaller one 
with 1s\n as_ = (1,)*max(0, ndb-nda) + as_\n bs = (1,)*max(0, nda-ndb) + bs\n\n # Compute the product\n a_arr = a.reshape(a.size, 1)\n b_arr = b.reshape(1, b.s" }, { "id": 61081, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/base.py", "file_name": "base.py", "fun_name": "get_candidate_lookup", "commit_message": "upd; format", "code": "def get_candidate_lookup(self):\n # type: () -> CandidateLookup\n raise NotImplementedError(\"Subclass should override\")\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 24, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 20, "n_identifiers": 3, "random_cut": "def get_candidate_lookup(self):\n # type: () -> CandidateLookup\n raise NotImplementedError(\"Subclass should override\")\n" }, { "id": 296070, "commit_id": "fe6a4bfb1dbb37dd16a0d73d776ad5f604154670", "repo": "core", "path": "tests/components/utility_meter/test_sensor.py", "file_name": "test_sensor.py", "fun_name": "test_self_reset_hourly", "commit_message": "Remove EVENT_TIME_CHANGED and EVENT_TIMER_OUT_OF_SYNC (#69643)\n\nCo-authored-by: Martin Hjelmare ", "code": "async def test_self_reset_hourly(hass):\n \n await _test_self_reset(\n hass, gen_config(\"hourly\"), \"2017-12-31T23:59:00.000000+00:00\"\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 25, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 18, "n_ast_nodes": 36, "n_identifiers": 4, "random_cut": "async def test_self_reset_hourly(hass):\n \n await _test_self_reset(\n hass, gen_config(\"hourly\"), \"2017-12-31T23" }, { "id": 139039, "commit_id": "b2b1c95aa5f94c74d192caca0d86945f2b4ce986", "repo": "ray", "path": "rllib/agents/a3c/tests/test_a2c.py", "file_name": "test_a2c.py", "fun_name": "test_a2c_compilation", "commit_message": "[RLlib] A2/3C Config objects (A2CConfig and A3CConfig). 
(#24332)", "code": "def test_a2c_compilation(self):\n \n config = a3c.A2CConfig().rollouts(num_rollout_workers=2, num_envs_per_worker=2)\n\n num_iterations = 1\n\n # Test against all frameworks.\n for _ in framework_iterator(config, with_eager_tracing=True):\n for env in [\"CartPole-v0\", \"Pendulum-v1\", \"PongDeterministic-v0\"]:\n trainer = config.build(env=env)\n for i in range(num_iterations):\n results = trainer.train()\n check_train_results(results)\n print(results)\n check_compute_single_action(trainer)\n trainer.stop()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 202, "n_words": 39, "vocab_size": 32, "complexity": 4, "nloc": 12, "token_counts": 92, "n_ast_nodes": 154, "n_identifiers": 23, "random_cut": "def test_a2c_compilation(self):\n \n config = a3c.A2CConfig().rollouts(num_rollout_workers=2, num_envs_per_worker=2)\n\n num_iterations = 1\n\n # Test against all frameworks.\n for _ in framework_iterator(config, with_eager_tracing=True):\n for env in [\"CartPole-v0\", \"Pendulum-v1\", \"PongDeterministic-v0\"]:\n trainer = config.build(env=env)\n for i in range(num_iterations):\n resu" }, { "id": 205901, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/sql/where.py", "file_name": "where.py", "fun_name": "_resolve_leaf", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _resolve_leaf(expr, query, *args, **kwargs):\n if hasattr(expr, \"resolve_expression\"):\n expr = expr.resolve_expression(query, *args, **kwargs)\n return expr\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 39, "n_words": 15, "vocab_size": 13, "complexity": 2, "nloc": 4, "token_counts": 37, "n_ast_nodes": 57, "n_identifiers": 7, "random_cut": "def _resolve_leaf(expr, query, *args, **kwargs):\n if hasattr(expr," }, { "id": 101422, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "tools/preview/preview.py", "file_name": "preview.py", "fun_name": "update_tk_image", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", "code": "def update_tk_image(self) -> None:\n \n logger.trace(\"Updating tk image\") # type: ignore\n self._build_faces_image()\n img = np.vstack((self._faces_source, self._faces_dest))\n size = self._get_scale_size(img)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n pilimg = Image.fromarray(img)\n pilimg = pilimg.resize(size, Image.ANTIALIAS)\n self._tk_image = ImageTk.PhotoImage(pilimg)\n self._tk_vars[\"refresh\"].set(False)\n logger.trace(\"Updated tk image\") # type: ignore\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 118, "n_words": 39, "vocab_size": 27, "complexity": 1, "nloc": 12, "token_counts": 102, "n_ast_nodes": 172, "n_identifiers": 25, "random_cut": "def update_tk_image(self) -> None:\n \n logger.trace(\"Updating tk image\") # type: ignore\n self._build_faces_image()\n img = np.vstack((self._" }, { "id": 95301, "commit_id": "68b1cdf3b1bcb7990834a890b8a32a021bc75666", "repo": "sentry", "path": "tests/sentry/middleware/test_ratelimit_middleware.py", "file_name": "test_ratelimit_middleware.py", "fun_name": "get", 
"commit_message": "feat(ratelimits): Add headers with rate limit details (#30951)\n\nThe headers allow API users to know where they are in terms of their rate limits. It'll be returned for every API that can be rate limited except when there is an internal exception.\r\nAt the time of this commit, rate limits are not enforced except for some specific endpoints.\r\n\r\n* Several improvements to rate limit headers\r\n\r\nHeaders now track how many requests are left in the current window, and when the next window starts\r\nAlso, rate limit metadata is in a dataclass", "code": "def get(self, request):\n return Response({\"ok\": True})\n\n\nurlpatterns = [\n url(r\"^/ratelimit$\", RateLimitHeaderTestEndpoint.as_view(), name=\"ratelimit-header-endpoint\")\n]\n\n\n@override_settings(ROOT_URLCONF=\"tests.sentry.middleware.test_ratelimit_middleware\")", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@override_settings(ROOT_URLCONF=\"tests.sentry.middleware.test_ratelimit_middleware\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 20, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 72, "n_identifiers": 11, "random_cut": "def get(self, request):\n return Response({\"ok\": True})\n\n\nurlpatt" }, { "id": 130477, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/autoscaler/_private/gcp/config.py", "file_name": "config.py", "fun_name": "_configure_project", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _configure_project(config, crm):\n \n config = copy.deepcopy(config)\n\n project_id = config[\"provider\"].get(\"project_id\")\n assert config[\"provider\"][\"project_id\"] is not None, (\n \"'project_id' must be set in the 'provider' section of the autoscaler\"\n \" config. Notice that the project id must be globally unique.\"\n )\n project = _get_project(project_id, crm)\n\n if project is None:\n # Project not found, try creating it\n _create_project(project_id, crm)\n project = _get_project(project_id, crm)\n\n assert project is not None, \"Failed to create project\"\n assert (\n project[\"lifecycleState\"] == \"ACTIVE\"\n ), \"Project status needs to be ACTIVE, got {}\".format(project[\"lifecycleState\"])\n\n config[\"provider\"][\"project_id\"] = project[\"projectId\"]\n\n return config\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 166, "n_words": 87, "vocab_size": 60, "complexity": 2, "nloc": 17, "token_counts": 110, "n_ast_nodes": 193, "n_identifiers": 11, "random_cut": "def _configure_project(config, crm):\n \n config = copy.deepcopy(config)\n\n project_id = config[\"provider\"].get(\"project_id\")\n assert config[\"provider\"][\"project_id\"] is not None, (\n \"'project_id' must be set in the 'provider' section of the autoscaler\"\n \" config. Notice that the project id must be globally unique.\"\n )\n project = _get_project(project_id, crm)\n\n if project is None:\n # Project not found, try creating it\n _create_project(project_id, crm)\n project = _get_project(project_id, crm)\n\n assert proje" }, { "id": 252338, "commit_id": "cd4a74fae7cbd8119afc3900597f798ec1604db7", "repo": "mitmproxy", "path": "test/mitmproxy/addons/test_intercept.py", "file_name": "test_intercept.py", "fun_name": "test_udp", "commit_message": "Add support for raw UDP. 
(#5414)", "code": "async def test_udp():\n r = intercept.Intercept()\n with taddons.context(r) as tctx:\n tctx.configure(r, intercept=\"~udp\")\n f = tflow.tudpflow()\n await tctx.cycle(r, f)\n assert f.intercepted\n\n tctx.configure(r, intercept_active=False)\n f = tflow.tudpflow()\n await tctx.cycle(r, f)\n assert not f.intercepted\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 92, "n_words": 31, "vocab_size": 21, "complexity": 1, "nloc": 11, "token_counts": 82, "n_ast_nodes": 138, "n_identifiers": 14, "random_cut": "async def test_udp():\n r = intercept.Intercept()\n with taddons.context(r) as tctx:\n tctx.configure(r, intercept=\"~udp\")\n f = tflow.tudpflow()\n await tctx.cycle(r, f)\n assert f.intercepted\n\n tctx.configure(r, intercept_active=False)\n f = tflow" }, { "id": 243517, "commit_id": "69baeccf2ee7850ccfb9b2b05ab584b87ad50fe1", "repo": "Pillow", "path": "Tests/test_imagemath.py", "file_name": "test_imagemath.py", "fun_name": "pixel", "commit_message": "some pylint warnings\n\nFixed some pylint issues", "code": "def pixel(im):\n if hasattr(im, \"im\"):\n return f\"{im.mode} {repr(im.getpixel((0, 0)))}\"\n if isinstance(im, int):\n return int(im) # hack to deal with booleans\n print(im)\n\n\nA = Image.new(\"L\", (1, 1), 1)\nB = Image.new(\"L\", (1, 1), 2)\nZ = Image.new(\"L\", (1, 1), 0) # Z for zero\nF = Image.new(\"F\", (1, 1), 3)\nI = Image.new(\"I\", (1, 1), 4) # noqa: E741\n\nA2 = A.resize((2, 2))\nB2 = B.resize((2, 2))\n\nimages = {\"A\": A, \"B\": B, \"F\": F, \"I\": I}\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 93, "n_words": 76, "vocab_size": 53, "complexity": 3, "nloc": 6, "token_counts": 33, "n_ast_nodes": 277, "n_identifiers": 20, "random_cut": "def pixel(im):\n if hasattr(im, \"im\"):\n return f\"{im.mode} {repr(im.getpixel((0, 0)))}\"\n if isinstance(im, int):\n return int(im) # hack to deal with booleans\n print(im)\n\n\nA = Image.new(\"L\", (1, 1), 1)\nB = Image.new(\"L\", (1, 1), 2)\nZ = Image.new(\"L\", (1, 1), 0) # Z for zero\nF = Image.new(\"F\", (1, 1), 3)\nI = Image.new(\"I\", (1, 1), 4) # noqa: E741\n\nA2 = A.resize((2, 2))\nB2 = B.resize((2, 2))\n\nimages = {\"A\": A, \"B\": B, \"F\": F, \"I\": I" }, { "id": 252006, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/test_http.py", "file_name": "test_http.py", "fun_name": "test_get_multipart_form", "commit_message": "make it black!", "code": "def test_get_multipart_form(self):\n request = treq(content=b\"foobar\")\n assert not request.multipart_form\n\n request.headers[\"Content-Type\"] = \"multipart/form-data\"\n assert list(request.multipart_form.items()) == []\n\n with mock.patch(\"mitmproxy.net.http.multipart.decode\") as m:\n m.side_effect = ValueError\n assert list(request.multipart_form.items()) == []\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 82, "n_words": 26, "vocab_size": 19, "complexity": 1, "nloc": 8, "token_counts": 70, "n_ast_nodes": 123, "n_identifiers": 14, "random_cut": "def test_get_multipart_form(self):\n " }, { "id": 90595, "commit_id": "a7f6111fcf15ef09696991027e473dc42520dadc", "repo": "sentry", "path": "tests/sentry/helpers/test_deprecation.py", "file_name": "test_deprecation.py", "fun_name": "head", 
"commit_message": "feat(api): Add initial skeleton for deprecation decorator (#34980)\n\n* feat(api): Add initial skeleton for deprecation decorator\r\n\r\nThe decorator will eventually handle all the overhead for a deprecated endpoint including\r\n* Metric tracking\r\n* Brownout\r\n* Deprecation Headers\r\n\r\nLater PRs will introduce crontab logic for brownout periods and unit tests\r\n\r\n* Added a header for suggested API and an exit for self hosted\r\n\r\n* Added the base for the deprecation test\r\n\r\n* Added unit tests, but am running into issues with 403s\r\n\r\n* tell pytest to ignore the endpoint\r\n\r\n* Remove test prefix\r\n\r\n* Got tests passing and handled time boundary corner case\r\n\r\n* typo", "code": "def head(self, request):\n return Response({\"ok\": True})\n\n\ndummy_endpoint = DummyEndpoint.as_view()\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 14, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 39, "n_identifiers": 7, "random_cut": "def head(self, request):\n return Re" }, { "id": 133510, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/workflow/workflow_storage.py", "file_name": "workflow_storage.py", "fun_name": "get_entrypoint_step_id", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_entrypoint_step_id(self) -> StepID:\n \n # empty StepID represents the workflow driver\n try:\n return asyncio_run(self._locate_output_step_id(\"\"))\n except Exception as e:\n raise ValueError(\n \"Fail to get entrypoint step ID from workflow\"\n f\"[id={self._workflow_id}]\"\n ) from e\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 123, "n_words": 32, "vocab_size": 31, "complexity": 2, "nloc": 13, "token_counts": 34, "n_ast_nodes": 71, "n_identifiers": 9, "random_cut": "def get_entrypoint_step_id(self) -> StepID:\n \n # empty StepID represents the workflow driver\n try:\n " }, { "id": 110070, "commit_id": "7d2503b422f98686bef42e7caebe025540ca6aaa", "repo": "matplotlib", "path": "lib/matplotlib/figure.py", "file_name": "figure.py", "fun_name": "tight_layout", "commit_message": "Emit \"axes not compatible with tight_layout\" in a single place.\n\n... 
instead of triplicating it on the caller side.", "code": "def tight_layout(self, *, pad=1.08, h_pad=None, w_pad=None, rect=None):\n \n # note that here we do not permanently set the figures engine to\n # tight_layout but rather just perform the layout in place and remove\n # any previous engines.\n engine = TightLayoutEngine(pad=pad, h_pad=h_pad, w_pad=w_pad,\n rect=rect)\n try:\n self.set_layout_engine(engine)\n engine.execute(self)\n finally:\n self.set_layout_engine(None)\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 163, "n_words": 47, "vocab_size": 43, "complexity": 3, "nloc": 14, "token_counts": 94, "n_ast_nodes": 105, "n_identifiers": 10, "random_cut": "def tight_layout(self, *, pad=1.08, h_pad=None, w_pad=None, rect=None):\n \n # note that here we do not permanently set the figures engine to\n # tight_layout but rather just perform the layout in place and remove\n # any previous engines.\n engine = TightLayoutEngine(pad=pad, h_pad=h_pad, w_pad=w_pad" }, { "id": 35869, "commit_id": "d83d22f578276e9f201b0b3b0f8f9bd68e86c133", "repo": "transformers", "path": "tests/maskformer/test_modeling_maskformer.py", "file_name": "test_modeling_maskformer.py", "fun_name": "test_inference_instance_segmentation_head", "commit_message": "Maskformer (#15682)\n\n* maskformer\r\n\r\n* conflicts\r\n\r\n* conflicts\r\n\r\n* minor fixes\r\n\r\n* feature extractor test fix\r\n\r\nrefactor MaskFormerLoss following conversation\r\n\r\nMaskFormer related types should not trigger a module time import error\r\n\r\nmissed one\r\n\r\nremoved all the types that are not used\r\n\r\nupdate config mapping\r\n\r\nminor updates in the doc\r\n\r\nresolved conversation that doesn't need a discussion\r\n\r\nminor changes\r\n\r\nresolved conversations\r\n\r\nfixed DetrDecoder\r\n\r\n* minor changes\r\n\r\nminor changes\r\n\r\nfixed mdx file\r\n\r\ntest feature_extractor return types\r\n\r\nfunctional losses -> classes\r\n\r\nremoved the return type test for the feature extractor\r\n\r\nminor changes + style + quality\r\n\r\n* conflicts?\r\n\r\n* rebase master\r\n\r\n* readme\r\n\r\n* added missing files\r\n\r\n* deleded poolformers test that where in the wrong palce\r\n\r\n* CI\r\n\r\n* minor changes\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* resolved conversations\r\n\r\n* minor changes\r\n\r\n* conversations\r\n\r\n[Unispeech] Fix slow tests (#15818)\r\n\r\n* remove soundfile old way of loading audio\r\n\r\n* Adapt slow test\r\n\r\n[Barthez Tokenizer] Fix saving (#15815)\r\n\r\n[TFXLNet] Correct tf xlnet generate (#15822)\r\n\r\n* [TFXLNet] Correct tf xlnet\r\n\r\n* adapt test comment\r\n\r\nFix the push run (#15807)\r\n\r\nFix semantic segmentation pipeline test (#15826)\r\n\r\nFix dummy_inputs() to dummy_inputs in symbolic_trace doc (#15776)\r\n\r\nAdd model specific output classes to PoolFormer model docs (#15746)\r\n\r\n* Added model specific output classes to poolformer docs\r\n\r\n* Fixed Segformer typo in Poolformer docs\r\n\r\nAdding the option to return_timestamps on pure CTC ASR models. 
(#15792)\r\n\r\n* Adding the option to return_timestamps on pure CTC ASR models.\r\n\r\n* Remove `math.prod` which was introduced in Python 3.8\r\n\r\n* int are not floats.\r\n\r\n* Reworking the PR to support \"char\" vs \"word\" output.\r\n\r\n* Fixup!\r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Update src/transformers/pipelines/automatic_speech_recognition.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* Quality.\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\nHFTracer.trace should use/return self.graph to be compatible with torch.fx.Tracer (#15824)\r\n\r\nFix tf.concatenate + test past_key_values for TF models (#15774)\r\n\r\n* fix wrong method name tf.concatenate\r\n\r\n* add tests related to causal LM / decoder\r\n\r\n* make style and quality\r\n\r\n* clean-up\r\n\r\n* Fix TFBertModel's extended_attention_mask when past_key_values is provided\r\n\r\n* Fix tests\r\n\r\n* fix copies\r\n\r\n* More tf.int8 -> tf.int32 in TF test template\r\n\r\n* clean-up\r\n\r\n* Update TF test template\r\n\r\n* revert the previous commit + update the TF test template\r\n\r\n* Fix TF template extended_attention_mask when past_key_values is provided\r\n\r\n* Fix some styles manually\r\n\r\n* clean-up\r\n\r\n* Fix ValueError: too many values to unpack in the test\r\n\r\n* Fix more: too many values to unpack in the test\r\n\r\n* Add a comment for extended_attention_mask when there is past_key_values\r\n\r\n* Fix TFElectra extended_attention_mask when past_key_values is provided\r\n\r\n* Add tests to other TF models\r\n\r\n* Fix for TF Electra test: add prepare_config_and_inputs_for_decoder\r\n\r\n* Fix not passing training arg to lm_head in TFRobertaForCausalLM\r\n\r\n* Fix tests (with past) for TF Roberta\r\n\r\n* add testing for pask_key_values for TFElectra model\r\n\r\nCo-authored-by: ydshieh \r\n\r\n[examples/summarization and translation] fix readme (#15833)\r\n\r\nAdd ONNX Runtime quantization for text classification notebook (#15817)\r\n\r\nRe-enable doctests for the quicktour (#15828)\r\n\r\n* Re-enable doctests for the quicktour\r\n\r\n* Re-enable doctests for task_summary (#15830)\r\n\r\n* Remove &\r\n\r\nFramework split model report (#15825)\r\n\r\nAdd TFConvNextModel (#15750)\r\n\r\n* feat: initial implementation of convnext in tensorflow.\r\n\r\n* fix: sample code for the classification model.\r\n\r\n* chore: added checked for from the classification model.\r\n\r\n* chore: set bias initializer in the classification head.\r\n\r\n* chore: updated license terms.\r\n\r\n* chore: removed ununsed imports\r\n\r\n* feat: enabled argument during using drop_path.\r\n\r\n* chore: replaced tf.identity 
with layers.Activation(linear).\r\n\r\n* chore: edited default checkpoint.\r\n\r\n* fix: minor bugs in the initializations.\r\n\r\n* partial-fix: tf model errors for loading pretrained pt weights.\r\n\r\n* partial-fix: call method updated\r\n\r\n* partial-fix: cross loading of weights (4x3 variables to be matched)\r\n\r\n* chore: removed unneeded comment.\r\n\r\n* removed playground.py\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* fix: renaming TFConvNextStage conv and layer norm layers\r\n\r\n* chore: added initializers and other minor additions.\r\n\r\n* chore: added initializers and other minor additions.\r\n\r\n* add: tests for convnext.\r\n\r\n* fix: integration tester class.\r\n\r\n* fix: issues mentioned in pr feedback (round 1).\r\n\r\n* fix: how output_hidden_states arg is propoagated inside the network.\r\n\r\n* feat: handling of arg for pure cnn models.\r\n\r\n* chore: added a note on equal contribution in model docs.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* feat: encapsulation for the convnext trunk.\r\n\r\n* Fix variable naming; Test-related corrections; Run make fixup\r\n\r\n* chore: added Joao as a contributor to convnext.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* chore: corrected copyright year and added comment on NHWC.\r\n\r\n* chore: fixed the black version and ran formatting.\r\n\r\n* chore: ran make style.\r\n\r\n* chore: removed from_pt argument from test, ran make style.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* fix: tests in the convnext subclass, ran make style.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* chore: moved convnext test to the correct location\r\n\r\n* fix: locations for the test file of convnext.\r\n\r\n* fix: convnext tests.\r\n\r\n* chore: applied sgugger's suggestion for dealing w/ output_attentions.\r\n\r\n* chore: added comments.\r\n\r\n* chore: applied updated quality enviornment style.\r\n\r\n* chore: applied formatting with quality enviornment.\r\n\r\n* chore: revert to the previous tests/test_modeling_common.py.\r\n\r\n* chore: revert to the original test_modeling_common.py\r\n\r\n* chore: revert to previous states for test_modeling_tf_common.py and modeling_tf_utils.py\r\n\r\n* fix: tests for convnext.\r\n\r\n* chore: removed output_attentions argument from convnext config.\r\n\r\n* chore: revert to the earlier tf utils.\r\n\r\n* fix: output shapes of the hidden states\r\n\r\n* chore: removed unnecessary comment\r\n\r\n* chore: reverting to the right test_modeling_tf_common.py.\r\n\r\n* Styling nits\r\n\r\nCo-authored-by: ariG23498 \r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Sylvain Gugger \r\n\r\n* minor changes\r\n\r\n* doc fix in feature extractor\r\n\r\n* doc\r\n\r\n* typose\r\n\r\n* removed detr logic from config\r\n\r\n* removed detr logic from config\r\n\r\n* removed num_labels\r\n\r\n* small fix in the config\r\n\r\n* auxilary -> auxiliary\r\n\r\n* make style\r\n\r\n* some test is failing\r\n\r\n* fix a weird char in config prevending doc-builder\r\n\r\n* retry to fix the doc-builder issue\r\n\r\n* make style\r\n\r\n* new try to fix the doc builder\r\n\r\n* CI\r\n\r\n* change weights to facebook\r\n\r\nCo-authored-by: NielsRogge 
<48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: ariG23498 \r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Sylvain Gugger ", "code": "def test_inference_instance_segmentation_head(self):\n model = MaskFormerForInstanceSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()\n feature_extractor = self.default_feature_extractor\n image = prepare_img()\n inputs = feature_extractor(image, return_tensors=\"pt\").to(torch_device)\n inputs_shape = inputs[\"pixel_values\"].shape\n # check size is divisible by 32\n self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)\n # check size\n self.assertEqual(inputs_shape, (1, 3, 800, 1088))\n\n with torch.no_grad():\n outputs = model(**inputs)\n # masks_queries_logits\n masks_queries_logits = outputs.masks_queries_logits\n self.assertEqual(\n masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)\n )\n expected_slice = torch.tensor(\n [[-1.3738, -1.7725, -1.9365], [-1.5978, -1.9869, -2.1524], [-1.5796, -1.9271, -2.0940]]\n )\n self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))\n # class_queries_logits\n class_queries_logits = outputs.class_queries_logits\n self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))\n expected_slice = torch.tensor(\n [\n [1.6512e00, -5.2572e00, -3.3519e00],\n [3.6169e-02, -5.9025e00, -2.9313e00],\n [1.0766e-04, -7.7630e00, -5.1263e00],\n ]\n )\n self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 375, "n_words": 115, "vocab_size": 84, "complexity": 2, "nloc": 28, "token_counts": 351, "n_ast_nodes": 483, "n_identifiers": 32, "random_cut": "def test_inference_instance_segmentation_head(self):\n mo" }, { "id": 45131, "commit_id": "4e17528fd2ed69020c3cdc6672e3093254f1477f", "repo": "airflow", "path": "chart/tests/test_migrate_database_job.py", "file_name": "test_migrate_database_job.py", "fun_name": "test_default_command_and_args_airflow_version", "commit_message": "Add support for custom command and args in jobs (#20864)", "code": "def test_default_command_and_args_airflow_version(self, airflow_version, expected_arg):\n docs = render_chart(\n values={\n \"airflowVersion\": airflow_version,\n },\n show_only=[\"templates/jobs/migrate-database-job.yaml\"],\n )\n\n assert jmespath.search(\"spec.template.spec.containers[0].command\", docs[0]) is None\n assert [\n \"bash\",\n \"-c\",\n f\"exec \\\\\\n{expected_arg}\",\n ] == jmespath.search(\"spec.template.spec.containers[0].args\", docs[0])\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 143, "n_words": 28, "vocab_size": 25, "complexity": 1, "nloc": 13, "token_counts": 65, "n_ast_nodes": 110, "n_identifiers": 10, "random_cut": "def test_default_command_and_args_airflow_version(self, airflow_version, expected_arg):\n docs = render_chart(\n values={\n \"airflowVersion\": airflow_version,\n },\n show_only=[\"templates/jobs/" }, { "id": 60532, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/cli/parser.py", "file_name": "parser.py", "fun_name": "error", "commit_message": "upd; format", 
"code": "def error(self, msg):\n # type: (str) -> None\n self.print_usage(sys.stderr)\n self.exit(UNKNOWN_ERROR, f\"{msg}\\n\")\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 44, "n_identifiers": 8, "random_cut": "def error(self, msg):\n # type: (str) -> None\n " }, { "id": 73384, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/redirects/tests/test_redirects.py", "file_name": "test_redirects.py", "fun_name": "test_edit_duplicate", "commit_message": "Reformat with black", "code": "def test_edit_duplicate(self):\n models.Redirect.objects.create(\n old_path=\"/othertest\", site=None, redirect_link=\"http://elsewhere.com/\"\n )\n response = self.post(\n {\n \"old_path\": \"/othertest\",\n \"is_permanent\": \"on\",\n \"site\": \"\",\n \"redirect_link\": \"http://www.test.com/ive-been-edited\",\n }\n )\n\n # Should not redirect to index\n self.assertEqual(response.status_code, 200)\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 163, "n_words": 29, "vocab_size": 28, "complexity": 1, "nloc": 13, "token_counts": 60, "n_ast_nodes": 109, "n_identifiers": 13, "random_cut": "def test_edit_duplicate(self):\n models.Redirect.objects.create(\n old_path=\"/othertest\", site=None, redirect_link=\"h" }, { "id": 162503, "commit_id": "c5332d7fbb654a7127aeb080b91f8e85b48796b4", "repo": "yt-dlp", "path": "yt_dlp/extractor/instagram.py", "file_name": "instagram.py", "fun_name": "_extract_product_media", "commit_message": "[instagram] Fix bug in 013322a95e396ab21c8febc3e560d8a177c87f4a\n\nCloses #2552", "code": "def _extract_product_media(self, product_media):\n media_id = product_media.get('code') or product_media.get('id')\n vcodec = product_media.get('video_codec')\n dash_manifest_raw = product_media.get('video_dash_manifest')\n videos_list = product_media.get('video_versions')\n if not (dash_manifest_raw or videos_list):\n return {}\n\n formats = [{\n 'format_id': format.get('id'),\n 'url': format.get('url'),\n 'width': format.get('width'),\n 'height': format.get('height'),\n 'vcodec': vcodec,\n } for format in videos_list or []]\n if dash_manifest_raw:\n formats.extend(self._parse_mpd_formats(self._parse_xml(dash_manifest_raw, media_id), mpd_id='dash'))\n self._sort_formats(formats)\n\n thumbnails = [{\n 'url': thumbnail.get('url'),\n 'width': thumbnail.get('width'),\n 'height': thumbnail.get('height')\n } for thumbnail in traverse_obj(product_media, ('image_versions2', 'candidates')) or []]\n return {\n 'id': media_id,\n 'duration': float_or_none(product_media.get('video_duration')),\n 'formats': formats,\n 'thumbnails': thumbnails\n }\n", "url": "https://github.com/yt-dlp/yt-dlp.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 323, "n_words": 79, "vocab_size": 58, "complexity": 9, "nloc": 28, "token_counts": 215, "n_ast_nodes": 380, "n_identifiers": 19, "random_cut": "def _extract_product_media(self, product_media):\n media_id = product_media.get('code') or product_media.get('id')\n vcodec = product_media.get('video_codec')\n dash_manifest_raw = product_media.get('video_dash_manifest')\n videos_list = product_media.get('video_versions')\n " }, { "id": 76935, "commit_id": 
"e777c22d70191382668efeb04981b4b4acb29905", "repo": "wagtail", "path": "wagtail/contrib/forms/views.py", "file_name": "views.py", "fun_name": "get", "commit_message": "Initialize locale attribute in SafePaginateListView.__init__", "code": "def get(self, request, *args, **kwargs):\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n if request.GET.get(\"locale\"):\n self.locale = get_object_or_404(\n Locale, language_code=request.GET[\"locale\"]\n )\n else:\n self.locale = Locale.get_default()\n\n return super().get(request, *args, **kwargs)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 124, "n_words": 25, "vocab_size": 21, "complexity": 3, "nloc": 9, "token_counts": 76, "n_ast_nodes": 122, "n_identifiers": 14, "random_cut": "def get(self, request, *args, **kwargs):\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n if request.GET.get(\"locale\"):\n self.locale = get_object_or_404(\n Locale, language_code=request.GET[\"locale\"]\n )\n else:\n self.locale = Locale.get_default()\n\n r" }, { "id": 58128, "commit_id": "076fb7d6472874eeec670239590c78a763e0f72d", "repo": "prefect", "path": "src/prefect/orion/schemas/schedules.py", "file_name": "schedules.py", "fun_name": "default_timezone", "commit_message": "Ensure that UTC offsets dont get parsed as timezones (PrefectHQ/orion#2551)", "code": "def default_timezone(cls, v, *, values, **kwargs):\n # if was provided, make sure its a valid IANA string\n if v and v not in pendulum.tz.timezones:\n raise ValueError(f'Invalid timezone: \"{v}\"')\n\n # otherwise infer the timezone from the anchor date\n elif v is None and values.get(\"anchor_date\"):\n tz = values[\"anchor_date\"].tz.name\n if tz in pendulum.tz.timezones:\n return tz\n # sometimes anchor dates have \"timezones\" that are UTC offsets\n # like \"-04:00\". 
This happens when parsing ISO8601 strings.\n # In this case we, the correct inferred localization is \"UTC\".\n else:\n return \"UTC\"\n\n return v\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 228, "n_words": 87, "vocab_size": 67, "complexity": 6, "nloc": 10, "token_counts": 71, "n_ast_nodes": 124, "n_identifiers": 11, "random_cut": "def default_timezone(cls, v, *, values, **kwargs):\n # if was provided, make sure its a valid IANA string\n if v and v not in pendulum.tz.timezones:\n raise ValueError(f'Invalid timezone: \"{v}\"')\n\n # otherwise infer the timezone from the anchor date\n elif v is None and values.get(\"anchor_date\"):\n tz = values[\"anchor_date\"].tz.name\n if tz in pendulum.tz.timezones:\n return tz\n # sometimes anchor dates have \"timezones\" that" }, { "id": 51475, "commit_id": "94949b0e9120b4bca5888a4d19ff9759a05dd54f", "repo": "PaddleHub", "path": "modules/image/classification/mobilenet_v2_dishes/test.py", "file_name": "test.py", "fun_name": "test_classification2", "commit_message": "update mobilenet_v2_dishes (#2018)", "code": "def test_classification2(self):\n results = self.module.classification(\n images=[cv2.imread('tests/test.jpg')]\n )\n data = results[0]\n self.assertTrue('海鲜面' in data)\n self.assertTrue(data['海鲜面'] > 0.01)\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 61, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 7, "token_counts": 51, "n_ast_nodes": 84, "n_identifiers": 10, "random_cut": "def test_classification2(self):\n " }, { "id": 296766, "commit_id": "0e0c0ce22b0a87e0c709abab2091dc5bfddb42bb", "repo": "core", "path": "homeassistant/components/google/calendar.py", "file_name": "calendar.py", "fun_name": "async_update", "commit_message": "Move google calendar integration to aiohttp (#70173)\n\n* Use new aiohttp based google client library in gcal_sync.\r\n\r\n* Use base url in tests for shorter string\r\n\r\n* Remove unnecessary line of code\r\n\r\n* Jump to gcal-sync-0.4.1\r\n\r\n* Update tests/components/google/conftest.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Update to gcal_sync 0.5.0 incorporating PR feedback\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "async def async_update(self) -> None:\n \n request = ListEventsRequest(calendar_id=self._calendar_id, search=self._search)\n try:\n result = await self._calendar_service.async_list_events(request)\n except ApiException as err:\n _LOGGER.error(\"Unable to connect to Google: %s\", err)\n return\n\n # Pick the first visible event and apply offset calculations.\n valid_items = filter(self._event_filter, result.items)\n event = copy.deepcopy(next(valid_items, None))\n if event:\n (event.summary, offset) = extract_offset(event.summary, self._offset)\n self._event = _get_calendar_event(event)\n self._offset_value = offset\n else:\n self._event = None\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 201, "n_words": 61, "vocab_size": 50, "complexity": 3, "nloc": 16, "token_counts": 117, "n_ast_nodes": 191, "n_identifiers": 30, "random_cut": "async def async_update(self) -> None:\n \n request = ListEventsRequest(calendar_id=self._calendar_id, search=self._search)\n try:\n result = await self._calendar_service.async_list_events(request)\n except ApiException as err:\n 
_LOGGER.error(\"Unable to connect to Google: %s\", err)\n return\n\n # Pick the first visible event and apply offset calculations.\n valid_items = filter(self._event_filter, result.items)\n event " }, { "id": 53339, "commit_id": "be671cbecee46c621dc08ed47bb520f795b34a42", "repo": "prefect", "path": "src/prefect/flow_runners.py", "file_name": "flow_runners.py", "fun_name": "_get_environment_variables", "commit_message": "Kubernetes flow runner (PrefectHQ/orion#780)\n\nAdd a Kubernetes flow runner", "code": "def _get_environment_variables(self):\n env = self.env.copy()\n env.setdefault(\"PREFECT_ORION_HOST\", \"http://orion:4200/api\")\n return env\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 29, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 24, "n_ast_nodes": 43, "n_identifiers": 5, "random_cut": "def _get_environment_variables(self):\n env = self.env.copy()\n env.setdefault(\"PREFECT_ORION_HOST\", \"http://orion:4" }, { "id": 194167, "commit_id": "65769ab7662263a032a14c77e7b0890abb7c3001", "repo": "vision", "path": "test/prototype_transforms_kernel_infos.py", "file_name": "prototype_transforms_kernel_infos.py", "fun_name": "reference_inputs_convert_color_space_image_tensor", "commit_message": "fix prototype transforms tests with set agg_method (#6934)\n\n* fix prototype transforms tests with set agg_method\r\n\r\n* use individual tolerances\r\n\r\n* refactor PIL reference test\r\n\r\n* increase tolerance for elastic_mask\r\n\r\n* fix autocontrast tolerances\r\n\r\n* increase tolerance for RandomAutocontrast", "code": "def reference_inputs_convert_color_space_image_tensor():\n for args_kwargs in sample_inputs_convert_color_space_image_tensor():\n (image_loader, *other_args), kwargs = args_kwargs\n if len(image_loader.shape) == 3 and image_loader.dtype == torch.uint8:\n yield args_kwargs\n\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 48, "n_words": 21, "vocab_size": 18, "complexity": 4, "nloc": 5, "token_counts": 41, "n_ast_nodes": 66, "n_identifiers": 11, "random_cut": "def reference_inputs_convert_color_space_image_tensor():\n for args_kwargs in sample_inputs_convert_color_space_image_tensor():\n (image_loader, *other_args), kwargs = args_kwargs\n if le" }, { "id": 111478, "commit_id": "5afa98aabfc18a23f19b07b13e2cd12ddb6ee009", "repo": "spaCy", "path": "spacy/tests/doc/test_json_doc_conversion.py", "file_name": "test_json_doc_conversion.py", "fun_name": "test_doc_to_json_with_token_attributes_missing", "commit_message": "Support custom attributes for tokens and spans in json conversion (#11125)\n\n* Add token and span custom attributes to to_json()\r\n\r\n* Change logic for to_json\r\n\r\n* Add functionality to from_json\r\n\r\n* Small adjustments\r\n\r\n* Move token/span attributes to new dict key\r\n\r\n* Fix test\r\n\r\n* Fix the same test but much better\r\n\r\n* Add backwards compatibility tests and adjust logic\r\n\r\n* Add test to check if attributes not set in underscore are not saved in the json\r\n\r\n* Add tests for json compatibility\r\n\r\n* Adjust test names\r\n\r\n* Fix tests and clean up code\r\n\r\n* Fix assert json tests\r\n\r\n* small adjustment\r\n\r\n* adjust naming and code readability\r\n\r\n* Adjust naming, added more tests and changed logic\r\n\r\n* Fix typo\r\n\r\n* Adjust errors, naming, and small test optimization\r\n\r\n* Fix byte 
tests\r\n\r\n* Fix bytes tests\r\n\r\n* Change naming and json structure\r\n\r\n* update schema\r\n\r\n* Update spacy/schemas.py\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Update spacy/tokens/doc.pyx\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Update spacy/tokens/doc.pyx\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Update spacy/schemas.py\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Update schema for underscore attributes\r\n\r\n* Adjust underscore schema\r\n\r\n* adjust schema tests\r\n\r\nCo-authored-by: Adriane Boyd ", "code": "def test_doc_to_json_with_token_attributes_missing(doc):\n Token.set_extension(\"token_test\", default=False)\n Span.set_extension(\"span_test\", default=False)\n\n doc[0:1]._.span_test = \"span_attribute\"\n doc[0]._.token_test = 117\n json_doc = doc.to_json(underscore=[\"span_test\"])\n\n assert \"underscore_token\" in json_doc\n assert \"underscore_span\" in json_doc\n assert json_doc[\"underscore_span\"][\"span_test\"][\"value\"] == \"span_attribute\"\n assert \"token_test\" not in json_doc[\"underscore_token\"]\n assert len(schemas.validate(schemas.DocJSONSchema, json_doc)) == 0\n\n", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 66, "n_words": 37, "vocab_size": 24, "complexity": 1, "nloc": 11, "token_counts": 104, "n_ast_nodes": 180, "n_identifiers": 16, "random_cut": "def test_doc_to_json_with_token_attributes_missing(doc):\n Token.set_extension(\"token_test\", default=False)\n Span.set_extension(\"span_test\", default=False)\n\n doc[0:1]._.span_test = \"span_attribute\"\n doc[0]._.token_test = 117\n json_doc = doc.to_json(underscore=[\"span_test\"])\n\n assert \"underscore_token\" in json_doc\n assert \"underscore_span\" in json_doc\n assert json_doc[\"underscore_span\"][\"span_test\"][\"value" }, { "id": 256681, "commit_id": "e20f2e0d541805c3afb1f0948fa85f88b2a4f434", "repo": "haystack", "path": "test/test_pipeline.py", "file_name": "test_pipeline.py", "fun_name": "test_PipelineCodeGen_dual_retriever_pipeline", "commit_message": "Generate code from pipeline (pipeline.to_code()) (#2214)\n\n* pipeline.to_code() with jupyter support\r\n\r\n* Update Documentation & Code Style\r\n\r\n* add imports\r\n\r\n* refactoring\r\n\r\n* Update Documentation & Code Style\r\n\r\n* docstrings added and refactoring\r\n\r\n* Update Documentation & Code Style\r\n\r\n* improve imports code generation\r\n\r\n* add comment param\r\n\r\n* Update Documentation & Code Style\r\n\r\n* add simple test\r\n\r\n* add to_notebook_cell()\r\n\r\n* Update Documentation & Code Style\r\n\r\n* introduce helper classes for code gen and eval report gen\r\n\r\n* add more tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix Dict typings\r\n\r\n* Update Documentation & Code Style\r\n\r\n* validate user input before code gen\r\n\r\n* enable urls for to_code()\r\n\r\n* Update Documentation & Code Style\r\n\r\n* remove all chars except colon from validation regex\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def test_PipelineCodeGen_dual_retriever_pipeline():\n es_doc_store = ElasticsearchDocumentStore(index=\"my-index\")\n es_retriever = ElasticsearchRetriever(document_store=es_doc_store, top_k=20)\n dense_doc_store = InMemoryDocumentStore(index=\"my-index\")\n emb_retriever = EmbeddingRetriever(\n document_store=dense_doc_store, embedding_model=\"sentence-transformers/all-MiniLM-L6-v2\"\n )\n p_ensemble = Pipeline()\n 
p_ensemble.add_node(component=es_retriever, name=\"EsRetriever\", inputs=[\"Query\"])\n p_ensemble.add_node(component=emb_retriever, name=\"EmbeddingRetriever\", inputs=[\"Query\"])\n p_ensemble.add_node(\n component=JoinDocuments(join_mode=\"merge\"), name=\"JoinResults\", inputs=[\"EsRetriever\", \"EmbeddingRetriever\"]\n )\n\n code = _PipelineCodeGen.generate_code(pipeline=p_ensemble, pipeline_variable_name=\"p\", generate_imports=False)\n assert code == (\n 'elasticsearch_document_store = ElasticsearchDocumentStore(index=\"my-index\")\\n'\n \"es_retriever = ElasticsearchRetriever(document_store=elasticsearch_document_store, top_k=20)\\n\"\n 'in_memory_document_store = InMemoryDocumentStore(index=\"my-index\")\\n'\n 'embedding_retriever = EmbeddingRetriever(document_store=in_memory_document_store, embedding_model=\"sentence-transformers/all-MiniLM-L6-v2\")\\n'\n 'join_results = JoinDocuments(join_mode=\"merge\")\\n'\n \"\\n\"\n \"p = Pipeline()\\n\"\n 'p.add_node(component=es_retriever, name=\"EsRetriever\", inputs=[\"Query\"])\\n'\n 'p.add_node(component=embedding_retriever, name=\"EmbeddingRetriever\", inputs=[\"Query\"])\\n'\n 'p.add_node(component=join_results, name=\"JoinResults\", inputs=[\"EsRetriever\", \"EmbeddingRetriever\"])'\n )\n\n\n@pytest.mark.elasticsearch", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "@pytest.mark.elasticsearch", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 196, "n_words": 75, "vocab_size": 55, "complexity": 1, "nloc": 26, "token_counts": 143, "n_ast_nodes": 275, "n_identifiers": 30, "random_cut": "def test_PipelineCodeGen_dual_retriever_pipeline():\n es_doc_store = ElasticsearchDocumentStore(index=\"my-index\")\n es_retriever = ElasticsearchRetriever(document_store=es_doc_store, top_k=20)\n dense_doc_store = InMemoryDocumentStore(index=\"my-index\")\n emb_retriever = EmbeddingRetriever(\n document_store=dense_doc_store, embedding_model=\"sentence-transformers/all-MiniLM-L6-v2\"\n )\n p_ensemble = Pipeline()\n p_ensemble.add_node(component=es_retriever, name=\"EsRetriever\", inputs=[\"Query\"])\n p_ensemble.add_node(component=emb_retriever, name=\"EmbeddingRetriever\", inputs=[\"Query\"])\n p_ensemble.add_node(\n component=JoinDocuments(join_mode=\"merge\"), name=\"JoinResults\", inputs=[\"EsRetriever\", \"EmbeddingRetriever\"]\n )\n\n code = _PipelineCodeGen.generate_code(pipeline=p_ensemble, pipeline_variable_name=\"p\", generate_imports=False)\n assert code == (\n 'elasticsearch_document_store = ElasticsearchDocumentStore(index=\"my-index\")\\n'\n \"es_retriever = ElasticsearchRetriever(document_store=elasticsearch_document_store, top_k=20)\\n\"\n 'in_memory_document_store = InMemoryDocumentStore(index=\"my-index\")\\n'\n 'embedding_retriever = EmbeddingRetriever(document_store=in_memory_document_store, embedding_model=\"sentence-transformers/all-MiniLM-L6-v2\")\\n'\n 'join_results = JoinDocuments(join_mode=\"merge\")\\n'\n \"\\n\"\n \"p = Pipeline()\\n\"\n 'p.add_node(component=es_retriever, name=\"EsRetriever\", inputs=[\"Query\"])\\n'\n 'p.add_node(component=embedding_retriever, name=\"EmbeddingRetriever\", inputs=[\"Query\"])\\n'\n 'p.add_node(component=join_results, name=\"JoinResults\", inputs=[\"EsRetriever" }, { "id": 114674, "commit_id": "2fa2805fd6bbcf3819e8da4f3aba3d4618db082f", "repo": "mindsdb", "path": "mindsdb/interfaces/stream/stream.py", "file_name": "stream.py", "fun_name": "setup", "commit_message": "fix", "code": "def setup(self, db_alias):\n try:\n integration = 
self.integration_controller.get(db_alias)\n if integration is None:\n raise Exception(f'Unkonw database integration: {db_alias}')\n if integration.get('type') not in self.known_dbs:\n raise Exception(f'Unkonw database integration type for: {db_alias}')\n self.known_dbs[integration['type']](self.config, db_alias, integration).setup()\n except Exception as e:\n logger.warning('Failed to setup stream for ' + db_alias + f', error: {e}')\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 145, "n_words": 47, "vocab_size": 39, "complexity": 4, "nloc": 10, "token_counts": 87, "n_ast_nodes": 156, "n_identifiers": 12, "random_cut": "def setup(self, db_alias):\n try:\n integration = self.integration_co" }, { "id": 13439, "commit_id": "f738d34bfc75437c7203f5746cc602145088d220", "repo": "jina", "path": "jina/serve/instrumentation/__init__.py", "file_name": "__init__.py", "fun_name": "_new_timer", "commit_message": "fix: health check exception with opentelemetry tracing interceptors (#5392)", "code": "def _new_timer(self):\n return self.__class__(\n self._summary_metric, self._histogram, self._histogram_metric_labels\n )\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 32, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 22, "n_ast_nodes": 33, "n_identifiers": 6, "random_cut": "def _new_timer(self):\n return self.__class__(\n self._summary_metric, self._histogram, self._histo" }, { "id": 130364, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/autoscaler/_private/aliyun/utils.py", "file_name": "utils.py", "fun_name": "describe_vpcs", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def describe_vpcs(self):\n \n request = DescribeVpcsRequest()\n response = self._send_request(request)\n if response is not None:\n return response.get(\"Vpcs\").get(\"Vpc\")\n return None\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 63, "n_words": 17, "vocab_size": 14, "complexity": 2, "nloc": 6, "token_counts": 39, "n_ast_nodes": 70, "n_identifiers": 7, "random_cut": "def describe_vpcs(self):\n \n request " }, { "id": 191036, "commit_id": "301124c5b377fa56b940d298900dbc5816dbc24e", "repo": "thumbor", "path": "tests/test_signal_handler.py", "file_name": "test_signal_handler.py", "fun_name": "test_signal_handler_calls_add_callback_from_signal", "commit_message": "Reformat to 80 chars and mypy.ini", "code": "def test_signal_handler_calls_add_callback_from_signal(self, ioloop_mock):\n ioloop_instance_mock = mock.Mock()\n ioloop_mock.return_value = ioloop_instance_mock\n\n signal_handler(mock.Mock(), mock.Mock(), signal.SIGTERM, mock.Mock())\n\n ioloop_instance_mock.add_callback_from_signal.assert_called_with(\n mock.ANY\n )\n", "url": "https://github.com/thumbor/thumbor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 61, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 7, "token_counts": 53, "n_ast_nodes": 84, "n_identifiers": 13, "random_cut": "def test_signal_handler_calls_add_callback_from_signal(self, ioloop_mock):\n ioloop_instance_mo" }, { "id": 149003, "commit_id": "6cd01c45d5e57d357a6b1c3495ec035e0610fd78", "repo": 
"freqtrade", "path": "freqtrade/exchange/exchange.py", "file_name": "exchange.py", "fun_name": "get_market_leverage_tiers", "commit_message": "exchange.get_leverage_tiers and exchange.get_market_leverage_tiers", "code": "def get_market_leverage_tiers(self, symbol) -> List[Dict]:\n try:\n return self._api.fetch_market_leverage_tiers(symbol)\n except ccxt.DDoSProtection as e:\n raise DDosProtection(e) from e\n except (ccxt.NetworkError, ccxt.ExchangeError) as e:\n raise TemporaryError(\n f'Could not load leverage tiers for {symbol}'\n f' due to {e.__class__.__name__}. Message: {e}'\n ) from e\n except ccxt.BaseError as e:\n raise OperationalException(e) from e\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 159, "n_words": 47, "vocab_size": 35, "complexity": 4, "nloc": 12, "token_counts": 74, "n_ast_nodes": 134, "n_identifiers": 18, "random_cut": "def get_market_leverage_tiers(self, symbol) -> List[Dict]:\n try:\n return self._api.fetch_market_leverage_tiers(symbol)\n except ccxt.DDoSProtection as e:\n raise DDosProtection(e) from e\n except (ccxt.NetworkError, ccxt.ExchangeError) as e:\n raise TemporaryError(\n f'Could not load leverage tiers for {symbol}'\n f' due to {e.__class__.__name_" }, { "id": 105547, "commit_id": "7380140accf522a4363bb56c0b77a4190f49bed6", "repo": "datasets", "path": "tests/packaged_modules/test_audiofolder.py", "file_name": "test_audiofolder.py", "fun_name": "test_data_files_with_metadata_and_multiple_splits", "commit_message": "Add support for CSV metadata files to ImageFolder (#4837)\n\n* Add support for CSV metadata files to ImageFolder\r\n\r\n* Add tests\r\n\r\n* Update doc\r\n\r\n* Add one more test\r\n\r\n* Add identical tests for audiofolder\r\n\r\n* Docs for audiofolder\r\n\r\n* Address review comments\r\n\r\n* Minor adjustments", "code": "def test_data_files_with_metadata_and_multiple_splits(streaming, cache_dir, data_files_with_two_splits_and_metadata):\n data_files = data_files_with_two_splits_and_metadata\n audiofolder = AudioFolder(data_files=data_files, cache_dir=cache_dir)\n audiofolder.download_and_prepare()\n datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset()\n for split, data_files in data_files.items():\n expected_num_of_audios = len(data_files) - 1 # don't count the metadata file\n assert split in datasets\n dataset = list(datasets[split])\n assert len(dataset) == expected_num_of_audios\n # make sure each sample has its own audio and metadata\n assert len(set(example[\"audio\"][\"path\"] for example in dataset)) == expected_num_of_audios\n assert len(set(example[\"text\"] for example in dataset)) == expected_num_of_audios\n assert all(example[\"text\"] is not None for example in dataset)\n\n\n@require_sndfile\n@pytest.mark.parametrize(\"streaming\", [False, True])", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "@require_sndfile\n@pytest.mark.parametrize(\"streaming\", [False, True])", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 155, "n_words": 86, "vocab_size": 59, "complexity": 6, "nloc": 13, "token_counts": 135, "n_ast_nodes": 242, "n_identifiers": 24, "random_cut": "def test_data_files_with_metadata_and_multiple_splits(streaming, cache_dir, data_files_with_two_splits_and_metadata):\n data_files = data_files_with_two_splits_and_metadata\n audiofolder = AudioFolder(data_files=data_files, cache_dir=cache_dir)\n audiofolder.download_and_prepare()\n 
datasets = audiofolder.as_streaming_dataset() if streaming else audiofolder.as_dataset()\n for split, data_files in data_files.items():\n expected_num_of_audio" }, { "id": 166838, "commit_id": "7e23a37e1c5bda81234801a6584563e2880769eb", "repo": "pandas", "path": "pandas/tests/scalar/interval/test_interval.py", "file_name": "test_interval.py", "fun_name": "test_is_empty", "commit_message": "ENH: consistency of input args for boundaries - Interval (#46522)", "code": "def test_is_empty(self, left, right, closed):\n # GH27219\n # non-empty always return False\n iv = Interval(left, right, closed)\n assert iv.is_empty is False\n\n # same endpoint is empty except when inclusive='both' (contains one point)\n iv = Interval(left, left, closed)\n result = iv.is_empty\n expected = closed != \"both\"\n assert result is expected\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 111, "n_words": 49, "vocab_size": 32, "complexity": 1, "nloc": 7, "token_counts": 51, "n_ast_nodes": 79, "n_identifiers": 10, "random_cut": "def test_is_empty(self, left, right, closed):\n # GH27219\n # non-empty always return False\n iv = Interval(left, right, closed)\n assert iv.is_empty is False\n\n # same endpoint is empty except when inclusive='both' (contains one point)\n iv = Interval(left, left, closed)\n result = iv.is_empty\n expected = closed != \"both\"\n assert" }, { "id": 293521, "commit_id": "0d8f649bd65c8c54cd3503dd75485d3ec35d6076", "repo": "core", "path": "tests/components/websocket_api/test_commands.py", "file_name": "test_commands.py", "fun_name": "test_subscribe_unsubscribe_entities", "commit_message": "Websocket api to subscribe to entities (payloads reduced by ~80%+ vs state_changed events) (#67891)", "code": "async def test_subscribe_unsubscribe_entities(hass, websocket_client, hass_admin_user):\n \n\n hass.states.async_set(\"light.permitted\", \"off\", {\"color\": \"red\"})\n original_state = hass.states.get(\"light.permitted\")\n assert isinstance(original_state, State)\n state_dict = {\n \"attributes\": dict(original_state.attributes),\n \"context\": dict(original_state.context.as_dict()),\n \"entity_id\": original_state.entity_id,\n \"last_changed\": original_state.last_changed.isoformat(),\n \"last_updated\": original_state.last_updated.isoformat(),\n \"state\": original_state.state,\n }\n hass_admin_user.groups = []\n hass_admin_user.mock_policy({\"entities\": {\"entity_ids\": {\"light.permitted\": True}}})\n\n await websocket_client.send_json({\"id\": 7, \"type\": \"subscribe_entities\"})\n\n msg = await websocket_client.receive_json()\n assert msg[\"id\"] == 7\n assert msg[\"type\"] == const.TYPE_RESULT\n assert msg[\"success\"]\n\n msg = await websocket_client.receive_json()\n assert msg[\"id\"] == 7\n assert msg[\"type\"] == \"event\"\n assert isinstance(msg[\"event\"][\"a\"][\"light.permitted\"][\"c\"], str)\n assert msg[\"event\"] == {\n \"a\": {\n \"light.permitted\": {\n \"a\": {\"color\": \"red\"},\n \"c\": ANY,\n \"lc\": ANY,\n \"s\": \"off\",\n }\n }\n }\n hass.states.async_set(\"light.not_permitted\", \"on\")\n hass.states.async_set(\"light.permitted\", \"on\", {\"color\": \"blue\"})\n hass.states.async_set(\"light.permitted\", \"on\", {\"effect\": \"help\"})\n hass.states.async_set(\n \"light.permitted\", \"on\", {\"effect\": \"help\", \"color\": [\"blue\", \"green\"]}\n )\n hass.states.async_remove(\"light.permitted\")\n hass.states.async_set(\"light.permitted\", \"on\", {\"effect\": 
\"help\", \"color\": \"blue\"})\n\n msg = await websocket_client.receive_json()\n assert msg[\"id\"] == 7\n assert msg[\"type\"] == \"event\"\n assert msg[\"event\"] == {\n \"c\": {\n \"light.permitted\": {\n \"+\": {\n \"a\": {\"color\": \"blue\"},\n \"c\": ANY,\n \"lc\": ANY,\n \"s\": \"on\",\n }\n }\n }\n }\n\n change_set = msg[\"event\"][\"c\"][\"light.permitted\"]\n additions = deepcopy(change_set[\"+\"])\n _apply_entities_changes(state_dict, change_set)\n assert state_dict == {\n \"attributes\": {\"color\": \"blue\"},\n \"context\": {\n \"id\": additions[\"c\"],\n \"parent_id\": None,\n \"user_id\": None,\n },\n \"entity_id\": \"light.permitted\",\n \"last_changed\": additions[\"lc\"],\n \"last_updated\": additions[\"lc\"],\n \"state\": \"on\",\n }\n\n msg = await websocket_client.receive_json()\n assert msg[\"id\"] == 7\n assert msg[\"type\"] == \"event\"\n assert msg[\"event\"] == {\n \"c\": {\n \"light.permitted\": {\n \"+\": {\n \"a\": {\"effect\": \"help\"},\n \"c\": ANY,\n \"lu\": ANY,\n },\n \"-\": {\"a\": [\"color\"]},\n }\n }\n }\n\n change_set = msg[\"event\"][\"c\"][\"light.permitted\"]\n additions = deepcopy(change_set[\"+\"])\n _apply_entities_changes(state_dict, change_set)\n\n assert state_dict == {\n \"attributes\": {\"effect\": \"help\"},\n \"context\": {\n \"id\": additions[\"c\"],\n \"parent_id\": None,\n \"user_id\": None,\n },\n \"entity_id\": \"light.permitted\",\n \"last_changed\": ANY,\n \"last_updated\": additions[\"lu\"],\n \"state\": \"on\",\n }\n\n msg = await websocket_client.receive_json()\n assert msg[\"id\"] == 7\n assert msg[\"type\"] == \"event\"\n assert msg[\"event\"] == {\n \"c\": {\n \"light.permitted\": {\n \"+\": {\n \"a\": {\"color\": [\"blue\", \"green\"]},\n \"c\": ANY,\n \"lu\": ANY,\n }\n }\n }\n }\n\n change_set = msg[\"event\"][\"c\"][\"light.permitted\"]\n additions = deepcopy(change_set[\"+\"])\n _apply_entities_changes(state_dict, change_set)\n\n assert state_dict == {\n \"attributes\": {\"effect\": \"help\", \"color\": [\"blue\", \"green\"]},\n \"context\": {\n \"id\": additions[\"c\"],\n \"parent_id\": None,\n \"user_id\": None,\n },\n \"entity_id\": \"light.permitted\",\n \"last_changed\": ANY,\n \"last_updated\": additions[\"lu\"],\n \"state\": \"on\",\n }\n\n msg = await websocket_client.receive_json()\n assert msg[\"id\"] == 7\n assert msg[\"type\"] == \"event\"\n assert msg[\"event\"] == {\"r\": [\"light.permitted\"]}\n\n msg = await websocket_client.receive_json()\n assert msg[\"id\"] == 7\n assert msg[\"type\"] == \"event\"\n assert msg[\"event\"] == {\n \"a\": {\n \"light.permitted\": {\n \"a\": {\"color\": \"blue\", \"effect\": \"help\"},\n \"c\": ANY,\n \"lc\": ANY,\n \"s\": \"on\",\n }\n }\n }\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1461, "n_words": 376, "vocab_size": 100, "complexity": 1, "nloc": 147, "token_counts": 878, "n_ast_nodes": 1636, "n_identifiers": 34, "random_cut": "async def test_subscribe_unsubscribe_entities(hass, websocket_client, hass_admin_user):\n \n\n hass.states.async_set(\"light.permitted\", \"off\", {\"color\": \"red\"})\n original_state = hass.states.get(\"light.permitted\")\n assert isinstance(original_state, State)\n state_dict = {\n \"attributes\": dict(original_state.attributes),\n \"context\": dict(original_state.context.as_dict()),\n \"entity_id\": original_state.entity_id,\n \"la" }, { "id": 87296, "commit_id": "c8bfd65f261769da2565ca4240f11da6e820a7e4", "repo": "sentry", "path": 
"tests/sentry/relay/test_config.py", "file_name": "test_config.py", "fun_name": "test_get_experimental_config", "commit_message": "feat(dyn-sampling): Switch to new feature flag multiplexer in projectconfig (#40498)\n\nThis PR switch to new feature flag multiplexer\r\nin projectconfig.", "code": "def test_get_experimental_config(mock_sentry_sdk, _, default_project):\n keys = ProjectKey.objects.filter(project=default_project)\n with Feature(\n {\"organizations:dynamic-sampling\": True, \"organizations:server-side-sampling\": True}\n ):\n # Does not raise:\n cfg = get_project_config(default_project, full_config=True, project_keys=keys)\n # Key is missing from config:\n assert \"dynamicSampling\" not in cfg.to_dict()[\"config\"]\n assert mock_sentry_sdk.capture_exception.call_args == mock.call(SOME_EXCEPTION)\n\n\n@pytest.mark.django_db\n@pytest.mark.parametrize(\"has_custom_filters\", [False, True])", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@pytest.mark.django_db\n@pytest.mark.parametrize(\"has_custom_filters\", [False, True])", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 78, "n_words": 42, "vocab_size": 38, "complexity": 1, "nloc": 8, "token_counts": 74, "n_ast_nodes": 156, "n_identifiers": 24, "random_cut": "def test_get_experimental_config(mock_sentry_sdk, _, default_project):\n keys = ProjectKey.objects.filter(project=default_project)\n with Feature(\n {\"organizations:dynamic-sampling\": True, \"organizations:server-side-sampling\": True}\n ):\n # Does not raise:\n " }, { "id": 251174, "commit_id": "fdde9ba3b3caaa2654048cec0af07bfcc3a6a3f8", "repo": "mitmproxy", "path": "examples/addons/options-configure.py", "file_name": "options-configure.py", "fun_name": "load", "commit_message": "use Python 3.9+ typing", "code": "def load(self, loader):\n loader.add_option(\n name = \"addheader\",\n typespec = Optional[int],\n default = None,\n help = \"Add a header to responses\",\n )\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 78, "n_words": 21, "vocab_size": 18, "complexity": 1, "nloc": 7, "token_counts": 31, "n_ast_nodes": 48, "n_identifiers": 10, "random_cut": "def load(self, loader):\n loader.add_option(\n name = \"addhe" }, { "id": 155175, "commit_id": "193505fdf0c984743397ba3df56262f30aee13a8", "repo": "modin", "path": "modin/core/execution/unidist/implementations/pandas_on_unidist/partitioning/partition.py", "file_name": "partition.py", "fun_name": "_get_index_and_columns_size", "commit_message": "FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059)\n\nSigned-off-by: Igoshev, Iaroslav ", "code": "def _get_index_and_columns_size(df):\n \n return len(df.index), len(df.columns)\n\n\n@unidist.remote(num_returns=4)", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "@unidist.remote(num_returns=4)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 11, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 50, "n_identifiers": 8, "random_cut": "def _get_index_and_columns_size(df):\n \n return len(df.index), len(df.columns)\n\n\n@unidist.remote(num_returns=4)" }, { "id": 184172, "commit_id": "fa4b971bffb9f488d155981794b7f0bf3b657c72", "repo": "textual", "path": "sandbox/will/basic.py", "file_name": "basic.py", "fun_name": "compose", "commit_message": "more docs", "code": "def compose(self) -> ComposeResult:\n table = DataTable()\n 
self.scroll_to_target = Tweet(TweetBody())\n\n yield Static(\n Text.from_markup(\n \"[b]This is a [u]Textual[/u] app, running in the terminal\"\n ),\n id=\"header\",\n )\n yield from (\n Tweet(TweetBody()),\n Widget(\n Static(\n Syntax(CODE, \"python\", line_numbers=True, indent_guides=True),\n classes=\"code\",\n ),\n classes=\"scrollable\",\n ),\n table,\n Error(),\n Tweet(TweetBody(), classes=\"scrollbar-size-custom\"),\n Warning(),\n Tweet(TweetBody(), classes=\"scroll-horizontal\"),\n Success(),\n Tweet(TweetBody(), classes=\"scroll-horizontal\"),\n Tweet(TweetBody(), classes=\"scroll-horizontal\"),\n Tweet(TweetBody(), classes=\"scroll-horizontal\"),\n Tweet(TweetBody(), classes=\"scroll-horizontal\"),\n Tweet(TweetBody(), classes=\"scroll-horizontal\"),\n )\n yield Widget(id=\"footer\")\n yield Widget(\n Widget(classes=\"title\"),\n Widget(classes=\"user\"),\n OptionItem(),\n OptionItem(),\n OptionItem(),\n Widget(classes=\"content\"),\n id=\"sidebar\",\n )\n\n table.add_column(\"Foo\", width=20)\n table.add_column(\"Bar\", width=20)\n table.add_column(\"Baz\", width=20)\n table.add_column(\"Foo\", width=20)\n table.add_column(\"Bar\", width=20)\n table.add_column(\"Baz\", width=20)\n table.zebra_stripes = True\n for n in range(100):\n table.add_row(*[f\"Cell ([b]{n}[/b], {col})\" for col in range(6)])\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 587, "n_words": 96, "vocab_size": 61, "complexity": 3, "nloc": 49, "token_counts": 308, "n_ast_nodes": 516, "n_identifiers": 29, "random_cut": "def compose(self) -> ComposeResult:\n table = DataTable()\n self.scroll_to_target = Tweet(TweetBody())\n\n yield Static(\n Text.from_markup(\n \"[b]This is a [u]Textual[/u] app, running in the terminal\"\n ),\n id=\"header\",\n )\n yield from (\n Tweet(TweetBody()),\n Widget(\n Static(\n Syntax(CODE, \"python\", line_numbers=True, indent_guides=True),\n classes=\"code\",\n ),\n classes=\"scrollable\",\n ),\n table,\n Error(),\n Tweet(TweetBody(), classes=\"scrollbar-size-custom\"),\n Warning(),\n Tweet(TweetBody(), classes=\"scroll-horizontal\"),\n Success(),\n Tweet(TweetBody(), classes=\"scroll-horizontal\"),\n Tweet(TweetBody(), classes=\"scroll-horizontal\"),\n Tweet(TweetBody(), classes=\"scroll-horizontal\"),\n Tweet(TweetBody(), classes=\"scroll-horizontal\"),\n Tweet(TweetBody(), classes=\"scroll-horizontal\"),\n )\n yield Widget(id=\"footer\")\n yield Widget(\n Widget(classes=\"title\"),\n Widget(classes=\"user\"),\n OptionItem(),\n OptionItem(),\n OptionItem(),\n Widget(classes=\"content\"),\n id=\"sidebar\",\n )\n\n table.add_column(\"Foo\", width=20)\n table.add_column(\"Bar\", width=20)\n table.add" }, { "id": 291940, "commit_id": "af4e37339a39badd5596e8bc9ba86d6c1994aa1b", "repo": "core", "path": "homeassistant/components/sia/sia_entity_base.py", "file_name": "sia_entity_base.py", "fun_name": "async_added_to_hass", "commit_message": "Add Connectivity sensor to SIA (#64305)\n\n* implemented connectivity sensor\r\n\r\n* further cleanup off update code\r\n\r\n* cleanup and tighter behaviour for attributes\r\n\r\n* added seperate connectivity class to binary sensor\r\n\r\n* callbacks and keys\r\n\r\n* redid name and unique_id logic, non-breaking result\r\n\r\n* using entry more in inits\r\n\r\n* Fix import\r\n\r\n* fix ping_interval in sia_entity_base\r\n\r\n* added ping_interval default to next\r\n\r\n* fixed next\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "async def 
async_added_to_hass(self) -> None:\n \n self.async_on_remove(\n async_dispatcher_connect(\n self.hass,\n SIA_EVENT.format(self.port, self.account),\n self.async_handle_event,\n )\n )\n self.handle_last_state(await self.async_get_last_state())\n if self._attr_available:\n self.async_create_post_interval_update_cb()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 131, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 19, "token_counts": 58, "n_ast_nodes": 95, "n_identifiers": 14, "random_cut": "async def async_added_to_hass(self) -> None:\n \n self.async_on_remove(\n async_dispatcher_connect(\n self.hass,\n " }, { "id": 278345, "commit_id": "b0ffc0031e9c1964e7398ca47c6666bbfc0d5086", "repo": "keras", "path": "keras/saving/saved_model/saved_model_test.py", "file_name": "saved_model_test.py", "fun_name": "test_custom_metric_model", "commit_message": "resolve line-too-long in saving", "code": "def test_custom_metric_model(self):\n # TODO(b/134519980): Issue with `model.fit` if the model call function\n # uses a `tf.function` in graph mode.\n if not tf.executing_eagerly():\n return\n\n x = np.random.random((1, 3))\n y = np.random.random((1, 4))\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 76, "n_words": 31, "vocab_size": 27, "complexity": 3, "nloc": 23, "token_counts": 180, "n_ast_nodes": 69, "n_identifiers": 8, "random_cut": "def test_custom_metric_model(self):\n # TODO(b/134519980): Issue with `model.fit` if the model call function\n # uses a `tf.function` in graph mode.\n " }, { "id": 275973, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/save_weights_test.py", "file_name": "save_weights_test.py", "fun_name": "test_sequential_weight_loading", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_sequential_weight_loading(self):\n if h5py is None:\n return\n\n h5_path = self._save_model_dir(\"test.h5\")\n\n num_hidden = 5\n input_dim = 3\n batch_size = 5\n num_classes = 2\n\n with self.cached_session():\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))\n model.add(keras.layers.Dense(num_classes))\n\n x = np.random.random((batch_size, input_dim))\n ref_y = model.predict(x)\n\n model.save_weights(h5_path)\n\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))\n model.add(keras.layers.Dense(num_classes))\n model.load_weights(h5_path)\n y = model.predict(x)\n\n self.assertAllClose(y, ref_y)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 241, "n_words": 50, "vocab_size": 34, "complexity": 2, "nloc": 21, "token_counts": 166, "n_ast_nodes": 271, "n_identifiers": 26, "random_cut": "def test_sequential_weight_loading(self):\n if h5py is None:\n " }, { "id": 196818, "commit_id": "f757f3daae6e11ea0cfb7dadc133274d8d74315f", "repo": "sympy", "path": "sympy/series/gruntz.py", "file_name": "gruntz.py", "fun_name": "sign", "commit_message": "Reordered imports 2", "code": "def sign(e, x):\n \n if not isinstance(e, Basic):\n raise TypeError(\"e should be an instance of Basic\")\n\n if e.is_positive:\n return 1\n elif e.is_negative:\n return -1\n elif e.is_zero:\n return 0\n\n elif not e.has(x):\n from 
sympy.simplify import logcombine\n e = logcombine(e)\n return _sign(e)\n elif e == x:\n return 1\n elif e.is_Mul:\n a, b = e.as_two_terms()\n sa = sign(a, x)\n if not sa:\n return 0\n return sa * sign(b, x)\n elif isinstance(e, exp):\n return 1\n elif e.is_Pow:\n if e.base == S.Exp1:\n return 1\n s = sign(e.base, x)\n if s == 1:\n return 1\n if e.exp.is_Integer:\n return s**e.exp\n elif isinstance(e, log):\n return sign(e.args[0] - 1, x)\n\n # if all else fails, do it the hard way\n c0, e0 = mrv_leadterm(e, x)\n return sign(c0, x)\n\n\n@debug\n@timeit\n@cacheit", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "@debug\n@timeit\n@cacheit", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 330, "n_words": 121, "vocab_size": 73, "complexity": 15, "nloc": 35, "token_counts": 209, "n_ast_nodes": 339, "n_identifiers": 34, "random_cut": "def sign(e, x):\n \n if not isinstance(e, Basic):\n raise TypeError(\"e should be an instance of Basic\")\n\n " }, { "id": 90464, "commit_id": "d9e850f2723a9d4919b0038d2f6cb59321eef295", "repo": "sentry", "path": "tests/sentry/tasks/test_relay.py", "file_name": "test_relay.py", "fun_name": "redis_cache", "commit_message": "feat(proj-config): Add v3 of proj config endpoint (#34746)\n\nThis new version of the endpoint doesn't perform any computation on the\r\nproject configs. Instead, it performs computation async, as follows:\r\n\r\n- If a requested project config exists in the cache, the endpoint\r\n returns it.\r\n- If a requested project config doesn't exist in the cache, the endpoint\r\n schedules a new task to compute that config, and returns a pending\r\n response.\r\n- If a requested project config doesn't exist in the cache, but a task\r\n is already scheduled, the endpoint returns a pending request.\r\n\r\nTasks are debounced based on the public key of the project. Pending\r\nprojects are returned as a list part of the config in the response, for\r\nexample:\r\n\r\n```\r\n{\r\n proj1_key: { proj1_config },\r\n proj2_key: { proj2_config },\r\n pending: [proj3_key, proj4_key]\r\n}\r\n```\r\n\r\nThe `pending` entry only exists if there is at least one pending\r\nproject.\r\n\r\n**Redis cache changes**\r\nRedis is now a requirement for the project configs cache, since the endpoint can't operate without redis anymore. 
On the other hand, the debouncing cache hasn't been updated because it's not needed for local development (requests will never be debounced and always processed).", "code": "def redis_cache(monkeypatch):\n monkeypatch.setattr(\n \"django.conf.settings.SENTRY_RELAY_PROJECTCONFIG_CACHE\",\n \"sentry.relay.projectconfig_cache.redis.RedisProjectConfigCache\",\n )\n\n cache = RedisProjectConfigCache()\n monkeypatch.setattr(\"sentry.relay.projectconfig_cache.set_many\", cache.set_many)\n monkeypatch.setattr(\"sentry.relay.projectconfig_cache.delete_many\", cache.delete_many)\n monkeypatch.setattr(\"sentry.relay.projectconfig_cache.get\", cache.get)\n\n monkeypatch.setattr(\n \"django.conf.settings.SENTRY_RELAY_PROJECTCONFIG_DEBOUNCE_CACHE\",\n \"sentry.relay.projectconfig_debounce_cache.redis.RedisProjectConfigDebounceCache\",\n )\n\n return cache\n\n\n@pytest.fixture", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 75, "n_words": 22, "vocab_size": 19, "complexity": 1, "nloc": 14, "token_counts": 60, "n_ast_nodes": 113, "n_identifiers": 10, "random_cut": "def redis_cache(monkeypatch):\n monkeypatch.set" }, { "id": 190387, "commit_id": "4fc3616712edb19179b17dd270ad6cf63abf99c2", "repo": "DeOldify", "path": "fastai/metrics.py", "file_name": "metrics.py", "fun_name": "_precision", "commit_message": "Upgrading to support latest Pytorch version", "code": "def _precision(self):\n prec = torch.diag(self.cm) / self.cm.sum(dim=0)\n if self.average is None: return prec\n else:\n weights = self._weights(avg=self.average)\n return (prec * weights).sum()\n", "url": "https://github.com/jantic/DeOldify.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 63, "n_words": 21, "vocab_size": 18, "complexity": 2, "nloc": 6, "token_counts": 59, "n_ast_nodes": 95, "n_identifiers": 12, "random_cut": "def _precision(self):\n prec = torch.diag(self.cm) / self.cm.sum(dim=0)\n if self.average is None: return prec\n else:\n weights = self._w" }, { "id": 141120, "commit_id": "00e3fd75f33d762387ab6fa379743cd21c409ea6", "repo": "ray", "path": "python/ray/experimental/state/state_manager.py", "file_name": "state_manager.py", "fun_name": "get_all_registered_agent_ids", "commit_message": "[State Observability] Ray log alpha API (#24964)\n\nThis is the PR to implement ray log to the server side. The PR is continued from #24068.\r\n\r\nThe PR supports two endpoints;\r\n\r\n/api/v0/logs # list logs of the node id filtered by the given glob. \r\n/api/v0/logs/{[file | stream]}?filename&pid&actor_id&task_id&interval&lines # Stream the requested file log. 
The filename can be inferred by pid/actor_id/task_id\r\nSome tests need to be re-written, I will do it soon.\r\n\r\nAs a follow-up after this PR, there will be 2 PRs.\r\n\r\nPR to add actual CLI\r\nPR to remove in-memory cached logs and do on-demand query for actor/worker logs", "code": "def get_all_registered_agent_ids(self) -> List[str]:\n assert len(self._log_agent_stub) == len(self._runtime_env_agent_stub)\n return self._runtime_env_agent_stub.keys()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 23, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 32, "n_ast_nodes": 51, "n_identifiers": 8, "random_cut": "def get_all_registered_agent_ids(self) -> List[str]:\n assert len(self._log_agent_stub) == len(self._runtime_env_agent_stub)\n return self._runtim" }, { "id": 176191, "commit_id": "5dfd57af2a141a013ae3753e160180b82bec9469", "repo": "networkx", "path": "networkx/linalg/laplacianmatrix.py", "file_name": "laplacianmatrix.py", "fun_name": "normalized_laplacian_matrix", "commit_message": "Use scipy.sparse array datastructure (#5139)\n\n* Step 1: use sparse arrays in nx.to_scipy_sparse_matrix.\r\n\r\nSeems like a reasonable place to start.\r\nnx.to_scipy_sparse_matrix is one of the primary interfaces to\r\nscipy.sparse from within NetworkX.\r\n\r\n* 1: Use np.outer instead of mult col/row vectors\r\n\r\nFix two instances in modularitymatrix where a new 2D array was being\r\ncreated via an outer product of two \\\"vectors\\\".\r\n\r\nIn the matrix case, this was a row vector \\* a column vector. In the\r\narray case this can be disambiguated by being explicit with np.outer.\r\n\r\n* Update _transition_matrix in laplacianmatrix module\r\n\r\n - A few instances of matrix multiplication operator\r\n - Add np.newaxis + transpose to get shape right for broadcasting\r\n - Explicitly convert e.g. 
sp.sparse.spdiags to a csr_array.\r\n\r\n* Update directed_combinitorial_laplacian w/ sparse array.\r\n\r\n - Wrap spdiags in csr_array and update matmul operators.\r\n\r\n* Rm matrix-specific code from lgc and hmn modules\r\n\r\n - Replace .A call with appropriate array semantics\r\n - wrap sparse.diags in csr_array.\r\n\r\n* Change hits to use sparse array semantics.\r\n\r\n - Replace * with @\r\n - Remove superfluous calls to flatten.\r\n\r\n* Update sparse matrix usage in layout module.\r\n - Simplify lil.getrowview call\r\n - Wrap spdiags in csr_array.\r\n\r\n* lil_matrix -> lil_array in graphmatrix.py.\r\n\r\n* WIP: Start working on algebraic connectivity module.\r\n\r\n* Incorporate auth mat varname feedback.\r\n\r\n* Revert 1D slice and comment for 1D sparse future.\r\n\r\n* Add TODOs: rm csr_array wrapper around spdiags etc.\r\n\r\n* WIP: cleanup algebraicconn: tracemin_fiedler.\r\n\r\n* Typo.\r\n\r\n* Finish reviewing algebraicconnectivity.\r\n\r\n* Convert bethe_hessian matrix to use sparse arrays.\r\n\r\n* WIP: update laplacian.\r\n\r\nUpdate undirected laplacian functions.\r\n\r\n* WIP: laplacian - add comment about _transition_matrix return types.\r\n\r\n* Finish laplacianmatrix review.\r\n\r\n* Update attrmatrix.\r\n\r\n* Switch to official laplacian function.\r\n\r\n* Update pagerank to use sparse array.\r\n\r\n* Switch bipartite matrix to sparse arrays.\r\n\r\n* Check from_scipy_sparse_matrix works with arrays.\r\n\r\nModifies test suite.\r\n\r\n* Apply changes from review.\r\n\r\n* Fix failing docstring tests.\r\n\r\n* Fix missing axis for in-place multiplication.\r\n\r\n* Use scipy==1.8rc2\r\n\r\n* Use matrix multiplication\r\n\r\n* Fix PyPy CI\r\n\r\n* [MRG] Create plot_subgraphs.py example (#5165)\r\n\r\n* Create plot_subgraphs.py\r\n\r\nhttps://github.com/networkx/networkx/issues/4220\r\n\r\n* Update plot_subgraphs.py\r\n\r\nblack\r\n\r\n* Update plot_subgraphs.py\r\n\r\nlint plus font_size\r\n\r\n* Update plot_subgraphs.py\r\n\r\nadded more plots\r\n\r\n* Update plot_subgraphs.py\r\n\r\nremoved plots from the unit test and added comments\r\n\r\n* Update plot_subgraphs.py\r\n\r\nlint\r\n\r\n* Update plot_subgraphs.py\r\n\r\ntypos fixed\r\n\r\n* Update plot_subgraphs.py\r\n\r\nadded nodes to the plot of the edges removed that was commented out for whatever reason\r\n\r\n* Update plot_subgraphs.py\r\n\r\nrevert the latest commit - the line was commented out for a reason - it's broken\r\n\r\n* Update plot_subgraphs.py\r\n\r\nfixed node color issue\r\n\r\n* Update plot_subgraphs.py\r\n\r\nformat fix\r\n\r\n* Update plot_subgraphs.py\r\n\r\nforgot to draw the nodes... 
now fixed\r\n\r\n* Fix sphinx warnings about heading length.\r\n\r\n* Update examples/algorithms/plot_subgraphs.py\r\n\r\n* Update examples/algorithms/plot_subgraphs.py\r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Dan Schult \r\n\r\n* Add traveling salesman problem to example gallery (#4874)\r\n\r\nAdds an example of the using Christofides to solve the TSP problem to the example galery.\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() (#5037)\r\n\r\n* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges()\r\n\r\n* Resolved Requested Changes\r\n\r\n* Revert changes to degree docstrings.\r\n\r\n* Update comments in example.\r\n\r\n* Apply wording to edges method in all graph classes.\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Compatibility updates from testing with numpy/scipy/pytest rc's (#5226)\r\n\r\n* Rm deprecated scipy subpkg access.\r\n\r\n* Use recwarn fixture in place of deprecated pytest pattern.\r\n\r\n* Rm unnecessary try/except from tests.\r\n\r\n* Replace internal `close` fn with `math.isclose`. (#5224)\r\n\r\n* Replace internal close fn with math.isclose.\r\n\r\n* Fix lines in docstring examples.\r\n\r\n* Fix Python 3.10 deprecation warning w/ int div. (#5231)\r\n\r\n* Touchups and suggestions for subgraph gallery example (#5225)\r\n\r\n* Simplify construction of G with edges rm'd\r\n\r\n* Rm unused graph attribute.\r\n\r\n* Shorten categorization by node type.\r\n\r\n* Simplify node coloring.\r\n\r\n* Simplify isomorphism check.\r\n\r\n* Rm unit test.\r\n\r\n* Rm redundant plotting of each subgraph.\r\n\r\n* Use new package name (#5234)\r\n\r\n* Allowing None edges in weight function of bidirectional Dijkstra (#5232)\r\n\r\n* added following feature also to bidirectional dijkstra: The weight function can be used to hide edges by returning None.\r\n\r\n* changed syntax for better readability and code duplicate avoidance\r\n\r\nCo-authored-by: Hohmann, Nikolas \r\n\r\n* Add an FAQ about assigning issues. (#5182)\r\n\r\n* Add FAQ about assigning issues.\r\n\r\n* Add note about linking issues from new PRs.\r\n\r\n* Update dev deps (#5243)\r\n\r\n* Update minor doc issues with tex notation (#5244)\r\n\r\n* Add FutureWarnings to fns that return sparse matrices\r\n\r\n - biadjacency_matrix.\r\n - bethe_hessian_matrix.\r\n - incidence_matrix.\r\n - laplacian functions.\r\n - modularity_matrix functions.\r\n - adjacency_matrix.\r\n\r\n* Add to_scipy_sparse_array and use it everywhere.\r\n\r\nAdd a new conversion function to preserve array semantics internally\r\nwhile not altering behavior for users.\r\n\r\nAlso adds FutureWarning to to_scipy_sparse_matrix.\r\n\r\n* Add from_scipy_sparse_array. 
Supercedes from_scipy_sparse_matrix.\r\n\r\n* Handle deprecations in separate PR.\r\n\r\n* Fix docstring examples.\r\n\r\nCo-authored-by: Mridul Seth \r\n\r\nCo-authored-by: Jarrod Millman \r\nCo-authored-by: Andrew Knyazev \r\nCo-authored-by: Dan Schult \r\nCo-authored-by: eskountis <56514439+eskountis@users.noreply.github.com>\r\nCo-authored-by: Anutosh Bhat <87052487+anutosh491@users.noreply.github.com>\r\nCo-authored-by: NikHoh \r\nCo-authored-by: Hohmann, Nikolas \r\nCo-authored-by: Sultan Orazbayev \r\nCo-authored-by: Mridul Seth ", "code": "def normalized_laplacian_matrix(G, nodelist=None, weight=\"weight\"):\n r\n import numpy as np\n import scipy as sp\n import scipy.sparse # call as sp.sparse\n\n if nodelist is None:\n nodelist = list(G)\n A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format=\"csr\")\n n, m = A.shape\n diags = A.sum(axis=1)\n # TODO: rm csr_array wrapper when spdiags can produce arrays\n D = sp.sparse.csr_array(sp.sparse.spdiags(diags, 0, m, n, format=\"csr\"))\n L = D - A\n with sp.errstate(divide=\"ignore\"):\n diags_sqrt = 1.0 / np.sqrt(diags)\n diags_sqrt[np.isinf(diags_sqrt)] = 0\n # TODO: rm csr_array wrapper when spdiags can produce arrays\n DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, m, n, format=\"csr\"))\n import warnings\n\n warnings.warn(\n \"normalized_laplacian_matrix will return a scipy.sparse array instead of a matrix in Networkx 3.0.\",\n FutureWarning,\n stacklevel=2,\n )\n # TODO: rm csr_matrix wrapper for NX 3.0\n return sp.sparse.csr_matrix(DH @ (L @ DH))\n\n\n###############################################################################\n# Code based on\n# https://bitbucket.org/bedwards/networkx-community/src/370bd69fc02f/networkx/algorithms/community/\n\n\n@not_implemented_for(\"undirected\")\n@not_implemented_for(\"multigraph\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@not_implemented_for(\"undirected\")\n@not_implemented_for(\"multigraph\")", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 220, "n_words": 130, "vocab_size": 88, "complexity": 2, "nloc": 72, "token_counts": 197, "n_ast_nodes": 336, "n_identifiers": 36, "random_cut": "def normalized_laplacian_matrix(G, nodelist=None, weight=\"weight\"):\n r\n import numpy as np\n import scipy as sp\n import scipy.sparse # call as sp.sparse\n\n if nodelist is None:\n nodelist = list(G)\n A = " }, { "id": 268947, "commit_id": "373ad97c72ed1ac4b6898e85b2cfd7b016e4b469", "repo": "keras", "path": "keras/preprocessing/image.py", "file_name": "image.py", "fun_name": "random_channel_shift", "commit_message": "Copy image utils from keras_preprocessing directly into core keras\n\nThis is not new code, we are just moving these utilities directly\ninto keras from keras-preprocessing.\n\nFor the library code, just fixed linting errors.\nFor the test code, had to do more major changes to port from pytest, but\nhopefully any errors have been caught by the tests themselves.\n\nPiperOrigin-RevId: 427274651", "code": "def random_channel_shift(x, intensity_range, channel_axis=0):\n \n intensity = np.random.uniform(-intensity_range, intensity_range)\n return apply_channel_shift(x, intensity, channel_axis=channel_axis)\n\n\n@keras_export('keras.preprocessing.image.apply_brightness_shift')", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export('keras.preprocessing.image.apply_brightness_shift')", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 15, "n_words": 13, 
"vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 36, "n_ast_nodes": 66, "n_identifiers": 10, "random_cut": "def random_channel_shift(x, intensity_range, channel_axis=0):\n \n intensi" }, { "id": 69326, "commit_id": "518ab93e039d68827506c3ac92db3c09aea644e3", "repo": "erpnext", "path": "erpnext/patches/v14_0/migrate_remarks_from_gl_to_payment_ledger.py", "file_name": "migrate_remarks_from_gl_to_payment_ledger.py", "fun_name": "execute", "commit_message": "refactor: remove duplicate entries on remarks migration patch", "code": "def execute():\n\tif frappe.reload_doc(\"accounts\", \"doctype\", \"payment_ledger_entry\"):\n\n\t\tgle = qb.DocType(\"GL Entry\")\n\t\tple = qb.DocType(\"Payment Ledger Entry\")\n\n\t\t# get ple and their remarks from GL Entry\n\t\tpl_entries = (\n\t\t\tqb.from_(ple)\n\t\t\t.left_join(gle)\n\t\t\t.on(\n\t\t\t\t(ple.account == gle.account)\n\t\t\t\t& (ple.party_type == gle.party_type)\n\t\t\t\t& (ple.party == gle.party)\n\t\t\t\t& (ple.voucher_type == gle.voucher_type)\n\t\t\t\t& (ple.voucher_no == gle.voucher_no)\n\t\t\t\t& (ple.company == gle.company)\n\t\t\t)\n\t\t\t.select(\n\t\t\t\tple.company,\n\t\t\t\tple.account,\n\t\t\t\tple.party_type,\n\t\t\t\tple.party,\n\t\t\t\tple.voucher_type,\n\t\t\t\tple.voucher_no,\n\t\t\t\tgle.remarks.as_(\"gle_remarks\"),\n\t\t\t)\n\t\t\t.where((ple.delinked == 0) & (gle.is_cancelled == 0))\n\t\t\t.run(as_dict=True)\n\t\t)\n\n\t\tpl_entries = remove_duplicate_entries(pl_entries)\n\n\t\tif pl_entries:\n\t\t\t# split into multiple batches, update and commit for each batch\n\t\t\tbatch_size = 1000\n\t\t\tfor batch in create_batch(pl_entries, batch_size):\n\t\t\t\tfor entry in batch:\n\t\t\t\t\tquery = (\n\t\t\t\t\t\tqb.update(ple)\n\t\t\t\t\t\t.set(ple.remarks, entry.gle_remarks)\n\t\t\t\t\t\t.where(\n\t\t\t\t\t\t\t(ple.company == entry.company)\n\t\t\t\t\t\t\t& (ple.account == entry.account)\n\t\t\t\t\t\t\t& (ple.party_type == entry.party_type)\n\t\t\t\t\t\t\t& (ple.party == entry.party)\n\t\t\t\t\t\t\t& (ple.voucher_type == entry.voucher_type)\n\t\t\t\t\t\t\t& (ple.voucher_no == entry.voucher_no)\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\t\t\t\t\tquery.run()\n\n\t\t\t\tfrappe.db.commit()\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 25, "n_whitespaces": 86, "n_words": 134, "vocab_size": 85, "complexity": 5, "nloc": 46, "token_counts": 296, "n_ast_nodes": 463, "n_identifiers": 36, "random_cut": "def execute():\n\tif frappe.reload_doc(\"accounts\", \"doctype\", \"payment_ledger_entry\"):\n\n\t\tgle = qb.DocType(\"GL Entry\")\n\t\tple = qb.DocType(\"Payment Ledger Entry\")\n\n\t\t# get ple and their remarks from GL Entry\n\t\tpl_entries = (\n\t\t\tqb.from_(ple)\n\t\t\t.left_join(gle)\n\t\t\t.on(\n\t\t\t\t(ple.account == gle.account)\n\t\t\t\t& (ple.party_type == gle.party_type)\n\t\t\t\t& (ple.party == gle.party)\n\t\t\t\t& (ple.voucher_type == gle.voucher_type)\n\t\t\t\t& (ple.voucher_no == gle.voucher_no)\n\t\t\t\t& (ple.company == gle.company)\n\t\t\t)\n\t\t\t.select(\n\t\t\t\tple.company,\n\t\t\t\tple.account,\n\t\t\t\tple.party_type,\n\t\t\t\tple.party,\n\t\t\t\tple.voucher_type,\n\t\t\t\tple.voucher_no,\n\t\t\t\tgle.remarks.as_(\"gle_remarks\"),\n\t\t\t)\n\t\t\t.where((ple.delinked == 0) & (gle.is_cancelled == 0))\n\t\t\t.run(as_dict=True)\n\t\t)\n\n\t\tpl_entries = remove_duplicate_entries(pl_entries)\n\n\t\tif pl_entries:\n\t\t\t# split into multiple batches, update and commit for each batch\n\t\t\tbatch_size = 1000\n\t\t\tfor batch in 
create_batch(pl_entries, batch_size):\n\t\t\t\tfor entry in batch:\n\t\t\t\t\tquery = (\n\t\t\t\t\t\tqb.update(ple)\n\t\t\t\t\t\t.set(ple.remarks, entry.gle_remarks)\n\t\t\t\t\t\t.where(\n\t\t\t\t\t\t\t(ple.company == entry.company)\n\t\t\t\t\t\t\t& (ple.account == entry.account)\n\t\t\t\t\t\t\t& (ple.party_type == entry.party_type)\n\t\t\t\t\t\t\t& (ple.party == entry.party)\n\t\t\t\t\t\t\t& (ple.voucher_type == entry.vouche" }, { "id": 281666, "commit_id": "9e671aeba98dacc69ecbbfec1f087aca3b139ee7", "repo": "OpenBBTerminal", "path": "terminal.py", "file_name": "terminal.py", "fun_name": "call_funds", "commit_message": "Remember Contexts (#1187)\n\n* Refacotred classes\r\n\r\n* Handling for new instance desired\r\n\r\n* Added feature flag\r\n\r\n* Converted all menu calls", "code": "def call_funds(self, _):\n \n from gamestonk_terminal.mutual_funds.mutual_fund_controller import (\n FundController,\n )\n\n self.queue = self.load_class(FundController, self.queue)\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 52, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 33, "n_ast_nodes": 50, "n_identifiers": 9, "random_cut": "def call_funds(self, _):\n \n from ga" }, { "id": 313861, "commit_id": "05d7d31dfd4d4eb4a8b10a84319d7f2778e65298", "repo": "core", "path": "homeassistant/components/elgato/light.py", "file_name": "light.py", "fun_name": "async_identify", "commit_message": "Improve Elgato error handling (#73444)", "code": "async def async_identify(self) -> None:\n \n try:\n await self.client.identify()\n except ElgatoError as error:\n raise HomeAssistantError(\n \"An error occurred while identifying the Elgato Light\"\n ) from error\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 94, "n_words": 25, "vocab_size": 24, "complexity": 2, "nloc": 8, "token_counts": 30, "n_ast_nodes": 56, "n_identifiers": 7, "random_cut": "async def async_identify(self) -> None:\n \n try:\n await self.client.ident" }, { "id": 123863, "commit_id": "e40877d4306787acb15985888e1f33ad4bdd9912", "repo": "mypy", "path": "mypy/test/teststubtest.py", "file_name": "teststubtest.py", "fun_name": "test_bad_literal", "commit_message": "stubtest: fix literal type construction (#11931)\n\nCo-authored-by: hauntsaninja <>", "code": "def test_bad_literal(self) -> Iterator[Case]:\n yield Case(\"from typing_extensions import Literal\", \"\", None) # dummy case\n yield Case(\n stub=\"INT_FLOAT_MISMATCH: Literal[1]\",\n runtime=\"INT_FLOAT_MISMATCH = 1.0\",\n error=\"INT_FLOAT_MISMATCH\",\n )\n yield Case(\n stub=\"WRONG_INT: Literal[1]\",\n runtime=\"WRONG_INT = 2\",\n error=\"WRONG_INT\",\n )\n yield Case(\n stub=\"WRONG_STR: Literal['a']\",\n runtime=\"WRONG_STR = 'b'\",\n error=\"WRONG_STR\",\n )\n yield Case(\n stub=\"BYTES_STR_MISMATCH: Literal[b'value']\",\n runtime=\"BYTES_STR_MISMATCH = 'value'\",\n error=\"BYTES_STR_MISMATCH\",\n )\n yield Case(\n stub=\"STR_BYTES_MISMATCH: Literal['value']\",\n runtime=\"STR_BYTES_MISMATCH = b'value'\",\n error=\"STR_BYTES_MISMATCH\",\n )\n yield Case(\n stub=\"WRONG_BYTES: Literal[b'abc']\",\n runtime=\"WRONG_BYTES = b'xyz'\",\n error=\"WRONG_BYTES\",\n )\n yield Case(\n stub=\"WRONG_BOOL_1: Literal[True]\",\n runtime=\"WRONG_BOOL_1 = False\",\n error='WRONG_BOOL_1',\n )\n yield Case(\n stub=\"WRONG_BOOL_2: Literal[False]\",\n runtime=\"WRONG_BOOL_2 = 
True\",\n error='WRONG_BOOL_2',\n )\n\n", "url": "https://github.com/python/mypy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 469, "n_words": 86, "vocab_size": 56, "complexity": 1, "nloc": 42, "token_counts": 147, "n_ast_nodes": 266, "n_identifiers": 7, "random_cut": "def test_bad_literal(self) -> Iterator[Case]:\n yield Case(\"from typing_extensions import Literal\", \"\", None) # dummy case\n yield Case(\n stub=\"INT_FLOAT_MISMATCH: Literal[1]\",\n runtime=\"INT_FLOAT_MISMATCH = 1.0\",\n error=\"INT_FLOAT_MISMATCH\",\n )\n yield Case(\n stub=\"WRONG_INT: Literal[1]\",\n runtime=\"WRONG_INT = 2\",\n error=\"WRONG_INT\",\n )\n yield Case(\n stub=\"WRONG_STR: Literal['a']\",\n runtime=\"WRONG_STR = 'b'\",\n error=\"WRONG_STR\",\n )\n yield Case(\n stub=\"BYTES_STR_MISMATCH: Literal[b'value']\",\n runtime=\"BYTES_STR_MISMATCH = 'value'\",\n error=\"BYTES_STR_MISMA" }, { "id": 69657, "commit_id": "b741ae143c514cf832a435db1902986a915efde4", "repo": "erpnext", "path": "erpnext/accounts/doctype/pricing_rule/utils.py", "file_name": "utils.py", "fun_name": "apply_pricing_rule_for_free_items", "commit_message": "fix: Reapply pricing rule on qty change", "code": "def apply_pricing_rule_for_free_items(doc, pricing_rule_args):\n\tif pricing_rule_args:\n\t\titems = tuple((d.item_code, d.pricing_rules) for d in doc.items if d.is_free_item)\n\n\t\tfor args in pricing_rule_args:\n\t\t\tif not items or (args.get(\"item_code\"), args.get(\"pricing_rules\")) not in items:\n\t\t\t\tdoc.append(\"items\", args)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 24, "n_words": 30, "vocab_size": 22, "complexity": 7, "nloc": 6, "token_counts": 70, "n_ast_nodes": 111, "n_identifiers": 12, "random_cut": "def apply_pricing_rule_for_free_items(doc, pricing_rule_args):\n\tif pricing_rule_args:\n\t\titems = tuple((d.item_code, d.pricing_rules) for d in doc.items if d.is_free_item)\n\n\t\tfor args in pricing_rule_args:\n\t\t\tif not items or (args.get(\"item_code\"), args.get(\"pricing_rules\")) not in items:\n\t\t\t\tdo" }, { "id": 22998, "commit_id": "11f6ff38dcc61348aa4aae8ad2fbbe42b0eab34d", "repo": "PaddleOCR", "path": "test_tipc/supplementary/config.py", "file_name": "config.py", "fun_name": "parse_args", "commit_message": "add supplementary", "code": "def parse_args(self, argv=None):\n args = super(ArgsParser, self).parse_args(argv)\n assert args.config is not None, \\\n \"Please specify --config=configure_file_path.\"\n args.opt = self._parse_opt(args.opt)\n return args\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 59, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 6, "token_counts": 46, "n_ast_nodes": 70, "n_identifiers": 9, "random_cut": "def parse_args(self, argv=None):\n args = super(ArgsParser, self).parse_args(argv)\n assert args.config is not None, \\\n \"Please specify --config=configure_file_path.\"\n args.opt = self._parse_opt(args.opt)\n return args\n" }, { "id": 266760, "commit_id": "a06fa496d3f837cca3c437ab6e9858525633d147", "repo": "ansible", "path": "test/lib/ansible_test/_internal/commands/sanity/import.py", "file_name": "import.py", "fun_name": "test", "commit_message": "ansible-test - Code cleanup and refactoring. 
(#77169)\n\n* Remove unnecessary PyCharm ignores.\r\n* Ignore intentional undefined attribute usage.\r\n* Add missing type hints. Fix existing type hints.\r\n* Fix docstrings and comments.\r\n* Use function to register completion handler.\r\n* Pass strings to display functions.\r\n* Fix CompositeAction handling of dest argument.\r\n* Use consistent types in expressions/assignments.\r\n* Use custom function to keep linters happy.\r\n* Add missing raise for custom exception.\r\n* Clean up key/value type handling in cloud plugins.\r\n* Use dataclass instead of dict for results.\r\n* Add custom type_guard function to check lists.\r\n* Ignore return type that can't be checked (yet).\r\n* Avoid changing types on local variables.", "code": "def test(self, args, targets, python): # type: (SanityConfig, SanityTargets, PythonConfig) -> TestResult\n settings = self.load_processor(args, python.version)\n\n paths = [target.path for target in targets.include]\n\n if python.version.startswith('2.') and (get_virtualenv_version(args, python.path) or (0,)) < (13,):\n # hack to make sure that virtualenv is available under Python 2.x\n # on Python 3.x we can use the built-in venv\n # version 13+ is required to use the `--no-wheel` option\n try:\n install_requirements(args, python, virtualenv=True, controller=False) # sanity (import)\n except PipUnavailableError as ex:\n display.warning(str(ex))\n\n temp_root = os.path.join(ResultType.TMP.path, 'sanity', 'import')\n\n messages = []\n\n for import_type, test in (\n ('module', _get_module_test(True)),\n ('plugin', _get_module_test(False)),\n ):\n if import_type == 'plugin' and python.version in REMOTE_ONLY_PYTHON_VERSIONS:\n continue\n\n data = '\\n'.join([path for path in paths if test(path)])\n\n if not data and not args.prime_venvs:\n continue\n\n virtualenv_python = create_sanity_virtualenv(args, python, f'{self.name}.{import_type}', coverage=args.coverage, minimize=True)\n\n if not virtualenv_python:\n display.warning(f'Skipping sanity test \"{self.name}\" on Python {python.version} due to missing virtual environment support.')\n return SanitySkipped(self.name, python.version)\n\n virtualenv_yaml = check_sanity_virtualenv_yaml(virtualenv_python)\n\n if virtualenv_yaml is False:\n display.warning(f'Sanity test \"{self.name}\" ({import_type}) on Python {python.version} may be slow due to missing libyaml support in PyYAML.')\n\n env = ansible_environment(args, color=False)\n\n env.update(\n SANITY_TEMP_PATH=ResultType.TMP.path,\n SANITY_IMPORTER_TYPE=import_type,\n )\n\n if data_context().content.collection:\n external_python = create_sanity_virtualenv(args, args.controller_python, self.name)\n\n env.update(\n SANITY_COLLECTION_FULL_NAME=data_context().content.collection.full_name,\n SANITY_EXTERNAL_PYTHON=external_python.path,\n SANITY_YAML_TO_JSON=os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'yaml_to_json.py'),\n ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION=CONTROLLER_MIN_PYTHON_VERSION,\n PYTHONPATH=':'.join((get_ansible_test_python_path(), env[\"PYTHONPATH\"])),\n )\n\n if args.prime_venvs:\n continue\n\n display.info(import_type + ': ' + data, verbosity=4)\n\n cmd = ['importer.py']\n\n # add the importer to the path so it can be accessed through the coverage injector\n env.update(\n PATH=os.pathsep.join([os.path.join(TARGET_SANITY_ROOT, 'import'), env['PATH']]),\n )\n\n try:\n stdout, stderr = cover_python(args, virtualenv_python, cmd, self.name, env, capture=True, data=data)\n\n if stdout or stderr:\n raise SubprocessError(cmd, stdout=stdout, stderr=stderr)\n except 
SubprocessError as ex:\n if ex.status != 10 or ex.stderr or not ex.stdout:\n raise\n\n pattern = r'^(?P[^:]*):(?P[0-9]+):(?P[0-9]+): (?P.*)$'\n\n parsed = parse_to_list_of_dict(pattern, ex.stdout)\n\n relative_temp_root = os.path.relpath(temp_root, data_context().content.root) + os.path.sep\n\n messages += [SanityMessage(\n message=r['message'],\n path=os.path.relpath(r['path'], relative_temp_root) if r['path'].startswith(relative_temp_root) else r['path'],\n line=int(r['line']),\n column=int(r['column']),\n ) for r in parsed]\n\n if args.prime_venvs:\n return SanitySkipped(self.name, python_version=python.version)\n\n results = settings.process_errors(messages, paths)\n\n if results:\n return SanityFailure(self.name, messages=results, python_version=python.version)\n\n return SanitySuccess(self.name, python_version=python.version)\n\n\n@cache", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "@cache", "n_ast_errors": 1, "ast_levels": 20, "n_whitespaces": 1243, "n_words": 311, "vocab_size": 209, "complexity": 27, "nloc": 69, "token_counts": 648, "n_ast_nodes": 1058, "n_identifiers": 92, "random_cut": "def test(self, args, targets, python): # type: (SanityConfig, SanityTargets, PythonConfig) -> TestResult\n settings = self.load_processor(args, python.version)\n\n paths = [target.path for target in targets.include]\n\n if python.version.startswith('2.') and (get_virtualenv_version(args, python.path) or (0,)) < (13,):\n # hack to make sure that virtualenv is available under Python 2.x\n # on Python 3.x we can use the built-in venv\n # version 13+ is required to use the `--no-wheel` option\n try:\n install_requirements(args, python, virtualenv=True, controller=False) # sanity (import)\n except PipUnavailableError as ex:\n display.warning(str(ex))\n\n temp_root = os.path.join(ResultType.TMP.path, 'sanity', 'import')\n\n messages = []\n\n for import_type, test in (\n ('module', _get_module_test(True)),\n ('plugin', _get_module_test(False)),\n ):\n if import_type == 'plugin' and python.version in REMOTE_ONLY_PYTHON_VERSIONS:\n continue\n\n data = '\\n'.join([path for path in paths if test(path)])\n\n if not data and not args.prime_venvs:\n continue\n\n virtualenv_python = create_sanity_virtualenv(args, python, f'{self.name}.{import_type}', coverage=args.coverage, minimize=True)\n\n if not virtualenv_python:\n display.warning(f'Skipping sanity test \"{self.name}\" on Python {python.version} due to missing virtual environment support.')\n return SanitySkipped(self.name, python.version)\n\n virtualenv_yaml = check_sanity_virtualenv_yaml(virtualenv_python)\n\n if virtualenv_yaml is False:\n display.warning(f'Sanity test \"{self.name}\" ({import_type}) on Python {python.version} may be slow due to missing libyaml support in PyYAML.')\n\n env = ansible_environment(args, color=False)\n\n env.update(\n SANITY_TEMP_PATH=ResultType.TMP.path,\n SANITY_IMPORTER_TYPE=import_type,\n )\n\n if data_context().content.collection:\n external_python = create_sanity_virtualenv(args, args.controller_python, self.name)\n\n env.update(\n SANITY_COLLECTION_FULL_NAME=data_context().content.collection.full_name,\n SANITY_EXTERNAL_PYTHON=external_python.path,\n SANITY_YAML_TO_JSON=os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'yaml_to_json.py'),\n ANSIBL" }, { "id": 181227, "commit_id": "27d60792ead5152672d613282c8012cebc39af19", "repo": "gradio", "path": "website/homepage/src/guides/__init__.py", "file_name": "__init__.py", "fun_name": "format_name", "commit_message": "Use underscore instead of 
parentheses (#2625)", "code": "def format_name(guide_name):\n index = None\n if re.match(\"^[0-9]+_\", guide_name):\n index = int(guide_name[: guide_name.index(\"_\")])\n guide_name = guide_name[guide_name.index(\"_\") + 1 :]\n if guide_name.lower().endswith(\".md\"):\n guide_name = guide_name[:-3]\n pretty_guide_name = \" \".join([word[0].upper() + word[1:] for word in guide_name.split(\"_\")])\n return index, guide_name, pretty_guide_name\n\n\nguide_folders = sorted(os.listdir(GUIDES_DIR))\nguide_folders.remove(\"CONTRIBUTING.md\")\nguide_folders.remove(\"assets\")\n\nguides = []\nguides_by_category = []\nabsolute_index = 0\nfor guide_folder in guide_folders:\n guide_list = sorted(os.listdir(os.path.join(GUIDES_DIR, guide_folder)))\n _, guide_category, pretty_guide_category = format_name(guide_folder)\n guides_by_category.append({\"category\": pretty_guide_category, \"guides\": []})\n for guide_file in guide_list:\n guide_index, guide_name, pretty_guide_name = format_name(guide_file)\n with open(os.path.join(GUIDES_DIR, guide_folder, guide_file), \"r\") as f:\n guide_content = f.read()\n\n title = guide_content.split(\"\\n\")[0]\n\n metadata_labels = []\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 172, "n_words": 93, "vocab_size": 66, "complexity": 4, "nloc": 9, "token_counts": 105, "n_ast_nodes": 414, "n_identifiers": 37, "random_cut": "def format_name(guide_name):\n index = None\n if re.match(\"^[0-9]+_\", guide_name):\n " }, { "id": 218586, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/lib2to3/btm_utils.py", "file_name": "btm_utils.py", "fun_name": "get_linear_subpattern", "commit_message": "add python 3.10.4 for windows", "code": "def get_linear_subpattern(self):\n \n\n for l in self.leaves():\n subp = l.leaf_to_root()\n if subp:\n return subp\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 64, "n_words": 13, "vocab_size": 12, "complexity": 3, "nloc": 5, "token_counts": 27, "n_ast_nodes": 47, "n_identifiers": 6, "random_cut": "def get_linear_subpattern(self):\n \n\n for l in self.leaves():\n subp = l.leaf_to_root(" }, { "id": 149749, "commit_id": "116b58e97cad2b86aff5e20d97f494b5ef9abd41", "repo": "freqtrade", "path": "tests/exchange/test_exchange.py", "file_name": "test_exchange.py", "fun_name": "test_date_minus_candles", "commit_message": "add \"date_minus_candles\" method", "code": "def test_date_minus_candles():\n\n date = datetime(2019, 8, 12, 13, 25, 0, tzinfo=timezone.utc)\n\n assert date_minus_candles(\"5m\", 3, date) == date - timedelta(minutes=15)\n assert date_minus_candles(\"5m\", 5, date) == date - timedelta(minutes=25)\n assert date_minus_candles(\"1m\", 6, date) == date - timedelta(minutes=6)\n assert date_minus_candles(\"1h\", 3, date) == date - timedelta(hours=3, minutes=25)\n assert date_minus_candles(\"1h\", 3) == timeframe_to_prev_date('1h') - timedelta(hours=3)\n\n\n@pytest.mark.parametrize(\n \"market_symbol,base,quote,exchange,spot,margin,futures,trademode,add_dict,expected_result\",\n [\n (\"BTC/USDT\", 'BTC', 'USDT', \"binance\", True, False, False, 'spot', {}, True),\n (\"USDT/BTC\", 'USDT', 'BTC', \"binance\", True, False, False, 'spot', {}, True),\n # No seperating /\n (\"BTCUSDT\", 'BTC', 'USDT', \"binance\", True, False, False, 'spot', {}, True),\n (\"BTCUSDT\", None, \"USDT\", \"binance\", True, False, 
False, 'spot', {}, False),\n (\"USDT/BTC\", \"BTC\", None, \"binance\", True, False, False, 'spot', {}, False),\n (\"BTCUSDT\", \"BTC\", None, \"binance\", True, False, False, 'spot', {}, False),\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", True, False, False, 'spot', {}, True),\n # Futures mode, spot pair\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", True, False, False, 'futures', {}, False),\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", True, False, False, 'margin', {}, False),\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", True, True, True, 'margin', {}, True),\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", False, True, False, 'margin', {}, True),\n # Futures mode, futures pair\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", False, False, True, 'futures', {}, True),\n # Futures market\n (\"BTC/UNK\", \"BTC\", 'UNK', \"binance\", False, False, True, 'spot', {}, False),\n (\"BTC/EUR\", 'BTC', 'EUR', \"kraken\", True, False, False, 'spot', {\"darkpool\": False}, True),\n (\"EUR/BTC\", 'EUR', 'BTC', \"kraken\", True, False, False, 'spot', {\"darkpool\": False}, True),\n # no darkpools\n (\"BTC/EUR\", 'BTC', 'EUR', \"kraken\", True, False, False, 'spot',\n {\"darkpool\": True}, False),\n # no darkpools\n (\"BTC/EUR.d\", 'BTC', 'EUR', \"kraken\", True, False, False, 'spot',\n {\"darkpool\": True}, False),\n (\"BTC/USD\", 'BTC', 'USD', \"ftx\", True, False, False, 'spot', {}, True),\n (\"USD/BTC\", 'USD', 'BTC', \"ftx\", True, False, False, 'spot', {}, True),\n # Can only trade spot markets\n (\"BTC/USD\", 'BTC', 'USD', \"ftx\", False, False, True, 'spot', {}, False),\n (\"BTC/USD\", 'BTC', 'USD', \"ftx\", False, False, True, 'futures', {}, True),\n # Can only trade spot markets\n (\"BTC-PERP\", 'BTC', 'USD', \"ftx\", False, False, True, 'spot', {}, False),\n (\"BTC-PERP\", 'BTC', 'USD', \"ftx\", False, False, True, 'margin', {}, False),\n (\"BTC-PERP\", 'BTC', 'USD', \"ftx\", False, False, True, 'futures', {}, True),\n\n (\"BTC/USDT:USDT\", 'BTC', 'USD', \"okx\", False, False, True, 'spot', {}, False),\n (\"BTC/USDT:USDT\", 'BTC', 'USD', \"okx\", False, False, True, 'margin', {}, False),\n (\"BTC/USDT:USDT\", 'BTC', 'USD', \"okx\", False, False, True, 'futures', {}, True),\n ])", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"market_symbol,base,quote,exchange,spot,margin,futures,trademode,add_dict,expected_result\",\n [\n (\"BTC/USDT\", 'BTC', 'USDT', \"binance\", True, False, False, 'spot', {}, True),\n (\"USDT/BTC\", 'USDT', 'BTC', \"binance\", True, False, False, 'spot', {}, True),\n # No seperating /\n (\"BTCUSDT\", 'BTC', 'USDT', \"binance\", True, False, False, 'spot', {}, True),\n (\"BTCUSDT\", None, \"USDT\", \"binance\", True, False, False, 'spot', {}, False),\n (\"USDT/BTC\", \"BTC\", None, \"binance\", True, False, False, 'spot', {}, False),\n (\"BTCUSDT\", \"BTC\", None, \"binance\", True, False, False, 'spot', {}, False),\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", True, False, False, 'spot', {}, True),\n # Futures mode, spot pair\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", True, False, False, 'futures', {}, False),\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", True, False, False, 'margin', {}, False),\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", True, True, True, 'margin', {}, True),\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", False, True, False, 'margin', {}, True),\n # Futures mode, futures pair\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", False, False, True, 
'futures', {}, True),\n # Futures market\n (\"BTC/UNK\", \"BTC\", 'UNK', \"binance\", False, False, True, 'spot', {}, False),\n (\"BTC/EUR\", 'BTC', 'EUR', \"kraken\", True, False, False, 'spot', {\"darkpool\": False}, True),\n (\"EUR/BTC\", 'EUR', 'BTC', \"kraken\", True, False, False, 'spot', {\"darkpool\": False}, True),\n # no darkpools\n (\"BTC/EUR\", 'BTC', 'EUR', \"kraken\", True, False, False, 'spot',\n {\"darkpool\": True}, False),\n # no darkpools\n (\"BTC/EUR.d\", 'BTC', 'EUR', \"kraken\", True, False, False, 'spot',\n {\"darkpool\": True}, False),\n (\"BTC/USD\", 'BTC', 'USD', \"ftx\", True, False, False, 'spot', {}, True),\n (\"USD/BTC\", 'USD', 'BTC', \"ftx\", True, False, False, 'spot', {}, True),\n # Can only trade spot markets\n (\"BTC/USD\", 'BTC', 'USD', \"ftx\", False, False, True, 'spot', {}, False),\n (\"BTC/USD\", 'BTC', 'USD', \"ftx\", False, False, True, 'futures', {}, True),\n # Can only trade spot markets\n (\"BTC-PERP\", 'BTC', 'USD', \"ftx\", False, False, True, 'spot', {}, False),\n (\"BTC-PERP\", 'BTC', 'USD', \"ftx\", False, False, True, 'margin', {}, False),\n (\"BTC-PERP\", 'BTC', 'USD', \"ftx\", False, False, True, 'futures', {}, True),\n\n (\"BTC/USDT:USDT\", 'BTC', 'USD', \"okx\", False, False, True, 'spot', {}, False),\n (\"BTC/USDT:USDT\", 'BTC', 'USD', \"okx\", False, False, True, 'margin', {}, False),\n (\"BTC/USDT:USDT\", 'BTC', 'USD', \"okx\", False, False, True, 'futures', {}, True),\n ])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 650, "n_words": 364, "vocab_size": 83, "complexity": 1, "nloc": 7, "token_counts": 121, "n_ast_nodes": 1178, "n_identifiers": 14, "random_cut": "def test_date_minus_candles():\n\n date = datetime(2019, 8, 12, 13, 25, 0, tzinfo=timezone.utc)\n\n assert date_minus_candles(\"5m\", 3, date) == date - timedelta(minutes=15)\n assert date_minus_candles(\"5m\", 5, date) == date - timedelta(minutes=25)\n assert date_minus_candles(\"1m\", 6, date) == date - timedelta(minutes=6)\n assert date_minus_candles(\"1h\", 3, date) == date - timedelta(hours=3, minutes=25)\n assert date_minus_candles(\"1h\", 3) == timeframe_to_prev_date('1h') - timedelta(hours=3)\n\n\n@pytest.mark.parametrize(\n \"market_symbol,base,quote,exchange,spot,margin,futures,trademode,add_dict,expected_result\",\n [\n (\"BTC/USDT\", 'BTC', 'USDT', \"binance\", True, False, False, 'spot', {}, True),\n (\"USDT/BTC\", 'USDT', 'BTC', \"binance\", True, False, False, 'spot', {}, True),\n # No seperating /\n (\"BTCUSDT\", 'BTC', 'USDT', \"binance\", True, False, False, 'spot', {}, True),\n (\"BTCUSDT\", None, \"USDT\", \"binance\", True, False, False, 'spot', {}, False),\n (\"USDT/BTC\", \"BTC\", None, \"binance\", True, False, False, 'spot', {}, False),\n (\"BTCUSDT\", \"BTC\", None, \"binance\", True, False, False, 'spot', {}, False),\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", True, False, False, 'spot', {}, True),\n # Futures mode, spot pair\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", True, False, False, 'futures', {}, False),\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", True, False, False, 'margin', {}, False),\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", True, True, True, 'margin', {}, True),\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", False, True, False, 'margin', {}, True),\n # Futures mode, futures pair\n (\"BTC/USDT\", \"BTC\", \"USDT\", \"binance\", False, False, True, 'futures', {}, True),\n # Futures market\n (\"BTC/UNK\", \"BTC\", 'UNK', \"binance\", False, False, True, 'spot', {}, False),\n (\"BTC/EUR\", 'BTC', 'EUR', 
\"kraken\", True, False, False, 'spot', {\"darkpool\": False}, True),\n (\"EUR/BTC\", 'EUR', 'BTC', \"kraken\", True" }, { "id": 28670, "commit_id": "577d43f21b7d6774dd272fedbb5719e101b92308", "repo": "saleor", "path": "saleor/checkout/utils.py", "file_name": "utils.py", "fun_name": "_append_line_to_create", "commit_message": "Allow to pass metadata directly to line data in checkoutCreate and checkoutLinesAdd mutations (#10592)", "code": "def _append_line_to_create(to_create, checkout, variant, line_data, line):\n if line is None:\n if line_data.quantity > 0:\n checkout_line = CheckoutLine(\n checkout=checkout,\n variant=variant,\n quantity=line_data.quantity,\n currency=checkout.currency,\n price_override=line_data.custom_price,\n )\n if line_data.metadata_list:\n checkout_line.store_value_in_metadata(\n {data.key: data.value for data in line_data.metadata_list}\n )\n to_create.append(checkout_line)\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 211, "n_words": 34, "vocab_size": 31, "complexity": 5, "nloc": 15, "token_counts": 87, "n_ast_nodes": 127, "n_identifiers": 18, "random_cut": "def _append_line_to_create(to_create, checkout, variant, line_data, line):\n if line is None:\n if line_data.quantity > 0:\n checkout_line = CheckoutLine(\n checkout=checkout,\n variant=variant,\n quantity=line_data.quantity,\n currency=checkout.currency,\n price_override=line_data.custom_price,\n )\n if line_data.metadata_list:\n " }, { "id": 11602, "commit_id": "51403a57d03f0b1ddfd7fc533ccee78e23f5faa1", "repo": "jina", "path": "tests/integration/reduce/test_reduce.py", "file_name": "test_reduce.py", "fun_name": "test_uses_before_no_reduce_real_executor_uses", "commit_message": "refactor: unify port args (#4382)", "code": "def test_uses_before_no_reduce_real_executor_uses():\n flow = (\n Flow(port=exposed_port)\n .add(uses=Executor1, name='pod0')\n .add(uses=Executor2, needs='gateway', name='pod1')\n .add(uses=Executor3, needs='gateway', name='pod2')\n .add(needs=['pod0', 'pod1', 'pod2'], name='pod3', uses=DummyExecutor)\n )\n\n with flow as f:\n da = DocumentArray([Document() for _ in range(5)])\n resp = Client(port=exposed_port, return_responses=True).post('/', inputs=da)\n\n # assert no reduce happened\n assert len(resp[0].docs) == 1\n assert resp[0].docs[0].id == 'fake_document'\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 115, "n_words": 49, "vocab_size": 42, "complexity": 2, "nloc": 13, "token_counts": 145, "n_ast_nodes": 240, "n_identifiers": 27, "random_cut": "def test_uses_before_no_reduce_real_executor_uses():\n flow = (\n Flow(port=exposed_port)\n .add(uses=Executor1, name='pod0')\n .add(uses=Executor2, needs='gateway', name='pod1')\n .add(uses=Executor3, needs='gateway', name='pod2')\n .add(needs=['pod0', 'pod1', 'pod2'], name='pod3', uses=DummyExecutor)\n )\n\n with flow as f:\n da = DocumentArray([Document() for _ in range(5)])\n resp = Client(port=exposed_port, return_responses=True).post('/', inputs=da)\n\n # assert no reduce happened\n assert len(resp[0].docs) == 1\n assert resp[0].docs[0].id == 'fake_document'\n\n" }, { "id": 28599, "commit_id": "a579eb534e06a854805f037a587f13258f22fdf5", "repo": "saleor", "path": "saleor/graphql/checkout/mutations/order_create_from_checkout.py", "file_name": "order_create_from_checkout.py", "fun_name": "perform_mutation", "commit_message": "Use dataloader 
for discounts instead of lazy object (#10512)\n\n* Use dataloader for discounts instead of lazy object\r\n\r\n* Rename load function to be in par with other loaders", "code": "def perform_mutation(cls, _root, info, **data):\n checkout_id = data.get(\"id\")\n checkout = cls.get_node_or_error(\n info,\n checkout_id,\n field=\"id\",\n only_type=Checkout,\n code=OrderCreateFromCheckoutErrorCode.CHECKOUT_NOT_FOUND.value,\n )\n tracking_code = analytics.get_client_id(info.context)\n\n discounts = load_discounts(info.context)\n manager = info.context.plugins\n checkout_lines, unavailable_variant_pks = fetch_checkout_lines(checkout)\n checkout_info = fetch_checkout_info(\n checkout, checkout_lines, discounts, manager\n )\n\n validate_checkout(\n checkout_info=checkout_info,\n lines=checkout_lines,\n unavailable_variant_pks=unavailable_variant_pks,\n discounts=discounts,\n manager=manager,\n )\n app = load_app(info.context)\n try:\n order = create_order_from_checkout(\n checkout_info=checkout_info,\n checkout_lines=checkout_lines,\n discounts=discounts,\n manager=info.context.plugins,\n user=info.context.user,\n app=app,\n tracking_code=tracking_code,\n delete_checkout=data[\"remove_checkout\"],\n )\n except NotApplicable:\n code = OrderCreateFromCheckoutErrorCode.VOUCHER_NOT_APPLICABLE.value\n raise ValidationError(\n {\n \"voucher_code\": ValidationError(\n \"Voucher not applicable\",\n code=code,\n )\n }\n )\n except InsufficientStock as e:\n error = prepare_insufficient_stock_checkout_validation_error(e)\n raise error\n except GiftCardNotApplicable as e:\n raise ValidationError({\"gift_cards\": e})\n\n return OrderCreateFromCheckout(order=order)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 657, "n_words": 96, "vocab_size": 68, "complexity": 4, "nloc": 51, "token_counts": 234, "n_ast_nodes": 358, "n_identifiers": 46, "random_cut": "def perform_mutation(cls, _root, info, **data):\n checkout_id = data.get(\"id\")\n checkout = cls.get_node_or_error(\n info,\n checkout_id,\n field=\"id\",\n only_type=Checkout,\n code=OrderCreateFromCheckoutErrorCode.CHECKOUT_NOT_FOUND.value,\n )\n tracking_code = analytics.get_client_id(info.context)\n\n discounts = load_discounts(info.context)\n man" }, { "id": 292674, "commit_id": "dd88a05cb400d416a68a1be16fee8ee2ab48a70f", "repo": "core", "path": "homeassistant/components/deconz/gateway.py", "file_name": "gateway.py", "fun_name": "master", "commit_message": "Make type checking pass for deCONZ init, gateway and services (#66054)\n\n* Type and enable type checking for init, config_flow, diagnostics, gateway and services\r\n\r\n* Fix import\r\n\r\n* Fix review comment", "code": "def master(self) -> bool:\n \n return cast(bool, self.config_entry.options[CONF_MASTER_GATEWAY])\n\n # Options\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 26, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 37, "n_identifiers": 7, "random_cut": "def master(self) -> bool:\n \n retur" }, { "id": 309183, "commit_id": "8f6e24aa1ea7061e9b52085deb57c49e9ccf4a86", "repo": "core", "path": "tests/components/homewizard/test_coordinator.py", "file_name": "test_coordinator.py", "fun_name": "test_coordinator_failed_to_update", "commit_message": "Add HomeWizard Energy integration (#55812)\n\nCo-authored-by: Matthias Alphart \r\nCo-authored-by: Paulus 
Schoutsen ", "code": "async def test_coordinator_failed_to_update(aioclient_mock, hass):\n \n\n # Update failed by internal error\n meter = get_mock_device(product_type=\"p1_meter\")\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 22, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 11, "token_counts": 55, "n_ast_nodes": 32, "n_identifiers": 6, "random_cut": "async def test_coordinator_failed_to_update(aioclient_mock, hass):\n \n\n # Update failed by internal error\n meter = get_mock_device(product_type=\"p" }, { "id": 179966, "commit_id": "da4a59459f7eaa7312b50687a4a61cef1fb411d6", "repo": "gradio", "path": "gradio/blocks.py", "file_name": "blocks.py", "fun_name": "__enter__", "commit_message": "blocks-with-fix\n- add support for \"with gr.Blocks() as demo:\" usage", "code": "def __enter__(self):\n self.parent = Context.block\n Context.block = self\n return self\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 30, "n_words": 10, "vocab_size": 7, "complexity": 1, "nloc": 4, "token_counts": 19, "n_ast_nodes": 31, "n_identifiers": 5, "random_cut": "def __enter__(self):\n " }, { "id": 78206, "commit_id": "952edd84c7a0fd9249257591bc92cd55cf59c0f8", "repo": "wagtail", "path": "wagtail/admin/views/generic/models.py", "file_name": "models.py", "fun_name": "save_instance", "commit_message": "Add breadcrumbs and new Page Editor side panels to Snippets views (#8623)", "code": "def save_instance(self):\n \n instance = self.form.save()\n revision = None\n\n self.has_content_changes = self.form.has_changed()\n\n # Save revision if the model inherits from RevisionMixin\n if self.revision_enabled:\n revision = instance.save_revision(\n user=self.request.user,\n changed=self.has_content_changes,\n )\n\n log(\n instance=instance,\n action=\"wagtail.edit\",\n revision=revision,\n content_changed=self.has_content_changes,\n )\n\n return instance\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 195, "n_words": 36, "vocab_size": 28, "complexity": 2, "nloc": 16, "token_counts": 78, "n_ast_nodes": 125, "n_identifiers": 16, "random_cut": "def save_instance(self):\n \n instance = self.form.save()\n revision = None\n\n self.has_content_changes = self.form.has_changed()\n\n # Save revision if the model inherits from RevisionMixin\n if self.revision_enabled:\n revision = instance.sa" }, { "id": 249162, "commit_id": "c97042f7eef3748e17c90e48a4122389a89c4735", "repo": "synapse", "path": "tests/rest/admin/test_room.py", "file_name": "test_room.py", "fun_name": "test_context_as_admin", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13469)", "code": "def test_context_as_admin(self) -> None:\n \n\n # Create a room. 
We're not part of it.\n user_id = self.register_user(\"test\", \"test\")\n user_tok = self.login(\"test\", \"test\")\n room_id = self.helper.create_room_as(user_id, tok=user_tok)\n\n # Populate the room with events.\n events = []\n for i in range(30):\n events.append(\n self.helper.send_event(\n room_id, \"com.example.test\", content={\"index\": i}, tok=user_tok\n )\n )\n\n # Now let's fetch the context for this room.\n midway = (len(events) - 1) // 2\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/rooms/%s/context/%s\"\n % (room_id, events[midway][\"event_id\"]),\n access_token=self.admin_user_tok,\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(\n channel.json_body[\"event\"][\"event_id\"], events[midway][\"event_id\"]\n )\n\n for found_event in channel.json_body[\"events_before\"]:\n for j, posted_event in enumerate(events):\n if found_event[\"event_id\"] == posted_event[\"event_id\"]:\n self.assertTrue(j < midway)\n break\n else:\n self.fail(\"Event %s from events_before not found\" % j)\n\n for found_event in channel.json_body[\"events_after\"]:\n for j, posted_event in enumerate(events):\n if found_event[\"event_id\"] == posted_event[\"event_id\"]:\n self.assertTrue(j > midway)\n break\n else:\n self.fail(\"Event %s from events_after not found\" % j)\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 557, "n_words": 132, "vocab_size": 89, "complexity": 8, "nloc": 39, "token_counts": 259, "n_ast_nodes": 435, "n_identifiers": 32, "random_cut": "def test_context_as_admin(self) -> None:\n \n\n # Create a room. We're not part of it.\n user_id = self.register_user(\"test\", \"test\")\n user_tok = self.login(\"test\", \"test\")\n room_id = self.helper.create_room_as(user_id, tok=user_tok)\n\n # Populate the room with events.\n events = []\n for i in range(30):\n events.append(\n self.helper.send_event(\n room_id, \"com.example.test\", content={\"index\": i}, tok=user_tok\n )\n )\n\n # Now let's fetch the context for this room.\n midway = (len(events) - 1) // 2\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/rooms/%s/context/%s\"\n % (room_id, events[midw" }, { "id": 45457, "commit_id": "69f6f9e01b6df76c3c8fa266d460324163957887", "repo": "airflow", "path": "airflow/migrations/versions/3c20cacc0044_add_dagrun_run_type.py", "file_name": "3c20cacc0044_add_dagrun_run_type.py", "fun_name": "upgrade", "commit_message": "Autogenerate migration reference doc (#21601)\n\n* document airflow version in each alembic migration module and use this to autogen the doc\r\n* update each migration module to have the same description used in migration ref (so it can be used in autogen)", "code": "def upgrade():\n \n run_type_col_type = sa.String(length=50)\n\n conn = op.get_bind()\n inspector = Inspector.from_engine(conn)\n dag_run_columns = [col.get('name') for col in inspector.get_columns(\"dag_run\")]\n\n if \"run_type\" not in dag_run_columns:\n\n # Add nullable column\n with op.batch_alter_table(\"dag_run\") as batch_op:\n batch_op.add_column(sa.Column(\"run_type\", run_type_col_type, nullable=True))\n\n # Generate run type for existing records\n sessionmaker = sa.orm.sessionmaker()\n session = sessionmaker(bind=conn)\n\n for run_type in DagRunType:\n session.query(DagRun).filter(DagRun.run_id.like(f\"{run_type.value}__%\")).update(\n {DagRun.run_type: run_type.value}, synchronize_session=False\n )\n\n 
session.query(DagRun).filter(DagRun.run_type.is_(None)).update(\n {DagRun.run_type: DagRunType.MANUAL.value}, synchronize_session=False\n )\n session.commit()\n\n # Make run_type not nullable\n with op.batch_alter_table(\"dag_run\") as batch_op:\n batch_op.alter_column(\n \"run_type\", existing_type=run_type_col_type, type_=run_type_col_type, nullable=False\n )\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 268, "n_words": 77, "vocab_size": 55, "complexity": 4, "nloc": 22, "token_counts": 210, "n_ast_nodes": 360, "n_identifiers": 40, "random_cut": "def upgrade():\n \n run_type_col_type = sa.String(length=50)\n\n conn = op.get_bind()\n inspector = Inspector.from_engine(conn)\n dag_run_columns = [col.get('name') for col in inspector.get_columns(\"dag_run\")]\n\n if \"run_type\" not in dag_run_columns:\n\n # Add nullable column\n with op.batch_alter_table(\"dag_run\") as batch_op:\n batch_op.add_column(sa.Column(\"run_type\", run_type_col_type, nullable=True))\n\n # Generate run type for existing records\n sessionmaker = sa.orm.sessionmaker()\n session = sessionmaker(bind=conn)\n\n for run_type in DagRunType:\n session.query(DagRun).filter(DagRun.run_id.like(f\"{run_type.value}__%\")).update(\n {DagRun.run_type: run_type.value}, synchronize_session=False\n )\n\n session.query(DagRun).filter(DagRun.run_type.is_(None)).update(\n {DagRun.run_type: DagRunType.MANUAL.value}, synchron" }, { "id": 155853, "commit_id": "48820dfc4565d0dcd299409553e6721c246a9f88", "repo": "dask", "path": "dask/tests/test_distributed.py", "file_name": "test_distributed.py", "fun_name": "test_to_sql_engine_kwargs", "commit_message": "Move creation of sqlalchemy connection for picklability (#8745)", "code": "async def test_to_sql_engine_kwargs(c, s, a, b):\n # https://github.com/dask/dask/issues/8738\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n pytest.importorskip(\"sqlalchemy\")\n\n df = pd.DataFrame({\"a\": range(10), \"b\": range(10)})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=1)\n with tmpfile() as f:\n uri = f\"sqlite:///{f}\"\n result = ddf.to_sql(\n \"test\", uri, index=True, engine_kwargs={\"echo\": False}, compute=False\n )\n await c.compute(result)\n\n dd.utils.assert_eq(\n ddf,\n dd.read_sql_table(\"test\", uri, \"index\"),\n check_divisions=False,\n )\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 163, "n_words": 54, "vocab_size": 46, "complexity": 1, "nloc": 18, "token_counts": 142, "n_ast_nodes": 243, "n_identifiers": 28, "random_cut": "async def test_to_sql_engine_kwargs(c, s, a, b):\n # https://github.com/dask/dask/issues/8738\n pd = pytest.importorskip(\"pandas\")\n dd = pytest.importorskip(\"dask.dataframe\")\n pytest.importorskip(\"sqlalchemy\")\n\n df = pd.DataFrame({\"a\": range(10), \"b\": range(10)})\n df.index.name = \"index\"\n ddf = dd.from_pandas(df, npartitions=1)\n with tmpfile() as f:\n uri = f\"sqlite:///{f}\"\n result = ddf.to_sql(\n \"test\"" }, { "id": 188195, "commit_id": "e259d2a9e9167c58fa75a78d1050dd5dcfde96f4", "repo": "jumpserver", "path": "apps/orgs/models.py", "file_name": "models.py", "fun_name": "get_total_resources_amount", "commit_message": "fix: fix rbac to dev (#7636)\n\n* feat: 添加 RBAC 应用模块\r\n\r\n* feat: 添加 RBAC Model、API\r\n\r\n* feat: 添加 RBAC Model、API 2\r\n\r\n* feat: 添加 RBAC Model、API 3\r\n\r\n* feat: 添加 RBAC 
Model、API 4\r\n\r\n* feat: RBAC\r\n\r\n* feat: RBAC\r\n\r\n* feat: RBAC\r\n\r\n* feat: RBAC\r\n\r\n* feat: RBAC\r\n\r\n* feat: RBAC 整理权限位\r\n\r\n* feat: RBAC 整理权限位2\r\n\r\n* feat: RBAC 整理权限位2\r\n\r\n* feat: RBAC 整理权限位\r\n\r\n* feat: RBAC 添加默认角色\r\n\r\n* feat: RBAC 添加迁移文件;迁移用户角色->用户角色绑定\r\n\r\n* feat: RBAC 添加迁移文件;迁移用户角色->用户角色绑定\r\n\r\n* feat: RBAC 修改用户模块API\r\n\r\n* feat: RBAC 添加组织模块迁移文件 & 修改组织模块API\r\n\r\n* feat: RBAC 添加组织模块迁移文件 & 修改组织模块API\r\n\r\n* feat: RBAC 修改用户角色属性的使用\r\n\r\n* feat: RBAC No.1\r\n\r\n* xxx\r\n\r\n* perf: 暂存\r\n\r\n* perf: ...\r\n\r\n* perf(rbac): 添加 perms 到 profile serializer 中\r\n\r\n* stash\r\n\r\n* perf: 使用init\r\n\r\n* perf: 修改migrations\r\n\r\n* perf: rbac\r\n\r\n* stash\r\n\r\n* stash\r\n\r\n* pref: 修改rbac\r\n\r\n* stash it\r\n\r\n* stash: 先去修复其他bug\r\n\r\n* perf: 修改 role 添加 users\r\n\r\n* pref: 修改 RBAC Model\r\n\r\n* feat: 添加权限的 tree api\r\n\r\n* stash: 暂存一下\r\n\r\n* stash: 暂存一下\r\n\r\n* perf: 修改 model verbose name\r\n\r\n* feat: 添加model各种 verbose name\r\n\r\n* perf: 生成 migrations\r\n\r\n* perf: 优化权限位\r\n\r\n* perf: 添加迁移脚本\r\n\r\n* feat: 添加组织角色迁移\r\n\r\n* perf: 添加迁移脚本\r\n\r\n* stash\r\n\r\n* perf: 添加migrateion\r\n\r\n* perf: 暂存一下\r\n\r\n* perf: 修改rbac\r\n\r\n* perf: stash it\r\n\r\n* fix: 迁移冲突\r\n\r\n* fix: 迁移冲突\r\n\r\n* perf: 暂存一下\r\n\r\n* perf: 修改 rbac 逻辑\r\n\r\n* stash: 暂存一下\r\n\r\n* perf: 修改内置角色\r\n\r\n* perf: 解决 root 组织的问题\r\n\r\n* perf: stash it\r\n\r\n* perf: 优化 rbac\r\n\r\n* perf: 优化 rolebinding 处理\r\n\r\n* perf: 完成用户离开组织的问题\r\n\r\n* perf: 暂存一下\r\n\r\n* perf: 修改翻译\r\n\r\n* perf: 去掉了 IsSuperUser\r\n\r\n* perf: IsAppUser 去掉完成\r\n\r\n* perf: 修改 connection token 的权限\r\n\r\n* perf: 去掉导入的问题\r\n\r\n* perf: perms define 格式,修改 app 用户 的全新啊\r\n\r\n* perf: 修改 permission\r\n\r\n* perf: 去掉一些 org admin\r\n\r\n* perf: 去掉部分 org admin\r\n\r\n* perf: 再去掉点 org admin role\r\n\r\n* perf: 再去掉部分 org admin\r\n\r\n* perf: user 角色搜索\r\n\r\n* perf: 去掉很多 js\r\n\r\n* perf: 添加权限位\r\n\r\n* perf: 修改权限\r\n\r\n* perf: 去掉一个 todo\r\n\r\n* merge: with dev\r\n\r\n* fix: 修复冲突\r\n\r\nCo-authored-by: Bai \r\nCo-authored-by: Michael Bai \r\nCo-authored-by: ibuler ", "code": "def get_total_resources_amount(self):\n from django.apps import apps\n from orgs.mixins.models import OrgModelMixin\n summary = {'users.Members': self.get_members().count()}\n for app_name, app_config in apps.app_configs.items():\n models_cls = app_config.get_models()\n for model in models_cls:\n if not issubclass(model, OrgModelMixin):\n continue\n key = '{}.{}'.format(app_name, model.__name__)\n summary[key] = self.get_resource_amount(model)\n return summary\n", "url": "https://github.com/jumpserver/jumpserver.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 160, "n_words": 40, "vocab_size": 32, "complexity": 4, "nloc": 12, "token_counts": 94, "n_ast_nodes": 151, "n_identifiers": 23, "random_cut": "def get_total_resources_amount(self):\n from django.apps import apps\n from orgs.mixins.models import OrgModelMixin\n summary = {'users.Members': self.get_members().count()}\n for app_name, app_config in apps.app_configs.items():\n models_cls = app_config.get_models()\n for model in models_cls:\n if not issubclass(model, OrgModelMixin" }, { "id": 165396, "commit_id": "004b4c58779612a91972e3d9b1ce3c8e045d8e14", "repo": "pandas", "path": "pandas/tests/io/excel/test_odf.py", "file_name": "test_odf.py", "fun_name": "test_read_newlines_between_xml_elements_table", "commit_message": "BUG: error in read_excel with some ods files #45598 (#46050)\n\n* BUG: error in read_excel with some ods 
files #45598\r\n\r\n* BUG: use hasattr instead of dir\r\n\r\n* DOC: add issue number in new test case\r\n\r\n* DOC: remove comment\r\n\r\nCo-authored-by: Dimitra Karadima ", "code": "def test_read_newlines_between_xml_elements_table():\n # GH#45598\n expected = pd.DataFrame(\n [[1.0, 4.0, 7], [np.nan, np.nan, 8], [3.0, 6.0, 9]],\n columns=[\"Column 1\", \"Column 2\", \"Column 3\"],\n )\n\n result = pd.read_excel(\"test_newlines.ods\")\n\n tm.assert_frame_equal(result, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 56, "n_words": 28, "vocab_size": 26, "complexity": 1, "nloc": 7, "token_counts": 75, "n_ast_nodes": 102, "n_identifiers": 11, "random_cut": "def test_read_newlines_between_xml_elements_table():\n # GH#45598\n expected = pd.DataFrame(\n [[1.0, 4.0, 7], [np.nan, np.nan, 8], [3.0, 6.0, 9]],\n columns=[\"Column 1\", \"Column 2\", \"Column 3\"],\n )\n\n result = pd.read_excel(\"test_newlines.ods\")\n\n tm.assert_frame_equal(result, expected)\n" }, { "id": 105223, "commit_id": "f5826eff9b06ab10dba1adfa52543341ef1e6009", "repo": "datasets", "path": "tests/test_iterable_dataset.py", "file_name": "test_iterable_dataset.py", "fun_name": "test_concatenate_datasets_with_different_columns", "commit_message": "Add `concatenate_datasets` for iterable datasets (#4500)\n\n* add concatenate_datasets for iterable datasets\r\n\r\n* fix\r\n\r\n* infer features\r\n\r\n* fill missing rowzs and columns\r\n\r\n* comments\r\n\r\n* only check for duplicate keys once\r\n\r\n* comments\r\n\r\n* keep concatenate_datasets in arrow_dataset (to be deprecated)\r\n\r\n* style\r\n\r\n* comments, typing, fix missing token_per_repo_id\r\n\r\n* style", "code": "def test_concatenate_datasets_with_different_columns():\n ex_iterable1 = ExamplesIterable(generate_examples_fn, {\"label\": 10})\n dataset1 = IterableDataset(ex_iterable1)\n ex_iterable2 = ExamplesIterable(generate_examples_fn, {})\n dataset2 = IterableDataset(ex_iterable2)\n # missing column \"label\" -> it should be replaced with nulls\n extended_dataset2_list = [{\"label\": None, **x} for x in dataset2]\n\n concatenated_dataset = concatenate_datasets([dataset1, dataset2])\n assert list(concatenated_dataset) == list(dataset1) + extended_dataset2_list\n # change order\n concatenated_dataset = concatenate_datasets([dataset2, dataset1])\n assert list(concatenated_dataset) == extended_dataset2_list + list(dataset1)\n\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 92, "n_words": 60, "vocab_size": 44, "complexity": 2, "nloc": 10, "token_counts": 97, "n_ast_nodes": 157, "n_identifiers": 13, "random_cut": "def test_concatenate_datasets_with_different_columns():\n ex_iterable1 = ExamplesIterable(generate_examples_fn, {\"label\": 10})\n dataset1 = It" }, { "id": 196954, "commit_id": "bbca83fd553f5f14251ab08ae06cbd7524d2bbc1", "repo": "sympy", "path": "sympy/solvers/tests/test_decompogen.py", "file_name": "test_decompogen.py", "fun_name": "test_decompogen", "commit_message": "made changes", "code": "def test_decompogen():\n assert decompogen(sin(cos(x)), x) == [sin(x), cos(x)]\n assert decompogen(sin(x)**2 + sin(x) + 1, x) == [x**2 + x + 1, sin(x)]\n assert decompogen(sqrt(6*x**2 - 5), x) == [sqrt(x), 6*x**2 - 5]\n assert decompogen(sin(sqrt(cos(x**2 + 1))), x) == [sin(x), sqrt(x), cos(x), x**2 + 1]\n assert decompogen(Abs(cos(x)**2 + 3*cos(x) - 4), x) == 
[Abs(x), x**2 + 3*x - 4, cos(x)]\n assert decompogen(sin(x)**2 + sin(x) - sqrt(3)/2, x) == [x**2 + x - sqrt(3)/2, sin(x)]\n assert decompogen(Abs(cos(y)**2 + 3*cos(x) - 4), x) == [Abs(x), 3*x + cos(y)**2 - 4, cos(x)]\n assert decompogen(x, y) == [x]\n assert decompogen(1, x) == [1]\n assert decompogen(Max(3, x), x) == [Max(3, x)]\n raises(TypeError, lambda: decompogen(x < 5, x))\n u = 2*x + 3\n assert decompogen(Max(sqrt(u),(u)**2), x) == [Max(sqrt(x), x**2), u]\n assert decompogen(Max(u, u**2, y), x) == [Max(x, x**2, y), u]\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 174, "n_words": 133, "vocab_size": 64, "complexity": 1, "nloc": 16, "token_counts": 430, "n_ast_nodes": 605, "n_identifiers": 12, "random_cut": "def test_decompogen():\n assert decompogen(sin(cos(x)), x) == [sin(x), cos(x)]\n assert decompogen(sin(x)**2 + sin(x) + 1, x) == [x**2 + x + 1, sin(x)]\n assert decompogen(sqrt(6*x**2 - 5), x) == [sqrt(x), 6*x**2 - 5]\n assert decompogen(sin(sqrt(cos(x**2 + 1))), x) == [sin(x), sqrt(x), cos(x), x**2 + 1]\n assert decompogen(Abs(cos(x)**2 + 3*cos(x) - 4), x) == [Abs(x), x**2 + 3*x - 4, cos(x)]\n assert decompogen(sin(x)**2 + sin(x) - sqrt(3)/2, x) == [x**2 + x - sqrt(3)/2, sin(x)]\n assert decompogen(Abs(cos(y)**2 + 3*cos(x) - 4), x) == [Abs(x), 3*x + cos(y)**2 - 4, cos(x)]\n assert decompogen(x, y) == [x]\n assert decompogen(1, x) == [1]\n assert decompogen" }, { "id": 171540, "commit_id": "3fffb6d49abe20ebc4a3380181f90103fb9ce22e", "repo": "pandas", "path": "pandas/tests/groupby/test_groupby.py", "file_name": "test_groupby.py", "fun_name": "test_frame_set_name_single", "commit_message": "DEPR: Enforce numeric_only=False in groupby sum/mean (#49829)\n\n* DEPR: Enforce numeric_only=False in groupby sum/mean\r\n\r\n* cleanup\r\n\r\n* Refinements\r\n\r\n* whatsnew fixup", "code": "def test_frame_set_name_single(df):\n grouped = df.groupby(\"A\")\n\n msg = \"The default value of numeric_only\"\n with pytest.raises(TypeError, match=\"Could not convert\"):\n grouped.mean()\n result = grouped.mean(numeric_only=True)\n assert result.index.name == \"A\"\n\n with pytest.raises(TypeError, match=\"Could not convert\"):\n df.groupby(\"A\", as_index=False).mean()\n result = df.groupby(\"A\", as_index=False).mean(numeric_only=True)\n assert result.index.name != \"A\"\n\n with pytest.raises(TypeError, match=\"Could not convert\"):\n grouped.agg(np.mean)\n result = grouped[[\"C\", \"D\"]].agg(np.mean)\n assert result.index.name == \"A\"\n\n result = grouped.agg({\"C\": np.mean, \"D\": np.std})\n assert result.index.name == \"A\"\n\n result = grouped[\"C\"].mean()\n assert result.index.name == \"A\"\n result = grouped[\"C\"].agg(np.mean)\n assert result.index.name == \"A\"\n result = grouped[\"C\"].agg([np.mean, np.std])\n assert result.index.name == \"A\"\n\n msg = r\"nested renamer is not supported\"\n with pytest.raises(SpecificationError, match=msg):\n grouped[\"C\"].agg({\"foo\": np.mean, \"bar\": np.std})\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 190, "n_words": 100, "vocab_size": 46, "complexity": 1, "nloc": 26, "token_counts": 280, "n_ast_nodes": 485, "n_identifiers": 19, "random_cut": "def test_frame_set_name_single(df):\n grouped = df.groupby(\"A\")\n\n msg = \"The default value of numeric_only\"\n with pytest.raises(TypeError, match=\"Could not convert\"):\n grouped.mean()\n result = 
grouped.mean(numeric_only=True)\n assert result.index.name == \"A\"\n\n with pytest.raises(TypeError, match=\"Could not convert\"):\n df.groupby(\"A\", as_index=False).mean()\n result = df.groupby(\"A\", as_index=False).mean(numeric_only=True)\n assert result.index.name != \"A\"\n\n with pytest.raises(TypeError, match=\"Could not convert\"):\n grouped.agg(np.mean)\n result = grouped[[\"C\", \"D\"]].agg(np.mean)\n assert result.index.name == \"A\"\n\n result = grouped.agg({\"C\": np.mean, \"D\": np.std})\n assert result.index.name == \"A\"\n\n result = grouped[\"C\"].mean()\n assert result.index.name == \"A\"\n result = grouped[\"C\"].agg(np.mean)\n assert result.index.name == \"A\"\n result = grouped[\"C\"].agg([np.mean, np.std])\n assert result.index.name == \"A\"\n\n msg = r\"nested renamer is not supported\"\n with pytest.raises(Specificatio" }, { "id": 155698, "commit_id": "80f9821f4d5a4badc2179dbd17f1fc7730cc9f50", "repo": "dask", "path": "dask/dataframe/io/tests/test_sql.py", "file_name": "test_sql.py", "fun_name": "test_to_sql_engine_kwargs", "commit_message": "Add engine_kwargs support to dask.dataframe.to_sql (#8609)\n\n### Use case: SSL\r\n\r\nThis was the original [use case] that motivated #8596: to force SSL on the db connection. Whether the new `engine_kwargs` argument is helpful/necessary for this use case depends on the db driver used by SQLAlchemy:\r\n\r\n* [MySQL] (helpful): either use `connect_args={'ssl': ...}` as engine kwarg or add `?ssl_cert=...&ssl_key=...` to URI.\r\n* [psycopg2] (not helpful): must use `?sslmode=require` in connection URI, not supported as engine argument.\r\n* [pg8000] (necessary): must use `connect_args={'ssl_context': ...}` as engine kwarg.\r\n\r\n[use case]: https://github.com/coiled/dask-community/issues/186\r\n[MySQL]: https://docs.sqlalchemy.org/en/14/dialects/mysql.html#ssl-connections\r\n[psycopg2]: https://docs.sqlalchemy.org/en/14/dialects/postgresql.html#ssl-connections\r\n[pg8000]: https://docs.sqlalchemy.org/en/14/dialects/postgresql.html#pg8000-ssl", "code": "def test_to_sql_engine_kwargs(caplog):\n ddf = dd.from_pandas(df, 2)\n with tmp_db_uri() as uri:\n ddf.to_sql(\"test\", uri, engine_kwargs={\"echo\": False})\n logs = \"\\n\".join(r.message for r in caplog.records)\n assert logs == \"\"\n assert_eq(df, read_sql_table(\"test\", uri, \"number\"))\n\n with tmp_db_uri() as uri:\n ddf.to_sql(\"test\", uri, engine_kwargs={\"echo\": True})\n logs = \"\\n\".join(r.message for r in caplog.records)\n assert \"CREATE\" in logs\n assert \"INSERT\" in logs\n\n assert_eq(df, read_sql_table(\"test\", uri, \"number\"))\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 127, "n_words": 56, "vocab_size": 29, "complexity": 3, "nloc": 13, "token_counts": 131, "n_ast_nodes": 230, "n_identifiers": 17, "random_cut": "def test_to_sql_engine_kwargs(caplog):\n ddf = dd.from_pandas(df, 2)\n with tmp_db_uri() as uri:\n ddf.to_sql(\"test\", uri, engine_kwargs={\"echo\": False})\n logs = \"\\n\".join(r.message for r in caplog.records)\n assert logs == \"\"\n assert_eq(df, read_sql_table(\"test\", uri, \"number\"))\n\n with tmp_db_uri() as uri:\n ddf.to_sql(\"test\", uri, engine_kwargs={\"echo\": True})\n logs = \"\\n\".join(r.message for r in caplog.records)\n assert \"CREATE\" in logs\n " }, { "id": 202585, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/db_functions/comparison/test_greatest.py", "file_name": 
"test_greatest.py", "fun_name": "test_decimal_filter", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_decimal_filter(self):\n obj = DecimalModel.objects.create(n1=Decimal(\"1.1\"), n2=Decimal(\"1.2\"))\n self.assertCountEqual(\n DecimalModel.objects.annotate(\n greatest=Greatest(\"n1\", \"n2\"),\n ).filter(greatest=Decimal(\"1.2\")),\n [obj],\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 81, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 8, "token_counts": 63, "n_ast_nodes": 107, "n_identifiers": 14, "random_cut": "def test_decimal_filter(self):\n obj = DecimalModel.objects.create(n1=Decimal(\"1.1\"), n2=Decimal(\"1.2\"))\n self.assertCountEqual(\n DecimalModel.objects.annotate(\n greatest=Greatest(\"n1\", \"n2\"),\n )." }, { "id": 205690, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/lookups.py", "file_name": "lookups.py", "fun_name": "get_bilateral_transforms", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_bilateral_transforms(self):\n if hasattr(self.lhs, \"get_bilateral_transforms\"):\n bilateral_transforms = self.lhs.get_bilateral_transforms()\n else:\n bilateral_transforms = []\n if self.bilateral:\n bilateral_transforms.append(self.__class__)\n return bilateral_transforms\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 77, "n_words": 17, "vocab_size": 13, "complexity": 3, "nloc": 8, "token_counts": 45, "n_ast_nodes": 76, "n_identifiers": 8, "random_cut": "def get_bilateral_transforms(self):\n if hasattr(self.lhs, \"get_bilateral_transforms\"):\n bilateral_transforms = self.lhs.get_bilateral_transforms()\n else:\n bilateral_transforms = []\n if self.bilateral:\n bilateral_transforms.append(self.__class__)\n return bilateral_transforms\n\n" }, { "id": 26978, "commit_id": "513fc80bc698c177b87774b3aff3da7b9aedbe06", "repo": "saleor", "path": "saleor/graphql/order/schema.py", "file_name": "schema.py", "fun_name": "resolve_homepage_events", "commit_message": "Stricter signatures for resolvers and mutations (#9649)", "code": "def resolve_homepage_events(_root, info, **kwargs):\n qs = resolve_homepage_events()\n return create_connection_slice(qs, info, kwargs, OrderEventCountableConnection)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 25, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 38, "n_identifiers": 7, "random_cut": "def resolve_homepage_events(_root, info, **kwargs):\n qs = resolve_homepage_events()\n return create_connection_slice(qs, info, kwargs, OrderEventCountableConnection)\n" }, { "id": 84027, "commit_id": "ba5cf331a2c65bc5d09be28892327e59698eda0e", "repo": "zulip", "path": "zerver/tests/test_digest.py", "file_name": "test_digest.py", "fun_name": "test_no_logging", "commit_message": "testing: 100% coverage for zerver/tests/test_digest.py.", "code": "def test_no_logging(self) -> None:\n hamlet = self.example_user(\"hamlet\")\n startlen = len(RealmAuditLog.objects.all())\n bulk_write_realm_audit_logs([])\n self.assert_length(RealmAuditLog.objects.all(), startlen)\n bulk_write_realm_audit_logs([hamlet])\n self.assert_length(RealmAuditLog.objects.all(), startlen + 1)\n", "url": 
"https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 59, "n_words": 18, "vocab_size": 15, "complexity": 1, "nloc": 7, "token_counts": 68, "n_ast_nodes": 113, "n_identifiers": 11, "random_cut": "def test_no_logging(self) -> None:\n hamlet = self.example_user(\"hamlet\")\n startlen = len(RealmAuditLog.objects.all())\n bulk_write_realm_audit_logs([])\n self.a" }, { "id": 191459, "commit_id": "db37bd089fc18c8215da42202dfadc397b20d26c", "repo": "langchain", "path": "langchain/llms/cohere.py", "file_name": "cohere.py", "fun_name": "_default_params", "commit_message": "model laboratory (#95)", "code": "def _default_params(self) -> Mapping[str, Any]:\n \n return {\n \"max_tokens\": self.max_tokens,\n \"temperature\": self.temperature,\n \"k\": self.k,\n \"p\": self.p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_penalty,\n }\n", "url": "https://github.com/hwchase17/langchain.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 107, "n_words": 20, "vocab_size": 20, "complexity": 1, "nloc": 10, "token_counts": 52, "n_ast_nodes": 87, "n_identifiers": 11, "random_cut": "def _default_params(self) -> Mapping[str, Any]:\n \n return {\n \"max_tokens\": self.max_tokens,\n \"temperature\": self.temperature,\n \"k\": self.k,\n \"p\": self.p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_penalty,\n }\n" }, { "id": 286488, "commit_id": "aed683f44015cb5aa6cae9c2ce719c956cda7b46", "repo": "OpenBBTerminal", "path": "openbb_terminal/portfolio/portfolio_controller.py", "file_name": "portfolio_controller.py", "fun_name": "print_help", "commit_message": "Feature/attribution toolkit (#3156)\n\n* add attribution toolkit\r\n\r\n* add attrib to test script for portfolio\r\n\r\n* removes yahooquery dependency and early rounding\r\n\r\n* Update _index.md\r\n\r\n* update feature to include raw and type flags, graph always shows, table output optional, one type of output at a time\r\n\r\n* Linting\r\n\r\n* Update index\r\n\r\n* Update index 2\r\n\r\n* Update tests\r\n\r\n* changes argument descriptions\r\n\r\n* Small fix\r\n\r\n* Formatting Black\r\n\r\nCo-authored-by: S3908818 \r\nCo-authored-by: Louise Platts (S3908818) <88080425+s3908818@users.noreply.github.com>\r\nCo-authored-by: Jeroen Bouma \r\nCo-authored-by: James Maslek \r\nCo-authored-by: Louise Amy <74476622+louiseamy4@users.noreply.github.com>\r\nCo-authored-by: Jeroen Bouma ", "code": "def print_help(self):\n \n mt = MenuText(\"portfolio/\")\n mt.add_menu(\"bro\")\n mt.add_menu(\"po\")\n mt.add_raw(\"\\n\")\n\n mt.add_cmd(\"load\")\n mt.add_cmd(\"show\")\n mt.add_cmd(\"bench\")\n mt.add_raw(\"\\n\")\n mt.add_param(\"_loaded\", self.portfolio_name)\n mt.add_param(\"_riskfreerate\", self.portfolio_name)\n mt.add_param(\"_benchmark\", self.benchmark_name)\n mt.add_raw(\"\\n\")\n\n mt.add_info(\"_graphs_\")\n mt.add_cmd(\"holdv\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"holdp\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"yret\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"mret\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"dret\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"distr\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"maxdd\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"rvol\", self.portfolio_name and self.benchmark_name)\n 
mt.add_cmd(\"rsharpe\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"rsort\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"rbeta\", self.portfolio_name and self.benchmark_name)\n\n mt.add_info(\"_metrics_\")\n mt.add_cmd(\"alloc\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"attrib\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"summary\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"alloc\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"attrib\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"metric\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"perf\", self.portfolio_name and self.benchmark_name)\n\n mt.add_info(\"_risk_\")\n mt.add_cmd(\"var\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"es\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"os\", self.portfolio_name and self.benchmark_name)\n\n port = bool(self.portfolio_name)\n port_bench = bool(self.portfolio_name) and bool(self.benchmark_name)\n\n help_text = f\n # TODO: Clean up the reports inputs\n # TODO: Edit the allocation to allow the different asset classes\n # [info]Reports:[/info]\n # ar annual report for performance of a given portfolio\n console.print(text=help_text, menu=\"Portfolio\")\n self.update_choices()\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 484, "n_words": 150, "vocab_size": 73, "complexity": 23, "nloc": 78, "token_counts": 446, "n_ast_nodes": 899, "n_identifiers": 21, "random_cut": "def print_help(self):\n \n mt = MenuText(\"portfolio/\")\n mt.add_menu(\"bro\")\n mt.add_menu(\"po\")\n mt.add_raw(\"\\n\")\n\n mt.add_cmd(\"load\")\n mt.add_cmd(\"show\")\n mt.add_cmd(\"bench\")\n mt.add_raw(\"\\n\")\n mt.add_param(\"_loaded\", self.portfolio_name)\n mt.add_param(\"_riskfreerate\", self.portfolio_name)\n mt.add_param(\"_benchmark\", self.benchmark_name)\n mt.add_raw(\"\\n\")\n\n mt.add_info(\"_graphs_\")\n mt.add_cmd(\"holdv\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"holdp\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"yret\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"mret\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"dret\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"distr\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"maxdd\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"rvol\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"rsharpe\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"rsort\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"rbeta\", self.portfolio_name and self.benchmark_name)\n\n mt.add_info(\"_metrics_\")\n mt.add_cmd(\"alloc\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"attrib\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"summary\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"alloc\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"attrib\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"metric\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"perf\", self.portfolio_name and self.benchmark_name)\n\n mt.add_info(\"_risk_\")\n mt.add_cmd(\"var\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"es\", self.portfolio_name and self.benchmark_name)\n mt.add_cmd(\"os\", self.portfolio_name 
and self.benchmark_name)\n\n port = bool(self.portfolio_name)\n port_bench = bool(self.portfolio_name) and bool(self.benchmark_name)\n\n help_text = f\n # TODO: Clean up the reports inputs\n # TODO: Edit the allocation to allow the different asset classes\n # [info]Reports:[/info]\n # ar annual report for performance of a given portfolio\n console.print(text=help_text, menu" }, { "id": 194246, "commit_id": "74ea933c29898f838991acdf49a70cb20b4ec3ad", "repo": "vision", "path": "test/test_prototype_transforms_functional.py", "file_name": "test_prototype_transforms_functional.py", "fun_name": "test_float32_vs_uint8", "commit_message": "Cleanup prototype transforms tests (#6984)\n\n* minor cleanup of the prototype transforms tests\r\n\r\n* refactor ImagePair\r\n\r\n* pretty format enum", "code": "def test_float32_vs_uint8(self, test_id, info, args_kwargs):\n (input, *other_args), kwargs = args_kwargs.load(\"cpu\")\n\n if input.dtype != torch.uint8:\n pytest.skip(f\"Input dtype is {input.dtype}.\")\n\n adapted_other_args, adapted_kwargs = info.float32_vs_uint8(other_args, kwargs)\n\n actual = info.kernel(\n F.convert_dtype_image_tensor(input, dtype=torch.float32),\n *adapted_other_args,\n **adapted_kwargs,\n )\n\n expected = F.convert_dtype_image_tensor(info.kernel(input, *other_args, **kwargs), dtype=torch.float32)\n\n assert_close(\n actual,\n expected,\n **info.get_closeness_kwargs(test_id, dtype=torch.float32, device=input.device),\n msg=parametrized_error_message(*other_args, **kwargs),\n )\n\n\n@pytest.fixture", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 189, "n_words": 47, "vocab_size": 42, "complexity": 2, "nloc": 17, "token_counts": 143, "n_ast_nodes": 227, "n_identifiers": 29, "random_cut": "def test_float32_vs_uint8(self, test_id, info, args_kwargs):\n " }, { "id": 43992, "commit_id": "2b4bf7fe67fc656ceb7bdaad36453b7a5b83ef04", "repo": "airflow", "path": "tests/api/common/test_mark_tasks.py", "file_name": "test_mark_tasks.py", "fun_name": "_create_test_dag_run", "commit_message": "Use `DagRun.run_id` instead of `execution_date` when updating state of TIs(UI & REST API) (#18724)\n\nWe can now use run_id as well as execution_date to update states\r\nof task instances\r\n\r\nCo-authored-by: Tzu-ping Chung \r\nCo-authored-by: Ash Berlin-Taylor ", "code": "def _create_test_dag_run(self, state, date):\n return self.dag1.create_dagrun(\n run_type=DagRunType.MANUAL, state=state, start_date=date, execution_date=date\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 35, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 34, "n_ast_nodes": 48, "n_identifiers": 11, "random_cut": "def _create_test_dag_run(self, state, date):\n return self.dag1.create_dagrun(\n run_type=DagRunType.MANUAL," }, { "id": 220623, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/queues.py", "file_name": "queues.py", "fun_name": "put_nowait", "commit_message": "add python 3.10.4 for windows", "code": "def put_nowait(self, item):\n \n if self.full():\n raise QueueFull\n self._put(item)\n self._unfinished_tasks += 1\n self._finished.clear()\n self._wakeup_next(self._getters)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 66, "n_words": 
13, "vocab_size": 13, "complexity": 2, "nloc": 7, "token_counts": 43, "n_ast_nodes": 73, "n_identifiers": 11, "random_cut": "def put_nowait(self, item):\n \n " }, { "id": 105908, "commit_id": "2945690ea731f85a356220a71cdc630281c676f4", "repo": "datasets", "path": "src/datasets/arrow_writer.py", "file_name": "arrow_writer.py", "fun_name": "get_parquet_lengths", "commit_message": "Multiprocessed dataset builder [WIP] (#5107)\n\n* multiprocessing-compatible naming scheme and refactor\r\n\r\n* multiprocessed shard writing for GeneratorBasedBuilder\r\n\r\n* multiprocessed shard writing for ArrowBasedBuilder\r\n\r\n* style\r\n\r\n* multiprocessed dataset loading\r\n\r\n* compatibility with non-sharded datasets\r\n\r\n* bugfix\r\n\r\n* bugfix\r\n\r\n* removed unused import\r\n\r\n* fixed bad ordering\r\n\r\n* less misleading tqdm\r\n\r\n* fix gen_kwargs distribution + read shards\r\n\r\n* minor\r\n\r\n* minor2\r\n\r\n* support beam datasets\r\n\r\n* docstrings + minor\r\n\r\n* add iflatmap_unordered for parallel write & progress updates\r\n\r\n* use 1 tqdm bar receiving updates from subprocesses\r\n\r\n* docs\r\n\r\n* add test_iflatmap_unordered\r\n\r\n* style\r\n\r\n* test arrow_reader.py\r\n\r\n* fix test_iflatmap_unordered\r\n\r\n* add Beam test_download_and_prepare_sharded\r\n\r\n* test gen_kwargs distribution\r\n\r\n* test download_and_prepare with num_proc\r\n\r\n* style\r\n\r\n* improve test\r\n\r\n* don't close the pool\r\n\r\n* fix multiprocessing on windows\r\n\r\n* keep multiprocessing disabled by default\r\n\r\n* again + docs\r\n\r\n* more docs\r\n\r\n* more docs\r\n\r\n* some var renaming\r\n\r\n* style\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Mario Šaško \r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Mario Šaško \r\n\r\n* added utils/sharding.py\r\n\r\n* style\r\n\r\n* style\r\n\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: Mario Šaško ", "code": "def get_parquet_lengths(sources) -> List[int]:\n shard_lengths = []\n disable = not logging.is_progress_bar_enabled()\n for source in logging.tqdm(sources, unit=\"parquet files\", disable=disable):\n parquet_file = pa.parquet.ParquetFile(source)\n shard_lengths.append(parquet_file.metadata.num_rows)\n return shard_lengths\n\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 49, "n_words": 24, "vocab_size": 21, "complexity": 2, "nloc": 7, "token_counts": 62, "n_ast_nodes": 99, "n_identifiers": 18, "random_cut": "def get_parquet_lengths(sources) -> List[int]:\n shard_lengths = []\n disable = not logging.is_progress_bar_enabled()\n for source in logging.tqdm(sources, unit=\"" }, { "id": 279397, "commit_id": "be73ac1a1e25d9abd4d793cba9707098d7adf231", "repo": "keras", "path": "keras/datasets/mnist.py", "file_name": "mnist.py", "fun_name": "load_data", "commit_message": "Add f-string format and lint with flynt on the whole codebase", "code": "def load_data(path=\"mnist.npz\"):\n \n origin_folder = (\n \"https://storage.googleapis.com/tensorflow/tf-keras-datasets/\"\n )\n path = get_file(\n path,\n origin=origin_folder + \"mnist.npz\",\n file_hash=( # noqa: E501\n \"731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1\"\n ),\n )\n with np.load(path, allow_pickle=True) as f:\n x_train, y_train = f[\"x_train\"], f[\"y_train\"]\n x_test, y_test = f[\"x_test\"], f[\"y_test\"]\n\n return (x_train, y_train), 
(x_test, y_test)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 127, "n_words": 41, "vocab_size": 37, "complexity": 1, "nloc": 15, "token_counts": 84, "n_ast_nodes": 146, "n_identifiers": 14, "random_cut": "def load_data(path=\"mnist.npz\"):\n \n origin_folder = (\n \"https://storage.googleapis.com/tensorflow/tf-keras-datasets/\"\n )\n path = get_file(\n path,\n origin=origin_folder + \"mnist.npz\",\n file_hash=( # noqa: E501\n \"731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1\"\n ),\n )\n with np.load(path, allow_pickle=True) as f:\n x_train, y_train = f[\"x_train\"]" }, { "id": 147994, "commit_id": "52eaf020bcd4e8ebeb94af11a8039313a37488d1", "repo": "ray", "path": "python/ray/tune/tests/test_checkpoint_manager.py", "file_name": "test_checkpoint_manager.py", "fun_name": "testBestCheckpointsWithNan", "commit_message": "[tune] Treat checkpoints with nan value as worst (#23862)\n\nChanges the logic in CheckpointManager to consider checkpoints with nan value of the metric as worst values, meaning they will be deleted first if keep_checkpoints_num is set.", "code": "def testBestCheckpointsWithNan(self):\n \n keep_checkpoints_num = 2\n checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num)\n checkpoints = [\n _TuneCheckpoint(\n _TuneCheckpoint.PERSISTENT, None, self.mock_result(float(\"nan\"), i)\n )\n for i in range(2)\n ]\n checkpoints += [\n _TuneCheckpoint(_TuneCheckpoint.PERSISTENT, 3, self.mock_result(0, 3))\n ]\n random.shuffle(checkpoints)\n\n for checkpoint in checkpoints:\n checkpoint_manager.on_checkpoint(checkpoint)\n\n best_checkpoints = checkpoint_manager.best_checkpoints()\n # best_checkpoints is sorted from worst to best\n self.assertEqual(len(best_checkpoints), keep_checkpoints_num)\n self.assertEqual(best_checkpoints[0].value, None)\n self.assertEqual(best_checkpoints[1].value, 3)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 221, "n_words": 53, "vocab_size": 44, "complexity": 3, "nloc": 19, "token_counts": 130, "n_ast_nodes": 203, "n_identifiers": 19, "random_cut": "def testBestCheckpointsWithNan(self):\n \n keep_checkpoints_num = 2\n checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num)\n checkpoints = [\n _TuneCheckpoint(\n _TuneCheckpoint.PERSISTENT, None, self.mock_result(float(\"nan\"), i)\n )\n for i in range(2)\n ]\n checkpoints += [\n _TuneCheckpoint(_TuneCheckpoint.PERSISTENT, 3, self.mock_result(0, 3))\n ]\n random.shuffle(checkpoints)\n\n for checkpoint in checkpoints:\n checkpoint_manager.on_checkpoint(checkpoint)\n\n best_checkpoints = checkpoint_manager.best_checkpoints" }, { "id": 178612, "commit_id": "a470b75c8e045312ea22dbfb6c5fc6702835b31c", "repo": "Nuitka", "path": "nuitka/freezer/Standalone.py", "file_name": "Standalone.py", "fun_name": "copyDllsUsed", "commit_message": "macOS: Massive improvements for dependency scans\n\n* Was not recursively scanning dependencies and therefore could\n miss some of them.\n\n* Made internal functions private.\n\n* Make sure to pass proper \"package\" value to DLL scans, so it\n can include the needed directories.\n\n* Do not mutate information of DLL map, it is used later for\n other things and we now detect errors in that.", "code": "def copyDllsUsed(source_dir, dist_dir, standalone_entry_points):\n # This is complex, because we also need to handle OS specifics.\n\n used_dlls = 
detectUsedDLLs(\n source_dir=source_dir,\n standalone_entry_points=standalone_entry_points,\n use_cache=not Options.shallNotUseDependsExeCachedResults()\n and not Options.getWindowsDependencyTool() == \"depends.exe\",\n update_cache=not Options.shallNotStoreDependsExeCachedResults()\n and not Options.getWindowsDependencyTool() == \"depends.exe\",\n )\n\n _removeDuplicateDlls(used_dlls=used_dlls)\n\n dll_map = _copyDllsUsed(dist_dir=dist_dir, used_dlls=used_dlls)\n\n # TODO: This belongs inside _copyDllsUsed\n if Utils.isMacOS():\n # For macOS, the binary and the DLLs needs to be changed to reflect\n # the relative DLL location in the \".dist\" folder.\n for standalone_entry_point in standalone_entry_points:\n _fixupBinaryDLLPathsMacOS(\n binary_filename=standalone_entry_point.dest_path,\n package_name=standalone_entry_point.package_name,\n dll_map=dll_map,\n original_location=standalone_entry_point.source_path,\n )\n\n for original_path, package_name, dll_filename in dll_map:\n _fixupBinaryDLLPathsMacOS(\n binary_filename=os.path.join(dist_dir, dll_filename),\n package_name=package_name,\n dll_map=dll_map,\n original_location=original_path,\n )\n\n # Remove or update rpath settings.\n if Utils.getOS() in (\"Linux\", \"Darwin\"):\n # For Linux, the \"rpath\" of libraries may be an issue and must be\n # removed.\n if Utils.isMacOS():\n start = 0\n else:\n start = 1\n\n for standalone_entry_point in standalone_entry_points[start:]:\n count = relpath(\n path=standalone_entry_point.dest_path, start=dist_dir\n ).count(os.path.sep)\n\n rpath = os.path.join(\"$ORIGIN\", *([\"..\"] * count))\n setSharedLibraryRPATH(standalone_entry_point.dest_path, rpath)\n\n for _original_path, _package_name, dll_filename in dll_map:\n setSharedLibraryRPATH(os.path.join(dist_dir, dll_filename), \"$ORIGIN\")\n\n if Utils.isMacOS():\n addMacOSCodeSignature(\n filenames=[\n standalone_entry_point.dest_path\n for standalone_entry_point in standalone_entry_points\n ]\n + [\n os.path.join(dist_dir, dll_filename)\n for _original_path, _package_name, dll_filename in dll_map\n ]\n )\n\n Plugins.onCopiedDLLs(dist_dir=dist_dir, used_dlls=used_dlls)\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 700, "n_words": 182, "vocab_size": 119, "complexity": 13, "nloc": 51, "token_counts": 315, "n_ast_nodes": 493, "n_identifiers": 42, "random_cut": "def copyDllsUsed(source_dir, dist_dir, standalone_entry_points):\n # This is complex, because we also need to handle OS specifics.\n\n used_dlls = detectUsedDLLs(\n source_dir=source_dir,\n standalone_entry_points=standalone_entry_points,\n use_cache=not Options.shallNotUseDependsExeCachedResults()" }, { "id": 294559, "commit_id": "c024033dae98f01380842dac35be743fbefa0a36", "repo": "core", "path": "homeassistant/components/samsungtv/media_player.py", "file_name": "media_player.py", "fun_name": "async_update", "commit_message": "Add Upnp volume control/status to SamsungTV (#68663)\n\nCo-authored-by: epenet \r\nCo-authored-by: J. 
Nick Koston ", "code": "async def async_update(self) -> None:\n \n if self._auth_failed or self.hass.is_stopping:\n return\n if self._power_off_in_progress():\n self._attr_state = STATE_OFF\n else:\n self._attr_state = (\n STATE_ON if await self._bridge.async_is_on() else STATE_OFF\n )\n\n if self._attr_state != STATE_ON:\n return\n\n startup_tasks: list[Coroutine[Any, Any, None]] = []\n\n if not self._app_list_event.is_set():\n startup_tasks.append(self._async_startup_app_list())\n\n if not self._upnp_device and self._ssdp_rendering_control_location:\n startup_tasks.append(self._async_startup_upnp())\n\n if startup_tasks:\n await asyncio.gather(*startup_tasks)\n\n if not (service := self._get_upnp_service()):\n return\n\n get_volume, get_mute = await asyncio.gather(\n service.action(\"GetVolume\").async_call(InstanceID=0, Channel=\"Master\"),\n service.action(\"GetMute\").async_call(InstanceID=0, Channel=\"Master\"),\n )\n LOGGER.debug(\"Upnp GetVolume on %s: %s\", self._host, get_volume)\n if (volume_level := get_volume.get(\"CurrentVolume\")) is not None:\n self._attr_volume_level = volume_level / 100\n LOGGER.debug(\"Upnp GetMute on %s: %s\", self._host, get_mute)\n if (is_muted := get_mute.get(\"CurrentMute\")) is not None:\n self._attr_is_volume_muted = is_muted\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 373, "n_words": 103, "vocab_size": 65, "complexity": 13, "nloc": 31, "token_counts": 252, "n_ast_nodes": 415, "n_identifiers": 40, "random_cut": "async def async_update(self) -> None:\n \n if self._auth_failed or self.hass.is_stopping:\n return\n if self._power_off_in_progress():\n self._attr_state = STATE_OFF\n else:\n self._attr_state = (\n STATE_ON if await self._bridge.async_is_on() else STATE_OFF\n )\n\n if self._attr_state != STATE_ON:\n return\n\n startup_tasks: list[Coroutine[Any, Any, None]] = []\n\n if not self._app_list_event.is_set():\n startup_tasks.append(self._async_startup_app_list())\n\n if not self._upnp_device and self._ssdp_rendering_control_location:\n startup_tasks.append(self._async_startup_upnp())\n\n if startup_tasks:\n await asyncio.gather(*startup_tasks)\n\n if not (service := self._get_upnp_service()):\n return\n\n get_volume, get_mute = await asyncio.gather(\n service.action(\"GetVolume\").async_call(InstanceID=0, Channel=\"Master\"),\n service.action(\"GetMute\").async_call(InstanceID=0, Channel=\"Master\"),\n )\n LOGGER.debug(" }, { "id": 164696, "commit_id": "047137ce2619cfe2027e3999dfb92eb614d9a485", "repo": "pandas", "path": "pandas/io/excel/_xlwt.py", "file_name": "_xlwt.py", "fun_name": "fm_datetime", "commit_message": "DEP: Protect some ExcelWriter attributes (#45795)\n\n* DEP: Deprecate ExcelWriter attributes\r\n\r\n* DEP: Deprecate ExcelWriter attributes\r\n\r\n* Fixup for test\r\n\r\n* Move tests and restore check_extension\r\n\r\ny\r\n\r\n* Deprecate xlwt fm_date and fm_datetime; doc improvements", "code": "def fm_datetime(self):\n \n self._deprecate(\"fm_datetime\")\n return self._fm_datetime\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 26, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 3, "token_counts": 16, "n_ast_nodes": 31, "n_identifiers": 4, "random_cut": "def fm_datetime(self):\n \n self._deprecate(\"fm_datetime\")\n r" }, { "id": 280543, "commit_id": "dc1fe7f95b389e1bda9056ba53e739821fbe8e6e", "repo": "keras", "path": 
"keras/layers/preprocessing/index_lookup.py", "file_name": "index_lookup.py", "fun_name": "_load_assets", "commit_message": "Add Preprocessing Layer support in Keras v3 saving (IntegerLookup, StringLoop, TextVectorization).\n\nPiperOrigin-RevId: 491682409", "code": "def _load_assets(self, dir_path):\n if self.input_vocabulary:\n # Vocab saved in config.\n # TODO: consider unifying both paths.\n return\n vocabulary_filepath = tf.io.gfile.join(dir_path, \"vocabulary.txt\")\n # TODO: fix bug with include_special_tokens and set reload from file.\n with open(vocabulary_filepath, \"r\") as f:\n lines = f.read().split(\"\\n\")\n if tf.as_dtype(self.vocabulary_dtype) == tf.string:\n values = [str(line) for line in lines]\n else:\n values = [int(line) for line in lines]\n if self.output_mode == TF_IDF:\n self.set_vocabulary(values, idf_weights=False)\n else:\n self.set_vocabulary(values)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 242, "n_words": 67, "vocab_size": 50, "complexity": 6, "nloc": 14, "token_counts": 114, "n_ast_nodes": 192, "n_identifiers": 25, "random_cut": "def _load_assets(self, dir_path):\n if self.input_vocabulary:\n # Vocab saved in config.\n # TODO: consider unifying both paths.\n return\n " }, { "id": 14095, "commit_id": "faee3301eb2c0d4157150a2f4cde2b4edb32ac8e", "repo": "pydantic", "path": "tests/test_schema.py", "file_name": "test_schema.py", "fun_name": "test_nested_python_dataclasses", "commit_message": "Fix regression in handling of nested dataclasses in `get_flat_models_from_field` (#3819)\n\n* add test for nested python dataclass schema generation\r\n\r\n* fix handling of dataclasses in `get_flat_models_from_field`\r\n\r\n* add change note", "code": "def test_nested_python_dataclasses():\n \n\n from dataclasses import dataclass as python_dataclass\n", "url": "https://github.com/pydantic/pydantic.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 22, "token_counts": 118, "n_ast_nodes": 22, "n_identifiers": 4, "random_cut": "def test_nested_python_dataclasses():\n \n\n from dataclasses import dataclass as python_dataclass\n" }, { "id": 208416, "commit_id": "82d1a374575d9785708f144976cf139c76c7acb7", "repo": "ipython", "path": "IPython/conftest.py", "file_name": "conftest.py", "fun_name": "pytest_collection_modifyitems", "commit_message": "make sure to run async tests\n\nthere are some `async def` tests, but they are skipped without `mark(\"asyncio\")`", "code": "def pytest_collection_modifyitems(items):\n \n for item in items:\n if inspect.iscoroutinefunction(item.obj):\n item.add_marker(\"asyncio\")\n assert not inspect.isasyncgenfunction(item.obj)\n\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 43, "n_words": 12, "vocab_size": 12, "complexity": 3, "nloc": 5, "token_counts": 37, "n_ast_nodes": 64, "n_identifiers": 8, "random_cut": "def pytest_collection_modifyitems(items):\n \n for item in items:\n if inspect.iscoroutinefunction(item.obj):\n item.add_marker(\"asyncio\")\n assert not inspect.isasync" }, { "id": 213537, "commit_id": "d743336b1f3654cd0315f380f43eed4116997c1d", "repo": "ivy", "path": "ivy/backends/numpy/core/image.py", "file_name": "image.py", "fun_name": "gradient_image", "commit_message": "renamed dev_str arg to dev for all methods.", "code": "def 
gradient_image(x):\n x_shape = _ivy.shape(x)\n batch_shape = x_shape[:-3]\n image_dims = x_shape[-3:-1]\n dev = _ivy.dev(x)\n # to list\n batch_shape = list(batch_shape)\n image_dims = list(image_dims)\n num_dims = x_shape[-1]\n # BS x H-1 x W x D\n dy = x[..., 1:, :, :] - x[..., :-1, :, :]\n # BS x H x W-1 x D\n dx = x[..., :, 1:, :] - x[..., :, :-1, :]\n # BS x H x W x D\n dy = _ivy.concatenate((dy, _ivy.zeros(batch_shape + [1, image_dims[1], num_dims], dev=dev)), -3)\n dx = _ivy.concatenate((dx, _ivy.zeros(batch_shape + [image_dims[0], 1, num_dims], dev=dev)), -2)\n # BS x H x W x D, BS x H x W x D\n return dy, dx\n", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 163, "n_words": 110, "vocab_size": 48, "complexity": 1, "nloc": 13, "token_counts": 184, "n_ast_nodes": 280, "n_identifiers": 14, "random_cut": "def gradient_image(x):\n x_shape = _ivy.shape(x)\n batch_shape = x_shape[:-3]\n image_dims = x_shape[-3:-1]\n dev = _ivy.dev(x)\n # to list\n batch_sh" }, { "id": 297869, "commit_id": "cb13418babd21a1e9584978b0c523f1b1e4e1cb0", "repo": "core", "path": "homeassistant/components/http/__init__.py", "file_name": "__init__.py", "fun_name": "_create_ssl_context", "commit_message": "String formatting and max line length - Part 2 (#84393)", "code": "def _create_ssl_context(self) -> ssl.SSLContext | None:\n context: ssl.SSLContext | None = None\n assert self.ssl_certificate is not None\n try:\n if self.ssl_profile == SSL_INTERMEDIATE:\n context = ssl_util.server_context_intermediate()\n else:\n context = ssl_util.server_context_modern()\n context.load_cert_chain(self.ssl_certificate, self.ssl_key)\n except OSError as error:\n if not self.hass.config.safe_mode:\n raise HomeAssistantError(\n f\"Could not use SSL certificate from {self.ssl_certificate}:\"\n f\" {error}\"\n ) from error\n _LOGGER.error(\n \"Could not read SSL certificate from %s: %s\",\n self.ssl_certificate,\n error,\n )\n try:\n context = self._create_emergency_ssl_context()\n except OSError as error2:\n _LOGGER.error(\n \"Could not create an emergency self signed ssl certificate: %s\",\n error2,\n )\n context = None\n else:\n _LOGGER.critical(\n \"Home Assistant is running in safe mode with an emergency self\"\n \" signed ssl certificate because the configured SSL certificate was\"\n \" not usable\"\n )\n return context\n\n if self.ssl_peer_certificate:\n if context is None:\n raise HomeAssistantError(\n \"Failed to create ssl context, no fallback available because a peer\"\n \" certificate is required.\"\n )\n\n context.verify_mode = ssl.CERT_REQUIRED\n context.load_verify_locations(self.ssl_peer_certificate)\n\n return context\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 731, "n_words": 147, "vocab_size": 87, "complexity": 7, "nloc": 44, "token_counts": 173, "n_ast_nodes": 303, "n_identifiers": 27, "random_cut": "def _create_ssl_context(self) -> ssl.SSLContext | None:\n context: ssl.SSLContext | None = None\n assert self.ssl_certificate is not None\n try:\n if self.ssl_profile == SSL_INTERMEDIATE:\n context = ssl_util.server_context_intermediate()\n else:\n context = ssl_util.server_context_modern()\n contex" }, { "id": 186574, "commit_id": "16aad35d31a887dab157f9d4f5e0fe9218d06064", "repo": "certbot", "path": "certbot-nginx/certbot_nginx/_internal/configurator.py", "file_name": "configurator.py", "fun_name": "recovery_routine", 
"commit_message": "Fully type certbot-nginx module (#9124)\n\n* Work in progress\r\n\r\n* Fix type\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Oups.\r\n\r\n* Fix typing in UnspacedList\r\n\r\n* Fix logic\r\n\r\n* Finish typing\r\n\r\n* List certbot-nginx as fully typed in tox\r\n\r\n* Fix lint\r\n\r\n* Fix checks\r\n\r\n* Organize imports\r\n\r\n* Fix typing for Python 3.6\r\n\r\n* Fix checks\r\n\r\n* Fix lint\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/configurator.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/configurator.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Fix signature of deploy_cert regarding the installer interface\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/obj.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Fix types\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/parser.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Precise type\r\n\r\n* Precise _coerce possible inputs/outputs\r\n\r\n* Fix type\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/http_01.py\r\n\r\nCo-authored-by: ohemorange \r\n\r\n* Fix type\r\n\r\n* Remove an undesirable implementation.\r\n\r\n* Fix type\r\n\r\nCo-authored-by: alexzorin \r\nCo-authored-by: ohemorange ", "code": "def recovery_routine(self) -> None:\n \n super().recovery_routine()\n self.new_vhost = None\n self.parser.load()\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 37, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 11, "token_counts": 27, "n_ast_nodes": 49, "n_identifiers": 6, "random_cut": "def recovery_routine(self) -> None:\n \n super().recovery_routine()\n self.new_vho" }, { "id": 225796, "commit_id": "a48611ee12b6758d752b4ca2f3f640f94234522d", "repo": "llama_index", "path": "tests/indices/embedding/test_base.py", "file_name": "test_base.py", "fun_name": "test_embedding_similarity", "commit_message": "Add general embedding abstraction (#79)\n\n\r\n\r\nCo-authored-by: Jerry Liu ", "code": "def test_embedding_similarity() -> None:\n \n embed_model = OpenAIEmbedding()\n text_embedding = [3.0, 4.0, 0.0]\n query_embedding = [0.0, 1.0, 0.0]\n cosine = embed_model.similarity(query_embedding, text_embedding)\n assert cosine == 0.8\n\n\n@pytest.fixture", "url": "https://github.com/jerryjliu/llama_index.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 43, "n_words": 26, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 58, "n_ast_nodes": 76, "n_identifiers": 9, "random_cut": "def test_embedding_similarity() -> None:\n \n embed_model = OpenAIEmbedding()\n text_embedding = [3.0, 4.0, 0.0]\n query_embedding = [0.0, 1.0, 0.0]\n cosine = embed_model.similarity(query_embeddin" }, { "id": 195758, "commit_id": "ed4b2f2458b02f18df0e4449be38269ef88b90f6", "repo": "sympy", "path": "sympy/integrals/rubi/tests/test_utility_function.py", "file_name": "test_utility_function.py", "fun_name": "test_TrigReduce", "commit_message": "Fixed stuff in rubi", "code": "def test_TrigReduce():\n assert TrigReduce(cos(x)**2) == cos(2*x)/2 + S.Half\n assert TrigReduce(cos(x)**2*sin(x)) == sin(x)/4 + sin(3*x)/4\n assert TrigReduce(cos(x)**2+sin(x)) == sin(x) + cos(2*x)/2 + S.Half\n assert TrigReduce(cos(x)**2*sin(x)**5) == 5*sin(x)/64 + sin(3*x)/64 - 3*sin(5*x)/64 + sin(7*x)/64\n assert TrigReduce(2*sin(x)*cos(x) + 
2*cos(x)**2) == sin(2*x) + cos(2*x) + 1\n assert TrigReduce(sinh(a + b*x)**2) == cosh(2*a + 2*b*x)/2 - S.Half\n assert TrigReduce(sinh(a + b*x)*cosh(a + b*x)) == sinh(2*a + 2*b*x)/2\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 82, "n_words": 62, "vocab_size": 31, "complexity": 1, "nloc": 8, "token_counts": 255, "n_ast_nodes": 401, "n_identifiers": 11, "random_cut": "def test_TrigReduce():\n assert TrigReduce(cos(x)**2) == cos(2*x)/2 + S.Half\n assert TrigReduce(cos(x)**2*sin(x" }, { "id": 118558, "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", "repo": "streamlit", "path": "lib/streamlit/in_memory_file_manager.py", "file_name": "in_memory_file_manager.py", "fun_name": "_get_session_id", "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, primarily \"script run\", \"session\", and \"app\".", "code": "def _get_session_id() -> str:\n \n ctx = get_script_run_ctx()\n if ctx is None:\n # This is only None when running \"python myscript.py\" rather than\n # \"streamlit run myscript.py\". In which case the session ID doesn't\n # matter and can just be a constant, as there's only ever \"session\".\n return \"dontcare\"\n else:\n return ctx.session_id\n\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 98, "n_words": 51, "vocab_size": 45, "complexity": 2, "nloc": 7, "token_counts": 25, "n_ast_nodes": 50, "n_identifiers": 5, "random_cut": "def _get_session_id() -> str:\n \n ctx " }, { "id": 300243, "commit_id": "1be2438ef67c7f523654bdb849cbed5f4c865365", "repo": "core", "path": "homeassistant/components/mqtt/climate.py", "file_name": "climate.py", "fun_name": "async_set_swing_mode", "commit_message": "Use climate enums in mqtt (#70696)", "code": "async def async_set_swing_mode(self, swing_mode):\n \n # CONF_SEND_IF_OFF is deprecated, support will be removed with release 2022.9\n if self._send_if_off or self._current_operation != HVACMode.OFF:\n payload = self._command_templates[CONF_SWING_MODE_COMMAND_TEMPLATE](\n swing_mode\n )\n await self._publish(CONF_SWING_MODE_COMMAND_TOPIC, payload)\n\n if self._topic[CONF_SWING_MODE_STATE_TOPIC] is None:\n self._current_swing_mode = swing_mode\n self.async_write_ha_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 135, "n_words": 37, "vocab_size": 33, "complexity": 4, "nloc": 9, "token_counts": 61, "n_ast_nodes": 100, "n_identifiers": 16, "random_cut": "async def async_set_swing_mode(self, swing_mode):\n \n # CONF_SEND_IF_OFF is deprecated, support will be removed with release 2022.9\n if self._send_if_off or self._current_operation != HVACMode.OFF:\n payload = self._command_templates[CONF_SWING_MODE_COMMAND_TEMPLATE](\n swing_mode\n )\n await self._publish(CONF_SWING_MODE_COMMAND_TOPIC, payload)\n\n if self._topic[CONF_SWING_MODE_STATE_" }, { "id": 279189, "commit_id": "afd86e95fc91b98dfb30eac27933b1e10b201b97", "repo": "keras", "path": "keras/benchmarks/model_components_benchmarks_test.py", "file_name": "model_components_benchmarks_test.py", "fun_name": "_run", "commit_message": "Code reformated", "code": "def _run(self, func, num_iters, execution_mode=None):\n total_time = 
run_benchmark(func, num_iters, execution_mode)\n mean_us = total_time * 1e6 / num_iters\n self.report_benchmark(\n iters=num_iters,\n wall_time=mean_us,\n metrics=[\n {\n \"name\": \"exp_per_sec\",\n \"value\": float(f\"{num_iters / total_time:.3f}\"),\n },\n {\n \"name\": \"us_per_exp\",\n \"value\": float(f\"{total_time * 1000000.0 / num_iters:.3f}\"),\n },\n ],\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 248, "n_words": 41, "vocab_size": 31, "complexity": 1, "nloc": 17, "token_counts": 78, "n_ast_nodes": 144, "n_identifiers": 13, "random_cut": "def _run(self, func, num_iters, execution_mode=None):\n total_time = run_benchmark(func, num_iters, execution_mode)\n mean_us = total_time * 1e6 / num_iters\n self.report_benchmark(\n iters=num_iters,\n wall_time=mean_us,\n metrics=[\n {\n \"name\": \"exp_per_sec\",\n \"value\": float(f\"{num_iters / total_time:.3f}\"),\n },\n {\n \"name\": \"us_per_exp\",\n \"value\": float(f\"{total_time * 1000000.0 / num_iters:.3f}\"),\n },\n ],\n )\n" }, { "id": 123618, "commit_id": "df4293473d2fb6e887e31522cab5aff95e201581", "repo": "sqlmap", "path": "plugins/dbms/maxdb/enumeration.py", "file_name": "enumeration.py", "fun_name": "getHostname", "commit_message": "Fixing DeprecationWarning (logger.warn)", "code": "def getHostname(self):\n warnMsg = \"on SAP MaxDB it is not possible to enumerate the hostname\"\n logger.warning(warnMsg)\n", "url": "https://github.com/sqlmapproject/sqlmap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 29, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 26, "n_identifiers": 5, "random_cut": "def getHostname(self):\n warnMsg = \"on SAP MaxDB it is not possible to enumerate the hostname\"\n logger." 
}, { "id": 261582, "commit_id": "d8fa96c29828e3ca79ddd5d7466521ac4d95213c", "repo": "scikit-learn", "path": "sklearn/impute/tests/test_common.py", "file_name": "test_common.py", "fun_name": "test_keep_empty_features", "commit_message": "ENH keep features with all missing values during imputation (#24770)\n\nCo-authored-by: Chiara Marmo \r\nCo-authored-by: Julien Jerphanion \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>\r\nCo-authored-by: Vitor SRG \r\nFixes https://github.com/scikit-learn/scikit-learn/pull/16695\r\nFixes https://github.com/scikit-learn/scikit-learn/issues/16426\r\nFixes https://github.com/scikit-learn/scikit-learn/issues/16977", "code": "def test_keep_empty_features(imputer, keep_empty_features):\n \n X = np.array([[np.nan, 1], [np.nan, 2], [np.nan, 3]])\n imputer = imputer.set_params(\n add_indicator=False, keep_empty_features=keep_empty_features\n )\n\n for method in [\"fit_transform\", \"transform\"]:\n X_imputed = getattr(imputer, method)(X)\n if keep_empty_features:\n assert X_imputed.shape == X.shape\n else:\n assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 105, "n_words": 40, "vocab_size": 34, "complexity": 3, "nloc": 11, "token_counts": 109, "n_ast_nodes": 165, "n_identifiers": 13, "random_cut": "def test_keep_empty_features(imputer, keep_empty_features):\n \n X = np.array([[np.nan, 1], [np.nan, 2], [np.nan, 3]])\n imputer = imputer.set_params(\n add_indicator=False, keep_empty_features=keep_empty_features\n )\n\n for method in [\"fit_transform\", \"transform\"]:\n X_imputed = getattr(imputer, method)(X)\n if keep_empty_features:\n assert X_imputed.shape == X.shape\n else:\n assert X_imputed.shap" }, { "id": 148448, "commit_id": "09fae25c9426cd03df2f2d031d16a6e3b1533a55", "repo": "freqtrade", "path": "tests/test_plotting.py", "file_name": "test_plotting.py", "fun_name": "test_generate_profit_graph", "commit_message": "Fix some tests after drawdown calculation change", "code": "def test_generate_profit_graph(testdatadir):\n filename = testdatadir / \"backtest-result_test.json\"\n trades = load_backtest_data(filename)\n timerange = TimeRange.parse_timerange(\"20180110-20180112\")\n pairs = [\"TRX/BTC\", \"XLM/BTC\"]\n trades = trades[trades['close_date'] < pd.Timestamp('2018-01-12', tz='UTC')]\n\n data = history.load_data(datadir=testdatadir,\n pairs=pairs,\n timeframe='5m',\n timerange=timerange)\n\n trades = trades[trades['pair'].isin(pairs)]\n\n fig = generate_profit_graph(pairs, data, trades, timeframe=\"5m\", stake_currency='BTC')\n assert isinstance(fig, go.Figure)\n\n assert fig.layout.title.text == \"Freqtrade Profit plot\"\n assert fig.layout.yaxis.title.text == \"Price\"\n assert fig.layout.yaxis2.title.text == \"Profit BTC\"\n assert fig.layout.yaxis3.title.text == \"Profit BTC\"\n\n figure = fig.layout.figure\n assert len(figure.data) == 7\n\n avgclose = find_trace_in_fig_data(figure.data, \"Avg close price\")\n assert isinstance(avgclose, go.Scatter)\n\n profit = find_trace_in_fig_data(figure.data, \"Profit\")\n assert isinstance(profit, go.Scatter)\n drawdown = find_trace_in_fig_data(figure.data, \"Max drawdown 35.69%\")\n assert isinstance(drawdown, go.Scatter)\n parallel = find_trace_in_fig_data(figure.data, \"Parallel trades\")\n assert isinstance(parallel, go.Scatter)\n\n underwater = find_trace_in_fig_data(figure.data, \"Underwater Plot\")\n 
assert isinstance(underwater, go.Scatter)\n\n for pair in pairs:\n profit_pair = find_trace_in_fig_data(figure.data, f\"Profit {pair}\")\n assert isinstance(profit_pair, go.Scatter)\n\n with pytest.raises(OperationalException, match=r\"No trades found.*\"):\n # Pair cannot be empty - so it's an empty dataframe.\n generate_profit_graph(pairs, data, trades.loc[trades['pair'].isnull()], timeframe=\"5m\",\n stake_currency='BTC')\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 364, "n_words": 143, "vocab_size": 93, "complexity": 2, "nloc": 35, "token_counts": 337, "n_ast_nodes": 551, "n_identifiers": 47, "random_cut": "def test_generate_profit_graph(testdatadir):\n filename = testdatadir / \"backtest-result_test.json\"\n trades = load_backtest_data(filename)\n timerange = TimeRange.parse_timerange(\"20180110-20180112\")\n pairs = [\"TRX/BTC\", \"XLM/BTC\"]\n trades = trades[trades['close_date'] < pd.Timestamp('2018-01-12', tz='UTC')]\n\n data = history.load_data(datadir=testdatadir,\n pairs=pairs,\n timeframe='5m',\n timerange=timerange)\n\n trades = trades[trades['pair'].isin(pairs)]\n\n fig = generate_profit_graph(pairs, data, trades, timef" }, { "id": 42550, "commit_id": "8a4cf5d94eb94b6427c5d1d7907ba07b119932c5", "repo": "nltk", "path": "nltk/tokenize/util.py", "file_name": "util.py", "fun_name": "regexp_span_tokenize", "commit_message": "Docstring tests (#3050)\n\n* fixed pytests\r\n\r\n* fixed more pytests\r\n\r\n* fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py\r\n\r\n* fixed pytests (mainly multiline or rounding issues)\r\n\r\n* fixed treebank pytests, removed test for return_string=True (deprecated)\r\n\r\n* fixed destructive.py pytests, removed test for return_string=True (deprecated)\r\n\r\n* fixed pytest (rounding issues)\r\n\r\n* fixed pytest (initialised missing object)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* added pytest +SKIP for deprecated module stanford\r\n\r\n* updated AUTHORS.md\r\n\r\n* changed docstring corrections by usage of ELLIPSIS and different roundings\r\n\r\n* fixed AUTHORS.md to be consistent\r\n\r\n* Fix framenet doctest formatting with pprint\r\n\r\n* Change docstring on MultiListBox.__init__\r\n\r\nI believe the original typo was misinterpreted and changed to something that was not originally intended.\r\n\r\nCo-authored-by: Jan Lennartz \r\nCo-authored-by: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com>\r\nCo-authored-by: Tom Aarsen ", "code": "def regexp_span_tokenize(s, regexp):\n rGood muffins cost $3.88\\nin New York. Please buy me\n ... two of them.\\n\\nThanks.\n left = 0\n for m in finditer(regexp, s):\n right, next = m.span()\n if right != left:\n yield left, right\n left = next\n yield left, len(s)\n\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 92, "n_words": 41, "vocab_size": 34, "complexity": 3, "nloc": 25, "token_counts": 50, "n_ast_nodes": 81, "n_identifiers": 10, "random_cut": "def regexp_span_tokenize(s, regexp):\n rGood muffins cost $3.88\\nin New York. Please buy me\n ... 
two of them.\\n\\nThanks.\n " }, { "id": 77612, "commit_id": "b1614930eb74e3bdab28c5f04949347f43ae6fa7", "repo": "wagtail", "path": "wagtail/images/tests/test_admin_views.py", "file_name": "test_admin_views.py", "fun_name": "test_default_ordering_used_if_invalid_ordering_provided", "commit_message": "Allow images to be sorted by date, title or file size", "code": "def test_default_ordering_used_if_invalid_ordering_provided(self):\n response = self.get({\"ordering\": \"bogus\"})\n self.assertEqual(response.status_code, 200)\n\n context = response.context\n default_ordering = \"-created_at\"\n self.assertEqual(context[\"current_ordering\"], default_ordering)\n self.assertEqual(\n context[\"images\"].object_list.query.order_by, (default_ordering,)\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 79, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 9, "token_counts": 66, "n_ast_nodes": 111, "n_identifiers": 11, "random_cut": "def test_default_ordering_used_if_invalid_ordering_provided(self):\n response = self.get({\"ordering\": \"bogus\"})\n self.assertEqual(response.status_code, 200)\n\n context = " }, { "id": 46460, "commit_id": "c063fc688cf20c37ed830de5e3dac4a664fd8241", "repo": "airflow", "path": "airflow/providers/elasticsearch/log/es_task_handler.py", "file_name": "es_task_handler.py", "fun_name": "emit", "commit_message": "Update black precommit (#22521)\n\nUse latest version of black, drop py36, and add py310.", "code": "def emit(self, record):\n if self.handler:\n record.offset = int(time() * (10**9))\n self.handler.emit(record)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 39, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 4, "token_counts": 36, "n_ast_nodes": 58, "n_identifiers": 7, "random_cut": "def emit(self, record):\n if self.handler:\n record.of" }, { "id": 244041, "commit_id": "cac356380d505bf15587f07c0529218cc36b9652", "repo": "mmdetection", "path": "mmdet/models/dense_heads/maskformer_head.py", "file_name": "maskformer_head.py", "fun_name": "preprocess_gt", "commit_message": "[Feature] Add Maskformer to mmdet (#7212)\n\n* first commit\r\n\r\n* add README\r\n\r\n* move model description from config to readme\r\n\r\nadd description for binary_input\r\n\r\nadd description for dice loss\r\n\r\nadd a independent panoptic gt processing function\r\n\r\nadd a independent panoptic gt processing function\r\n\r\nremove compatibility of pretrain in maskformer\r\n\r\n* update comments in maskformer_head\r\n\r\n* update docs format", "code": "def preprocess_gt(self, gt_labels_list, gt_masks_list, gt_semantic_segs):\n \n num_things_list = [self.num_things_classes] * len(gt_labels_list)\n num_stuff_list = [self.num_stuff_classes] * len(gt_labels_list)\n\n targets = multi_apply(preprocess_panoptic_gt, gt_labels_list,\n gt_masks_list, gt_semantic_segs, num_things_list,\n num_stuff_list)\n labels, masks = targets\n return labels, masks\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 130, "n_words": 30, "vocab_size": 20, "complexity": 1, "nloc": 8, "token_counts": 61, "n_ast_nodes": 94, "n_identifiers": 15, "random_cut": "def preprocess_gt(self, gt_labels_list, gt_masks_list, gt_semantic_segs):\n \n num_things_list = [self.num_things_classes] * len(gt_labels_list)\n 
num_stuff_list = [self.num_stuff_classes] * len(gt_labels_list)\n\n targets = multi_apply(preprocess_panoptic_gt, gt_labels_list,\n gt_masks_list, gt_semantic_segs, num_things_list,\n num_stuff_list)\n labels, masks = targets\n return labels, mas" }, { "id": 203456, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/options.py", "file_name": "options.py", "fun_name": "_create_formsets", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _create_formsets(self, request, obj, change):\n \"Helper function to generate formsets for add/change_view.\"\n formsets = []\n inline_instances = []\n prefixes = {}\n get_formsets_args = [request]\n if change:\n get_formsets_args.append(obj)\n for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):\n prefix = FormSet.get_default_prefix()\n prefixes[prefix] = prefixes.get(prefix, 0) + 1\n if prefixes[prefix] != 1 or not prefix:\n prefix = \"%s-%s\" % (prefix, prefixes[prefix])\n formset_params = self.get_formset_kwargs(request, obj, inline, prefix)\n formset = FormSet(**formset_params)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 192, "n_words": 63, "vocab_size": 47, "complexity": 9, "nloc": 25, "token_counts": 188, "n_ast_nodes": 184, "n_identifiers": 19, "random_cut": "def _create_formsets(self, request, obj, change):\n \"Helper function to generate formsets for add/change_view.\"\n formsets = []\n inline_instances = []\n prefixes = {}\n get_formsets_args = [request]\n if change:\n get_formsets_args.append(obj)\n for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):\n prefix = FormSet.get_default_prefix()\n prefixes[prefix] = prefixes.get(prefix, 0) + 1\n if prefixes[prefix] != 1 or not prefix:\n prefix = \"%s-%s\" % (prefix, prefixes[prefix])\n formset_params = self.get_" }, { "id": 154593, "commit_id": "e5b1888cd932909e49194d58035da34b210b91c4", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/hdk_on_native/interchange/dataframe_protocol/dataframe.py", "file_name": "dataframe.py", "fun_name": "_yield_chunks", "commit_message": "FEAT-#4946: Replace OmniSci with HDK (#4947)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Andrey Pavlenko ", "code": "def _yield_chunks(self, chunk_slices) -> \"HdkProtocolDataframe\":\n \n for i in range(len(chunk_slices) - 1):\n yield HdkProtocolDataframe(\n df=self._df.take_2d_labels_or_positional(\n row_positions=range(chunk_slices[i], chunk_slices[i + 1])\n ),\n nan_as_null=self._nan_as_null,\n allow_copy=self._allow_copy,\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 137, "n_words": 22, "vocab_size": 22, "complexity": 2, "nloc": 21, "token_counts": 65, "n_ast_nodes": 101, "n_identifiers": 15, "random_cut": "def _yield_chunks(self, chunk_slices) -> \"HdkProtocolDataframe\":\n \n for i in range(len(chunk_slices) - 1):\n yield HdkProtocolDataframe(\n df=self._df.take_2d_labels_or_positional(\n " }, { "id": 122668, "commit_id": "8fb344a724075c2b7ea3ec3d4b9dd3ae1d8a0bd7", "repo": "jax", "path": "jax/_src/lax/control_flow/conditionals.py", "file_name": "conditionals.py", "fun_name": "_cond_lowering", "commit_message": "[jax2tf] An alternative support for shape polymorphism for native serialization.\n\njax2tf already supports many cases of shape polymorphism, 
e.g., those\nwhere the shapes of all intermediates can be expressed as polynomials\nin the dimension variables in the input. We want to achieve the same\nsame coverage, or more, while using StableHLO as the lowering format,\nrather than tf.Graph.\n\nFor native serialization we will support two lowering implementations:\n\n * one is using the growing support in JAX for dynamic shapes,\n of which shape polymorphism is a special case.\n This implementation is enabled with the --jax_dynamic_shapes flag.\n At the moment, the JAX dynamic shapes support is still\n incomplete and over 300 jax2tf shape polymorphism tests fail.\n\n * a new one (added) here in which we form a Jaxpr using abstract\n values that express dimension sizes as dimension polynomials\n (as for the standard jax2tf). Then we lower the Jaxpr to StableHLO.\n This implementation is enabled when --jax_dynamic_shapes is off.\n With this implementation only 50 jax2tf tests fail (to be fixed\n separately).\n\nThe key contribution here is to enable lowering a Jaxpr that contains\ndimension polynomials in some of the intermediate shapes. Many lowering rules\nalready have some partial support for Jaxprs where the shapes contain\n`Var`s. To the extent possible, we try to write lowering rules that should\ncover both cases of dynamic shapes: Var or polynomials in shapes.\n\nThe lowering convention is that at top level we collect the sorted list\nof dimension variable names in the inputs, and we store it in ModuleContext.dim_vars.\nAll IR functions will take N additional prefix arguments of int32 type\ncontaining the values of the dimension variables. This is stored as\na list of `ir.Value` in `LoweringContext.dim_var_values`.\n\nNote that the Jaxprs are not changed to have extra Vars for the dimension\nvariable values. An alternative implementation could work by transforming\nthe Jaxpr to replace dimension polynomials into Vars.\n\nThe key code pattern used in the lowering rule is::\n\n if not core.is_constant_shape(shape): # Handles both Var, and polynomials\n shape = mlir.eval_dynamic_shape(ctx, shape)\n return mhlo.DynamicXXX(..., shape)\n else:\n return mhlo.XXX(..., shape)\n\nwith `mlir.eval_dynamic_shape` handling both cases::\n\n def eval_dynamic_shape(ctx, shape):\n if config.jax_dynamic_shapes:\n # Using Var\n return ... subst using ctx.axis_size_env ...\n else:\n # Using polynomials\n return ... 
subst using ctx.module_context.dim_vars and ctx.dim_var_values\n\nIn order to support the above some lowering functions need to take a\nLoweringContext parameter, e.g., mlir.broadcast_mhlo.\n\nI expect that the changes here will improve the --jax_dynamic_shapes coverage\nas well.", "code": "def _cond_lowering(ctx, index, *args, branches, linear):\n del linear # Unused.\n joined_effects = core.join_effects(*(branch.effects for branch in branches))\n ordered_effects = [eff for eff in joined_effects\n if eff in core.ordered_effects]\n num_tokens = len(ordered_effects)\n tokens_in = ctx.tokens_in.subset(ordered_effects)\n output_token_types = [mlir.token_type() for _ in ordered_effects]\n output_types = [\n *output_token_types, *map(mlir.aval_to_ir_types, ctx.avals_out)]\n flat_output_types = util.flatten(output_types)\n\n # mhlo.CaseOp takes a single argument 'index' and the corresponding blocks\n # have no arguments; the computation within the block uses implicit\n # captures.\n case_op = mhlo.CaseOp(flat_output_types, index=index,\n num_branches=len(branches))\n name_stack = extend_name_stack(ctx.module_context.name_stack, 'cond')\n for i, jaxpr in enumerate(branches):\n branch = case_op.regions[i].blocks.append()\n with ir.InsertionPoint(branch):\n sub_ctx = ctx.module_context.replace(\n name_stack=xla.extend_name_stack(name_stack, f'branch_{i}_fun'))\n out_vals, tokens_out = mlir.jaxpr_subcomp(\n sub_ctx, jaxpr.jaxpr, tokens_in,\n map(mlir.ir_constants, jaxpr.consts),\n *map(mlir.wrap_singleton_ir_values, args),\n dim_var_values=ctx.dim_var_values)\n out_tokens = [tokens_out.get(eff) for eff in ordered_effects]\n out_vals = [*out_tokens, *out_vals]\n mhlo.ReturnOp(util.flatten(out_vals))\n\n tokens_and_outputs = util.unflatten(case_op.results, map(len, output_types))\n tokens, outputs = util.split_list(tokens_and_outputs, [num_tokens])\n ctx.set_tokens_out(mlir.TokenSet(zip(ordered_effects, tokens)))\n return outputs\n\nmlir.register_lowering(cond_p, _cond_lowering)\n\n@state.register_discharge_rule(cond_p)", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@state.register_discharge_rule(cond_p)", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 279, "n_words": 139, "vocab_size": 104, "complexity": 7, "nloc": 31, "token_counts": 312, "n_ast_nodes": 506, "n_identifiers": 69, "random_cut": "def _cond_lowering(ctx, index, *args, branches, linear):\n del linear # Unused.\n joined_effects = core.join_effects(*(branch.effects for branch in branches))\n ordered_effects = [eff for eff in joined_effects\n if eff in core.ordered_effects]\n num_tokens = len(ordered_effects)\n tokens_in = ctx.tokens_in.subset(ordered_effects)\n output_token_types = [mlir.token_type() for _ in ordered_effects]\n output_types = [\n *output_token_types, *map(mlir.aval_to_ir_types, ctx.avals_out)]\n flat_output_types = util.flatten(output_types)\n\n # mhlo.CaseOp takes a single argument 'index' and the corresponding blocks\n # have no arguments; the computation within the block uses implicit\n # captures.\n case_op = mhlo.CaseOp(flat_output_types, index=index,\n num_branches=len(branches))\n name_stack = extend_name_stack(ctx.module_context.name_stack, 'cond')\n for i, jaxpr in enumerate(branches):\n branch = case_op.regions[i].blocks.append()\n with ir.InsertionPoint(branch):\n sub_ctx = ctx.module_context.replace(\n name_stack=xla.extend_name_stack(name_stack, f'branch_{i}_fun'))\n out_vals, tokens_out = mlir.jaxpr_subcomp(\n sub_ctx, jaxpr.jaxpr, tokens_in,\n map(mlir.ir_constants, 
jaxpr.consts),\n *map(mlir.wrap_singleton_ir_values, args),\n dim_var_values=ctx.dim_var_values)\n out_tokens = [tokens_out.get(eff) for eff in ordered_effects]\n out_vals = [*out_tokens, *out_vals]\n mhlo.ReturnOp(util.flatten(out_vals))\n\n tokens_and_outputs = util.unflatten(case_op.results, map(len, output_types))\n tokens, outputs = util.split_list(tokens_and_outputs, [num_tokens])\n ctx.set_tokens_out(mlir.TokenSet(zip(ordered_effects, tokens)))\n return outputs\n\nmlir.register_lowering(cond_p, _cond_lowering)\n\n@state.re" }, { "id": 286539, "commit_id": "8e9e6bd57f4bc5d57ccedfacccda6342d5881266", "repo": "OpenBBTerminal", "path": "openbb_terminal/portfolio/portfolio_model.py", "file_name": "portfolio_model.py", "fun_name": "__preprocess_transactions", "commit_message": "Incorporate portfolio class into SDK (#3401)\n\n* create functions to interact with portfolio\r\n\r\n* fix some docstrings\r\n\r\n* view docstrings\r\n\r\n* make portfolio loading available in sdk\r\n\r\n* reorder some methods\r\n\r\n* fix bug\r\n\r\n* update controller\r\n\r\n* update website\r\n\r\n* remove import\r\n\r\n* change input name\r\n\r\n* regenerate website\r\n\r\n* change portfolio arg name\r\n\r\n* fix metrics bugs\r\n\r\n* fix report\r\n\r\n* refactor assets alloc\r\n\r\n* refactor assets sectors alloc\r\n\r\n* remove unecessary attributes\r\n\r\n* refactor allocaasset sector\r\n\r\n* reorganize class\r\n\r\n* first refactor alloc\r\n\r\n* refactor portfolio alloc\r\n\r\n* black\r\n\r\n* fix alloc bug\r\n\r\n* regenerate sdk website\r\n\r\n* fix alloc bugs\r\n\r\n* forgot this exception\r\n\r\n* some refactor on portfolio alloc country region\r\n\r\n* fix some allocation bugs\r\n\r\n* add examples\r\n\r\n* regenerate website\r\n\r\nCo-authored-by: James Maslek ", "code": "def __preprocess_transactions(self):\n \n\n p_bar = tqdm(range(14), desc=\"Preprocessing transactions\")\n\n try:\n\n # 0. If optional fields not in the transactions add missing\n optional_fields = [\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n \"Fees\",\n \"Premium\",\n \"ISIN\",\n ]\n if not set(optional_fields).issubset(set(self.__transactions.columns)):\n for field in optional_fields:\n if field not in self.__transactions.columns:\n self.__transactions[field] = np.nan\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 1. Convert Date to datetime\n self.__transactions[\"Date\"] = pd.to_datetime(self.__transactions[\"Date\"])\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 2. Sort transactions by date\n self.__transactions = self.__transactions.sort_values(by=\"Date\")\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 3. Capitalize Ticker and Type [of instrument...]\n self.__transactions[\"Ticker\"] = self.__transactions[\"Ticker\"].map(\n lambda x: x.upper()\n )\n self.__transactions[\"Type\"] = self.__transactions[\"Type\"].map(\n lambda x: x.upper()\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 4. Translate side: [\"deposit\", \"buy\"] -> 1 and [\"withdrawal\", \"sell\"] -> -1\n self.__transactions[\"Signal\"] = self.__transactions[\"Side\"].map(\n lambda x: 1\n if x.lower() in [\"deposit\", \"buy\"]\n else (-1 if x.lower() in [\"withdrawal\", \"sell\"] else 0)\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 5. Convert quantity to signed integer\n self.__transactions[\"Quantity\"] = (\n abs(self.__transactions[\"Quantity\"]) * self.__transactions[\"Signal\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 6. 
Determining the investment/divestment value\n self.__transactions[\"Investment\"] = (\n self.__transactions[\"Quantity\"] * self.__transactions[\"Price\"]\n + self.__transactions[\"Fees\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD)\n crypto_trades = self.__transactions[self.__transactions.Type == \"CRYPTO\"]\n self.__transactions.loc[\n (self.__transactions.Type == \"CRYPTO\"), \"Ticker\"\n ] = [\n f\"{crypto}-{currency}\"\n for crypto, currency in zip(\n crypto_trades.Ticker, crypto_trades.Currency\n )\n ]\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided.\n\n # If isin not valid ticker is empty\n self.__transactions[\"yf_Ticker\"] = self.__transactions[\"ISIN\"].apply(\n lambda x: yf.utils.get_ticker_by_isin(x) if not pd.isna(x) else np.nan\n )\n\n empty_tickers = list(\n self.__transactions[\n (self.__transactions[\"yf_Ticker\"] == \"\")\n | (self.__transactions[\"yf_Ticker\"].isna())\n ][\"Ticker\"].unique()\n )\n\n # If ticker from isin is empty it is not valid in yfinance, so check if user provided ticker is supported\n removed_tickers = []\n for item in empty_tickers:\n with contextlib.redirect_stdout(None):\n # Suppress yfinance failed download message if occurs\n valid_ticker = not (\n yf.download(\n item,\n start=datetime.datetime.now() + datetime.timedelta(days=-5),\n progress=False,\n ).empty\n )\n if valid_ticker:\n # Invalid ISIN but valid ticker\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == item, \"yf_Ticker\"\n ] = np.nan\n else:\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == item, \"yf_Ticker\"\n ] = \"\"\n removed_tickers.append(item)\n\n # Merge reformated tickers into Ticker\n self.__transactions[\"Ticker\"] = self.__transactions[\"yf_Ticker\"].fillna(\n self.__transactions[\"Ticker\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 9. Remove unsupported ISINs that came out empty\n self.__transactions.drop(\n self.__transactions[self.__transactions[\"Ticker\"] == \"\"].index,\n inplace=True,\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 10. Create tickers dictionary with structure {'Type': [Ticker]}\n for ticker_type in set(self.__transactions[\"Type\"]):\n self.tickers[ticker_type] = list(\n set(\n self.__transactions[\n self.__transactions[\"Type\"].isin([ticker_type])\n ][\"Ticker\"]\n )\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 11. Create list with tickers except cash\n self.tickers_list = list(set(self.__transactions[\"Ticker\"]))\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 12. Save transactions inception date\n self.inception_date = self.__transactions[\"Date\"][0]\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 13. Populate fields Sector, Industry and Country\n if (\n self.__transactions.loc[\n self.__transactions[\"Type\"] == \"STOCK\",\n optional_fields,\n ]\n .isnull()\n .values.any()\n ):\n # If any fields is empty for stocks (overwrites any info there)\n self.load_company_data()\n\n p_bar.n += 1\n p_bar.refresh()\n\n # Warn user of removed ISINs\n if removed_tickers:\n p_bar.disable = True\n console.print(\n f\"\\n[red]The following tickers are not supported and were removed: {removed_tickers}.\"\n f\"\\nManually edit the 'Ticker' field with the proper Yahoo Finance suffix or provide a valid ISIN.\"\n f\"\\nSuffix info on 'Yahoo Finance market coverage':\"\n \" https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html\"\n f\"\\nE.g. 
IWDA -> IWDA.AS[/red]\\n\"\n )\n except Exception:\n console.print(\"\\nCould not preprocess transactions.\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 23, "n_whitespaces": 2772, "n_words": 515, "vocab_size": 269, "complexity": 14, "nloc": 138, "token_counts": 843, "n_ast_nodes": 1454, "n_identifiers": 72, "random_cut": "def __preprocess_transactions(self):\n \n\n p_bar = tqdm(range(14), desc=\"Preprocessing transactions\")\n\n try:\n\n # 0. If optional fields not in the transactions add missing\n optional_fields = [\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n \"Fees\",\n \"Premium\",\n \"ISIN\",\n ]\n if not set(optional_fields).issubset(set(self.__transactions.columns)):\n for field in optional_fields:\n if field not in self.__transactions.columns:\n self.__transactions[field] = np.nan\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 1. Convert Date to datetime\n self.__transactions[\"Date\"] = pd.to_datetime(self.__transactions[\"Date\"])\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 2. Sort transactions by date\n self.__transactions = self.__transactions.sort_values(by=\"Date\")\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 3. Capitalize Ticker and Type [of instrument...]\n self.__transactions[\"Ticker\"] = self.__transactions[\"Ticker\"].map(\n lambda x: x.upper()\n )\n self.__transactions[\"Type\"] = self.__transactions[\"Type\"].map(\n lambda x: x.upper()\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 4. Translate side: [\"deposit\", \"buy\"] -> 1 and [\"withdrawal\", \"sell\"] -> -1\n self.__transactions[\"Signal\"] = self.__transactions[\"Side\"].map(\n lambda x: 1\n if x.lower() in [\"deposit\", \"buy\"]\n else (-1 if x.lower() in [\"withdrawal\", \"sell\"] else 0)\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 5. Convert quantity to signed integer\n self.__transactions[\"Quantity\"] = (\n abs(self.__transactions[\"Quantity\"]) * self.__transactions[\"Signal\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 6. Determining the investment/divestment value\n self.__transactions[\"Investment\"] = (\n self.__transactions[\"Quantity\"] * self.__transactions[\"Price\"]\n + self.__transactions[\"Fees\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD)\n crypto_trades = self.__transactions[self.__transactions.Type == \"CRYPTO\"]\n self.__transactions.loc[\n (self.__transactions.Type == \"CRYPTO\"), \"Ticker\"\n ] = [\n f\"{crypto}-{currency}\"\n for crypto, currency in zip(\n crypto_trades.Ticker, crypto_trades.Currency\n )\n ]\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 8. 
Reformat STOCK/ETF tickers to yfinance format if ISIN provided.\n\n # If isin not valid ticker is empty\n self.__transactions[\"yf_Ticker\"] = self.__transactions[\"ISIN\"].apply(\n lambda x: yf.utils.get_ticker_by_isin(x) if not pd.isna(x) else np.nan\n )\n\n empty_tickers = list(\n self.__transactions[\n (self.__transactions[\"yf_Ticker\"] == \"\")\n | (self.__transactions[\"yf_Ticker\"].isna())\n ][\"Ticker\"].unique()\n )\n\n # If ticker from isin is empty it is not valid in yfinance, so check if user provided ticker is supported\n removed_tickers = []\n for item in empty_tickers:\n with contextlib.redirect_stdout(None):\n # Suppress yfinance failed download message if occurs\n valid_ticker = not (\n yf.download(\n item,\n start=datetime.datetime.now() + datetime.timedelta(days=-5),\n progress=False,\n ).empty\n )\n if valid_ticker:\n " }, { "id": 105970, "commit_id": "494a3d8356e09af6c69ded33dc7f2e1a7d239ab9", "repo": "datasets", "path": "tests/test_table.py", "file_name": "test_table.py", "fun_name": "test_embed_array_storage_nested", "commit_message": "Save file name in embed_storage (#5285)\n\n* save path in embed storage\r\n\r\n* fix tests\r\n\r\n* fix more tests\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Polina Kazakova \r\n\r\nCo-authored-by: Polina Kazakova ", "code": "def test_embed_array_storage_nested(image_file):\n array = pa.array([[{\"bytes\": None, \"path\": image_file}]], type=pa.list_(Image.pa_type))\n embedded_images_array = embed_array_storage(array, [Image()])\n assert isinstance(embedded_images_array.to_pylist()[0][0][\"path\"], str)\n assert isinstance(embedded_images_array.to_pylist()[0][0][\"bytes\"], bytes)\n array = pa.array([{\"foo\": {\"bytes\": None, \"path\": image_file}}], type=pa.struct({\"foo\": Image.pa_type}))\n embedded_images_array = embed_array_storage(array, {\"foo\": Image()})\n assert isinstance(embedded_images_array.to_pylist()[0][\"foo\"][\"path\"], str)\n assert isinstance(embedded_images_array.to_pylist()[0][\"foo\"][\"bytes\"], bytes)\n\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 62, "n_words": 39, "vocab_size": 26, "complexity": 1, "nloc": 9, "token_counts": 179, "n_ast_nodes": 293, "n_identifiers": 15, "random_cut": "def test_embed_array_storage_nested(image_file):\n array = pa.ar" }, { "id": 29006, "commit_id": "b8598fa2cf84f8bb473f2066f075ad7a374c3c80", "repo": "saleor", "path": "saleor/graphql/tests/fixtures.py", "file_name": "fixtures.py", "fun_name": "user", "commit_message": "Drop `AnonymouUser` from the context, and assign None instead (#10575)\n\n* Fix error when app deleted product added to draft order; Fixes #10574\r\n\r\n* Get rid of AnonymousUser from context\r\n\r\n* Ger rid of AnonymousUser\r\n\r\n* Drop anonymous_user fixture\r\n\r\n* Clean events\r\n\r\n* Fix test_checkout_complete.py file\r\n\r\n* Drop changelog entry\r\n\r\n* Update resolver for me query\r\n\r\n* Apply code review remarks\r\n\r\n* Apply changes after rebasing with main branch\r\n\r\n* Fix review remarks\r\n\r\n* Update create order from checkout tests\r\n\r\n* Drop remaining uses of is_anonymous\r\n\r\nCo-authored-by: IKarbowiak ", "code": "def user(self, user):\n self._user = user\n if user:\n self.token = create_access_token(user)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 35, "n_words": 11, "vocab_size": 10, "complexity": 
2, "nloc": 4, "token_counts": 23, "n_ast_nodes": 37, "n_identifiers": 5, "random_cut": "def user(self, user):\n s" }, { "id": 65767, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/crm/report/prospects_engaged_but_not_converted/prospects_engaged_but_not_converted.py", "file_name": "prospects_engaged_but_not_converted.py", "fun_name": "get_data", "commit_message": "style: format code with black", "code": "def get_data(filters):\n\tlead_details = []\n\tlead_filters = get_lead_filters(filters)\n\n\tfor lead in frappe.get_all(\n\t\t\"Lead\", fields=[\"name\", \"lead_name\", \"company_name\"], filters=lead_filters\n\t):\n\t\tdata = frappe.db.sql(\n\t\t\t,\n\t\t\t{\"lead\": lead.name, \"limit\": filters.get(\"no_of_interaction\")},\n\t\t)\n\n\t\tfor lead_info in data:\n\t\t\tlead_data = [lead.name, lead.lead_name, lead.company_name] + list(lead_info)\n\t\t\tlead_details.append(lead_data)\n\n\treturn lead_details\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 27, "n_words": 41, "vocab_size": 35, "complexity": 3, "nloc": 33, "token_counts": 100, "n_ast_nodes": 164, "n_identifiers": 20, "random_cut": "def get_data(filters):\n\tlead_details = []\n\tlead_filters = get_le" }, { "id": 26655, "commit_id": "6f37bd256e1258c8effaceeac7a7cf549592eead", "repo": "saleor", "path": "saleor/plugins/webhook/tests/subscription_webhooks/test_create_deliveries_for_subscription.py", "file_name": "test_create_deliveries_for_subscription.py", "fun_name": "test_invoice_requested", "commit_message": "Remove list from subsription payload. Use camel case for attached meta (#9519)\n\n* Remove list from payload. Use camel case for attached meta\r\n\r\n* Fix tests", "code": "def test_invoice_requested(fulfilled_order, subscription_invoice_requested_webhook):\n webhooks = [subscription_invoice_requested_webhook]\n event_type = WebhookEventAsyncType.INVOICE_REQUESTED\n invoice = fulfilled_order.invoices.first()\n invoice_id = graphene.Node.to_global_id(\"Invoice\", invoice.id)\n deliveries = create_deliveries_for_subscriptions(event_type, invoice, webhooks)\n expected_payload = json.dumps({\"invoice\": {\"id\": invoice_id}, \"meta\": None})\n\n assert deliveries[0].payload.payload == expected_payload\n assert len(deliveries) == len(webhooks)\n assert deliveries[0].webhook == webhooks[0]\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 66, "n_words": 40, "vocab_size": 30, "complexity": 1, "nloc": 10, "token_counts": 103, "n_ast_nodes": 164, "n_identifiers": 23, "random_cut": "def test_invoice_requested(fulfilled_order, subscription_invoice_requested_webhook):\n webhooks = [subscription_invoice_requested_webhook]\n event_type = WebhookEventAsyncType.INVOICE_REQUESTED\n invoice = fulfilled_order.invoices.first()\n invoice_id = graphene.Node.to_global_id(\"Invoice\", invoice.id)\n deliveries = create_deliveries_for_subscriptions(event_type, invoice, webhooks)\n expected_payload = json.dumps({\"invoice\": {\"id\": invoice_id}, \"meta\": None})\n\n assert deliveries[0].payload.payload == expected_payload\n assert len(deliveries) == len(webhooks)\n assert deliveries[0].webhook == webhooks[0]\n\n" }, { "id": 257166, "commit_id": "d49e92e21c2f9658039da7e478e62431f801db32", "repo": "haystack", "path": "test/test_pipeline.py", "file_name": "test_pipeline.py", "fun_name": 
"test_generate_code_imports", "commit_message": "ElasticsearchRetriever to BM25Retriever (#2423)\n\n* change class names to bm25\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Add back all_terms_must_match\r\n\r\n* fix syntax\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Creating a wrapper for old ES retriever with deprecated wrapper\r\n\r\n* Update Documentation & Code Style\r\n\r\n* New method for deprecating old ESRetriever\r\n\r\n* New attempt for deprecating the ESRetriever\r\n\r\n* Reverting to the simplest solution - warning logged\r\n\r\n* Update Documentation & Code Style\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>\r\nCo-authored-by: Sara Zan ", "code": "def test_generate_code_imports():\n pipeline_config = {\n \"version\": \"master\",\n \"components\": [\n {\"name\": \"DocumentStore\", \"type\": \"ElasticsearchDocumentStore\"},\n {\"name\": \"retri\", \"type\": \"BM25Retriever\", \"params\": {\"document_store\": \"DocumentStore\"}},\n {\"name\": \"retri2\", \"type\": \"TfidfRetriever\", \"params\": {\"document_store\": \"DocumentStore\"}},\n ],\n \"pipelines\": [\n {\n \"name\": \"Query\",\n \"nodes\": [{\"name\": \"retri\", \"inputs\": [\"Query\"]}, {\"name\": \"retri2\", \"inputs\": [\"Query\"]}],\n }\n ],\n }\n\n code = generate_code(pipeline_config=pipeline_config, pipeline_variable_name=\"p\", generate_imports=True)\n assert code == (\n \"from haystack.document_stores import ElasticsearchDocumentStore\\n\"\n \"from haystack.nodes import BM25Retriever, TfidfRetriever\\n\"\n \"from haystack.pipelines import Pipeline\\n\"\n \"\\n\"\n \"document_store = ElasticsearchDocumentStore()\\n\"\n \"retri = BM25Retriever(document_store=document_store)\\n\"\n \"retri_2 = TfidfRetriever(document_store=document_store)\\n\"\n \"\\n\"\n \"p = Pipeline()\\n\"\n 'p.add_node(component=retri, name=\"retri\", inputs=[\"Query\"])\\n'\n 'p.add_node(component=retri_2, name=\"retri2\", inputs=[\"Query\"])'\n )\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 299, "n_words": 88, "vocab_size": 62, "complexity": 1, "nloc": 29, "token_counts": 134, "n_ast_nodes": 284, "n_identifiers": 6, "random_cut": "def test_generate_code_imports():\n pipeline_config = {\n \"version\": \"master\",\n \"components\": [\n {\"name\": \"DocumentStore\", \"type\": \"ElasticsearchDocumentStore\"},\n {\"name\": \"retri\", \"type\": \"BM25Retriever\", \"params\": {\"document_store\": \"DocumentStore\"}},\n {\"name\": \"retri2\", \"type\": \"TfidfRetriever\", \"params\": {\"document_store\": \"DocumentStore\"}},\n ],\n \"pipelines\": [\n {\n \"name\": \"Query\",\n \"nodes\": [{\"name\": \"retri\", \"inputs\": [\"Query\"]}, {\"name\": \"retri2\", \"inputs\": [\"Query\"]}],\n }\n ],\n }\n\n code = generate_code(pipeline_config=pipeline_config, pipeline_variable_name=\"p\", generate_imports=True)\n assert code == (\n \"from haystack.document_stores import ElasticsearchDocumentStore\\n\"\n \"from haystack.nodes import BM25Retriever, TfidfRetriever\\n\"\n \"from haystack.pipelines import Pipeline\\n\"\n \"\\n\"\n \"document_store = ElasticsearchDocumentStore()\\n\"\n \"retri = BM25Retriever(document_store=document_store)\\n\"\n \"retr" }, { "id": 54126, "commit_id": "e5bb8b9a899ed05aee5eac4e3d4ae9e90c69d66f", "repo": "prefect", "path": 
"src/prefect/orion/database/orm_models.py", "file_name": "orm_models.py", "fun_name": "__table_args__", "commit_message": "Update Block CRUD", "code": "def __table_args__(cls):\n return (\n sa.Index(\n \"uq_block__spec_id_name\",\n \"block_spec_id\",\n \"name\",\n unique=True,\n ),\n )\n\n\n@declarative_mixin", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "@declarative_mixin", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 106, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 9, "token_counts": 24, "n_ast_nodes": 44, "n_identifiers": 6, "random_cut": "def __table_args__(cls):\n return (\n sa.Index(\n \"uq_block__spec_id_name\",\n \"block_spec_" }, { "id": 6046, "commit_id": "6bf9cfcee8ce605bd70dad8f242830b592c6e5dc", "repo": "ludwig", "path": "ludwig/datasets/naval/__init__.py", "file_name": "__init__.py", "fun_name": "load", "commit_message": "[cross-port from tf-legacy] Add support for additional tabular datasets to use to validate AutoML (#1722)\n\n* [cross-port from tf-legacy] Add support for additional tabular datasets to use to validate AutoML\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Address flake8 issues\r\n\r\nCo-authored-by: Anne Holler \r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def load(cache_dir=DEFAULT_CACHE_LOCATION, split=False):\n dataset = Naval(cache_dir=cache_dir)\n return dataset.load(split=split)\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 13, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 28, "n_ast_nodes": 44, "n_identifiers": 6, "random_cut": "def load(cache_dir=DEFAULT_CACHE_LOCATION, split=False):\n dataset = Naval(cache_dir=cache_dir)\n return dataset.load(split=split)\n\n" }, { "id": 205804, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/query_utils.py", "file_name": "query_utils.py", "fun_name": "register_lookup", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def register_lookup(cls, lookup, lookup_name=None):\n if lookup_name is None:\n lookup_name = lookup.lookup_name\n if \"class_lookups\" not in cls.__dict__:\n cls.class_lookups = {}\n cls.class_lookups[lookup_name] = lookup\n cls._clear_cached_lookups()\n return lookup\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 81, "n_words": 25, "vocab_size": 20, "complexity": 3, "nloc": 8, "token_counts": 50, "n_ast_nodes": 80, "n_identifiers": 7, "random_cut": "def register_lookup(cls, lookup, lookup_name=None):\n if lookup_name is None:\n lookup_name" }, { "id": 259885, "commit_id": "a47d569e670fd4102af37c3165c9b1ddf6fd3005", "repo": "scikit-learn", "path": "sklearn/datasets/tests/test_openml.py", "file_name": "test_openml.py", "fun_name": "test_fetch_openml_requires_pandas_in_future", "commit_message": "ENH improve ARFF parser using pandas (#21938)\n\nCo-authored-by: Thomas J. 
Fan \r\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Adrin Jalali ", "code": "def test_fetch_openml_requires_pandas_in_future(monkeypatch):\n \n params = {\"as_frame\": False, \"parser\": \"auto\"}\n data_id = 1119\n try:\n check_pandas_support(\"test_fetch_openml_requires_pandas\")\n except ImportError:\n _monkey_patch_webbased_functions(monkeypatch, data_id, True)\n warn_msg = (\n \"From version 1.4, `parser='auto'` with `as_frame=False` will use pandas\"\n )\n with pytest.warns(FutureWarning, match=warn_msg):\n fetch_openml(data_id=data_id, **params)\n else:\n raise SkipTest(\"This test requires pandas to not be installed.\")\n\n\n@pytest.mark.filterwarnings(\"ignore:Version 1 of dataset Australian is inactive\")\n# TODO(1.4): remove this filterwarning decorator for `parser`\n@pytest.mark.filterwarnings(\"ignore:The default value of `parser` will change\")\n@pytest.mark.parametrize(\n \"params, err_msg\",\n [\n (\n {\"parser\": \"pandas\"},\n \"Sparse ARFF datasets cannot be loaded with parser='pandas'\",\n ),\n (\n {\"as_frame\": True},\n \"Sparse ARFF datasets cannot be loaded with as_frame=True.\",\n ),\n (\n {\"parser\": \"pandas\", \"as_frame\": True},\n \"Sparse ARFF datasets cannot be loaded with as_frame=True.\",\n ),\n ],\n)", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.filterwarnings(\"ignore:Version 1 of dataset Australian is inactive\")\n# TODO(1.4): remove this filterwarning decorator for `parser`\n@pytest.mark.filterwarnings(\"ignore:The default value of `parser` will change\")\n@pytest.mark.parametrize(\n \"params, err_msg\",\n [\n (\n {\"parser\": \"pandas\"},\n \"Sparse ARFF datasets cannot be loaded with parser='pandas'\",\n ),\n (\n {\"as_frame\": True},\n \"Sparse ARFF datasets cannot be loaded with as_frame=True.\",\n ),\n (\n {\"parser\": \"pandas\", \"as_frame\": True},\n \"Sparse ARFF datasets cannot be loaded with as_frame=True.\",\n ),\n ],\n)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 306, "n_words": 112, "vocab_size": 80, "complexity": 2, "nloc": 14, "token_counts": 70, "n_ast_nodes": 247, "n_identifiers": 17, "random_cut": "def test_fetch_openml_requires_pandas_in_future(monkeypatch):\n \n params = {\"as_frame\": False, \"parser\": \"auto\"}\n data_id = 1119\n try:\n check_pandas_support(\"test_fetch_openml_requires_pandas\")\n except ImportError:\n _monkey_patc" }, { "id": 97252, "commit_id": "a3254cf73734a7f6a91a8ab58d5615b82f98a2f9", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_details.py", "file_name": "test_organization_metric_details.py", "fun_name": "test_derived_metric_details", "commit_message": "feat(metrics): Support for DM in Details Endpoint (#32744)\n\nAdds support for derived metrics in metrics detail\r\n endpoint", "code": "def test_derived_metric_details(self):\n # 3rd Test: Test for derived metrics when indexer and dataset have data\n self.store_session(\n self.build_session(\n project_id=self.project.id,\n started=(time.time() // 60) * 60,\n status=\"ok\",\n release=\"foobar@2.0\",\n )\n )\n response = self.get_success_response(\n self.organization.slug,\n \"session.crash_free_rate\",\n )\n assert response.data == {\n \"name\": \"session.crash_free_rate\",\n \"type\": \"numeric\",\n \"operations\": [],\n \"unit\": \"percentage\",\n \"tags\": [{\"key\": \"environment\"}, {\"key\": \"release\"}, {\"key\": \"session.status\"}],\n }\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 260, "n_words": 53, "vocab_size": 49, "complexity": 1, "nloc": 20, "token_counts": 101, "n_ast_nodes": 188, "n_identifiers": 16, "random_cut": "def test_derived_metric_details(self):\n # 3rd Test: Test for derived metrics when indexer and dataset have data\n self.store_session(\n self.build_session(\n project_id=self.project.id,\n started=(time.time() // 60) * 60,\n status=\"ok\",\n release=\"foobar@2.0\",\n )\n )\n response = self.get_success_response(\n self.organization.slug,\n \"session.crash_free_rate\",\n )\n assert response.data == {\n \"name\":" }, { "id": 54480, "commit_id": "3d60f99313923009d554cca0f310dc5dd582e22d", "repo": "prefect", "path": "tests/orion/test_app.py", "file_name": "test_app.py", "fun_name": "test_app_generates_correct_api_openapi_schema", "commit_message": "Pass `ephemeral` flag to `create_app` to drop analytics and UI", "code": "def test_app_generates_correct_api_openapi_schema():\n \n schema = create_app(ephemeral=True).openapi()\n\n assert len(schema[\"paths\"].keys()) > 1\n assert all([p.startswith(\"/api/\") for p in schema[\"paths\"].keys()])\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 27, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 4, "token_counts": 54, "n_ast_nodes": 95, "n_identifiers": 10, "random_cut": "def test_app_generates_correct_api_openapi_schema():\n \n schema = create_app(ephemeral=True).openapi()\n\n assert len(schema[\"paths\"].keys()) > 1\n assert all([p.startswith(\"/api/\") for p in schema[\"paths\"].keys()])\n" }, { "id": 151796, "commit_id": "62c69bf2b5285196ce80760160712c04b339bad1", "repo": "freqtrade", "path": "tests/freqai/test_freqai_interface.py", "file_name": "test_freqai_interface.py", "fun_name": "test_start_backtesting", "commit_message": "fix custom_info", "code": "def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog):\n freqai_conf.get(\"freqai\", {}).update({\"save_backtest_models\": True})\n freqai_conf['runmode'] = RunMode.BACKTEST\n if is_arm() and \"Catboost\" in model:\n pytest.skip(\"CatBoost is not supported on ARM\")\n\n if is_mac() and 'Reinforcement' in model:\n pytest.skip(\"Reinforcement learning module not available on intel based Mac OS\")\n Trade.use_db = False\n\n freqai_conf.update({\"freqaimodel\": model})\n freqai_conf.update({\"timerange\": \"20180120-20180130\"})\n freqai_conf.update({\"strategy\": strat})\n\n if 'ReinforcementLearner' in model:\n freqai_conf = make_rl_config(freqai_conf)\n\n if 'test_4ac' in model:\n freqai_conf[\"freqaimodel_path\"] = str(Path(__file__).parents[1] / \"freqai\" / \"test_models\")\n\n strategy = get_patched_freqai_strategy(mocker, freqai_conf)\n exchange = get_patched_exchange(mocker, freqai_conf)\n strategy.dp = DataProvider(freqai_conf, exchange)\n strategy.freqai_info = freqai_conf.get(\"freqai\", {})\n freqai = strategy.freqai\n freqai.live = False\n freqai.dk = FreqaiDataKitchen(freqai_conf)\n timerange = TimeRange.parse_timerange(\"20180110-20180130\")\n freqai.dd.load_all_pair_histories(timerange, freqai.dk)\n sub_timerange = TimeRange.parse_timerange(\"20180110-20180130\")\n corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, \"LTC/BTC\", freqai.dk)\n\n df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, \"LTC/BTC\")\n df = freqai.cache_corr_pairlist_dfs(df, freqai.dk)\n for i in range(5):\n df[f'%-constant_{i}'] = 
i\n\n metadata = {\"pair\": \"LTC/BTC\"}\n freqai.start_backtesting(df, metadata, freqai.dk)\n model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()]\n\n assert len(model_folders) == num_files\n Trade.use_db = True\n assert log_has_re(\n \"Removed features \",\n caplog,\n )\n assert log_has_re(\n \"Removed 5 features from prediction features, \",\n caplog,\n )\n Backtesting.cleanup()\n shutil.rmtree(Path(freqai.dk.full_path))\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 334, "n_words": 167, "vocab_size": 112, "complexity": 10, "nloc": 45, "token_counts": 377, "n_ast_nodes": 633, "n_identifiers": 60, "random_cut": "def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog):\n freqai_conf.get(\"freqai\", {}).update({\"save_backtest_models\": True})\n freqai_conf['runmode'] = RunMode.BACKTEST\n if is_arm() and \"Catboost\" in model:\n pytest.skip(\"CatBoost is not supported on ARM\")\n\n if is_mac() and 'Reinforcement' in model:\n pytest.skip(\"Reinforcement learning module not available on intel based Mac OS\")\n Trade.use_db = False\n\n freqai_conf.update({\"freqaimodel\": model})\n freqai_conf.update({\"timerange\": \"20180120-20180130\"})\n freqai_conf.update({\"strategy\": strat})\n\n if 'ReinforcementLearner' in model:\n freqai_conf = make_rl_config(freqai_conf)\n\n if 'test_4ac' in model:\n freqai_conf[\"freqaimodel_path\"] = str(Path(__file__).parents[1] / \"freqai\" / \"test_models\")\n\n strategy = get_patched_freqai_strategy(mocker, freqai_conf)\n exchange = get_patched_exchange(mocker, freqai_conf)\n strategy.dp = DataProvider(freqai_conf, exchange)\n strategy.freqai_info = freqai_conf.get(\"freqai\", {})\n freqai = strategy.freqai\n freqai.live = False\n freqai.dk = FreqaiDataKitchen(freqai_conf)\n timerange = TimeRange.parse_timerange(\"20180110-20180130\")\n freqai.dd.load_all_pair_histories(timerange, freqai.dk)\n sub_timerange = TimeRange.parse_timerange(\"20180110-20180130\")\n corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, \"LTC/BTC\", freqai.dk)\n\n df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, \"LTC/BTC\")\n df = freqai.cache_corr_pairlist_dfs(df, freqai.dk)\n for i in range(5):\n df[f'%-constant_{i}'] = i" }, { "id": 142863, "commit_id": "0959f44b6fc217a4f2766ed46a721eb79b067b2c", "repo": "ray", "path": "python/ray/tune/execution/trial_runner.py", "file_name": "trial_runner.py", "fun_name": "_reconcile_live_trials", "commit_message": "[tune/structure] Introduce execution package (#26015)\n\nExecution-specific packages are moved to tune.execution.\r\n\r\nCo-authored-by: Xiaowei Jiang ", "code": "def _reconcile_live_trials(self):\n \n for trial in list(self._live_trials):\n # Only for TERMINATED trials. ERRORed trials might be retried.\n if trial.status == Trial.TERMINATED:\n self._live_trials.remove(trial)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 72, "n_words": 21, "vocab_size": 20, "complexity": 3, "nloc": 4, "token_counts": 33, "n_ast_nodes": 56, "n_identifiers": 9, "random_cut": "def _reconcile_live_trials(self):\n \n for trial in list(self._live_trials):\n # Only for TERMINATED trials. 
ERRORed trials might be retried.\n if trial.status == Trial.TERMINATE" }, { "id": 121148, "commit_id": "231495166929be4a6ee3a0fd843858abeeca3694", "repo": "jax", "path": "jax/experimental/pjit.py", "file_name": "pjit.py", "fun_name": "_create_mesh_pspec_sharding", "commit_message": "Convert everything in pjit to the `Sharding` interface. The following contains the things that have changed in this CL:\n\n* All in_axis_resources and out_axis_resources are instances of `Sharding`. When `config.jax_array` is enabled, `in_shardings` is inferred from the inputs.\n\n* `out_shardings` are still instances of `MeshPspecSharding` even if `Array` are used. In a follow up CL, I will change out_axis_resources to accept `Sharding` instances.\n * This is also a reason why you still need a mesh context manager when `config.jax_array` is enabled.\n * cl/458267790 is WIP for this. It adds a couple of checks in MeshPspecSharding too when `AUTO` is used.\n\n* Checking of sharding with `aval` has a handler system to deal with sharding instances.\n * The reason for creating a `pjit` specific system rather than putting this check on the sharding instances is because each transformation has a different way of checking the sharding. The best example for this is `pjit` and `xmap`. They both have different way to check if an aval is sharded properly with respect to the given sharding because `pjit` and `xmap` has different ways to express sharding.\n\n* `MeshPspecSharding` and `SingleDeviceSharding` have `__hash__` and `__eq__`. So now we don't have to pass around canonicalized pspecs in the new path to get cache hits. The `Sharding` instances should handle that for us.\n\n* _pjit_lower still depends on mesh which is the major reason why I haven't removed `resource_env` from `params`. But in the interest of keep this CL small (LOL), I'll make those changes in a follow up CL.\n * Also the private functions in pxla.py are used by pathways and automap so I'll have to modify those too.\n * Also it has `pxla.resource_typecheck` which I haven't figured out how to move it to sharding interface.\n\n* `_to_xla_op_sharding` takes in `axis_ctx` as an extra **optional** parameter. This is required for `with_sharding_constraint`.\n * `with_sharding_constraint` uses the MLIR `ctx` here: cl/458042998\n\n* `pjit`'s batching handlers add an extra dimension to the axis_resources. Since this is dependent on how each transformation adds the extra dimension and it also differs on how each sharding instance will handle it, I added a handler system for this too. Again `xmap` and `pjit` differ a lot here. This is why I went with the handler approach.\n * MeshPspecSharding handles this `insert_axis_partitions` on the parsed partition spec. 
I have added more detailed comments in the place where this is done.\n\nPiperOrigin-RevId: 459548974", "code": "def _create_mesh_pspec_sharding(mesh, x):\n if _is_unspecified(x):\n return x\n if _is_from_gda(x):\n return x\n return sharding.MeshPspecSharding._from_parsed_pspec(mesh, x)\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, "n_words": 14, "vocab_size": 10, "complexity": 3, "nloc": 6, "token_counts": 34, "n_ast_nodes": 53, "n_identifiers": 8, "random_cut": "def _create_mesh_pspec_sharding(mesh, x):\n if _is_unspecified(x):\n return x\n if _is_from_gda(x):\n return x\n return sharding.MeshPspecSharding._from_parsed_pspec(mesh, x)\n\n" }, { "id": 277253, "commit_id": "fa6d9107a498f7c2403ff28c7b389a1a0c5cc083", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "_flatten_modules", "commit_message": "reduct too long lines", "code": "def _flatten_modules(self, recursive=True, include_self=True):\n \n if include_self:\n yield self\n\n # Only instantiate set and deque if needed.\n trackables = getattr(self, \"_self_tracked_trackables\", None)\n if trackables:\n seen_object_ids = set()\n deque = collections.deque(trackables)\n while deque:\n trackable_obj = deque.popleft()\n trackable_id = id(trackable_obj)\n if trackable_id in seen_object_ids:\n continue\n seen_object_ids.add(trackable_id)\n\n # Metrics are not considered part of the Layer's topology.\n if isinstance(trackable_obj, tf.Module) and not isinstance(\n trackable_obj, metrics_mod.Metric\n ):\n yield trackable_obj\n # Introspect recursively through sublayers.\n if recursive:\n subtrackables = getattr(\n trackable_obj, \"_self_tracked_trackables\", None\n )\n if subtrackables:\n deque.extendleft(reversed(subtrackables))\n elif isinstance(\n trackable_obj,\n tf.__internal__.tracking.TrackableDataStructure,\n ):\n # Data structures are introspected even with\n # `recursive=False`.\n tracked_values = trackable_obj._values\n if tracked_values:\n deque.extendleft(reversed(tracked_values))\n\n # This is a hack so that the is_layer (within\n # training/trackable/layer_utils.py) check doesn't get the weights attr.\n # TODO(b/110718070): Remove when fixed.", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 702, "n_words": 124, "vocab_size": 90, "complexity": 11, "nloc": 30, "token_counts": 152, "n_ast_nodes": 255, "n_identifiers": 28, "random_cut": "def _flatten_modules(self, recursive=True, include_self=True):\n \n if include_self:\n yield self\n\n # Only instantiate set and deque if needed.\n trackables = getattr(self, \"_self_tracked_trackables\", None)\n if trackables:\n seen_object_ids = set()\n deque = collections.deque(trackables)\n while deque:\n trackable_obj = deque.popleft()\n trackable_id = id(trackable_obj)\n if trackable_id in seen_object_ids:\n continue\n seen_object_ids.add(trackable_id)\n\n # Metrics are not considered part of the Layer's topology.\n if isinstance(trackable_obj, tf.Module) and not isinstance(\n trackable_obj, metrics_mod.Metric\n ):\n yield trackable_obj\n # Introspect recursively through sublayers.\n if recursive:\n subtrackables = getattr(\n " }, { "id": 209580, "commit_id": "495b21f2867e48286767085c8cf2918e4092e9dc", "repo": "scapy", "path": "scapy/contrib/automotive/scanner/executor.py", "file_name": "executor.py", "fun_name": "check_new_states", "commit_message": 
"Add Automotive Logger for all debug outputs of the automotive layer", "code": "def check_new_states(self, test_case):\n # type: (AutomotiveTestCaseABC) -> None\n if isinstance(test_case, StateGenerator):\n edge = test_case.get_new_edge(self.socket, self.configuration)\n if edge:\n log_automotive.debug(\"Edge found %s\", edge)\n tf = test_case.get_transition_function(self.socket, edge)\n self.state_graph.add_edge(edge, tf)\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 107, "n_words": 27, "vocab_size": 24, "complexity": 3, "nloc": 7, "token_counts": 62, "n_ast_nodes": 97, "n_identifiers": 15, "random_cut": "def check_new_states(self, test_case):\n # type: (AutomotiveTestCaseABC) -> None\n if isinstance(test_case, StateGenerator):\n edge = test_case.get_new_edge(self.socket, self.configuration)\n if ed" }, { "id": 81347, "commit_id": "2d310dc4e50c6f7cd298f9fb8af69da258cd9ea6", "repo": "awx", "path": "awx/api/serializers.py", "file_name": "serializers.py", "fun_name": "_local_summarizable_fk_fields", "commit_message": "Optimize object creation by getting fewer empty relationships (#12508)\n\nThis optimizes the ActivityStreamSerializer by only getting many-to-many\r\n relationships that are speculatively non-empty\r\n based on information we have in other fields\r\n\r\nWe run this every time we create an object as an on_commit action\r\n so it is expected this will have a major impact on response times for launching jobs", "code": "def _local_summarizable_fk_fields(self, obj):\n summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS)\n # Special requests\n summary_dict['group'] = summary_dict['group'] + ('inventory_id',)\n for key in summary_dict.keys():\n if 'id' not in summary_dict[key]:\n summary_dict[key] = summary_dict[key] + ('id',)\n field_list = list(summary_dict.items())\n # Needed related fields that are not in the default summary fields\n field_list += [\n ('workflow_job_template_node', ('id', 'unified_job_template_id')),\n ('label', ('id', 'name', 'organization_id')),\n ('notification', ('id', 'status', 'notification_type', 'notification_template_id')),\n ('o_auth2_access_token', ('id', 'user_id', 'description', 'application_id', 'scope')),\n ('o_auth2_application', ('id', 'name', 'description')),\n ('credential_type', ('id', 'name', 'description', 'kind', 'managed')),\n ('ad_hoc_command', ('id', 'name', 'status', 'limit')),\n ('workflow_approval', ('id', 'name', 'unified_job_id')),\n ('instance', ('id', 'hostname')),\n ]\n # Optimization - do not attempt to summarize all fields, pair down to only relations that exist\n if not obj:\n return field_list\n existing_association_types = [obj.object1, obj.object2]\n if 'user' in existing_association_types:\n existing_association_types.append('role')\n return [entry for entry in field_list if entry[0] in existing_association_types]\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 365, "n_words": 128, "vocab_size": 86, "complexity": 7, "nloc": 24, "token_counts": 234, "n_ast_nodes": 411, "n_identifiers": 16, "random_cut": "def _local_summarizable_fk_fields(self, obj):\n summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS)\n # Special requests\n summary_dict['group'] = summary_dict['group'] + ('inventory_id',)\n for key in summary_dict.keys():\n if 'id' not in summary_dict[key]:\n summary_dict[key] = summary_dict[key] + ('id',)\n field_list = 
list(summary_dict.items())\n # Needed related fields that are not in the default summary fields\n field_list += [\n ('workflow_job_template_node', ('id', 'unified_job_template_id')),\n ('label', (" }, { "id": 208187, "commit_id": "777698c746e4d1aa8af0a7974b0559bf3b86b14a", "repo": "celery", "path": "t/unit/utils/test_local.py", "file_name": "test_local.py", "fun_name": "test_listproxy", "commit_message": "[pre-commit.ci] pre-commit autoupdate (#7625)\n\n* [pre-commit.ci] pre-commit autoupdate\r\n\r\nupdates:\r\n- [github.com/asottile/pyupgrade: v2.34.0 → v2.38.0](https://github.com/asottile/pyupgrade/compare/v2.34.0...v2.38.0)\r\n- [github.com/PyCQA/flake8: 4.0.1 → 5.0.4](https://github.com/PyCQA/flake8/compare/4.0.1...5.0.4)\r\n- [github.com/asottile/yesqa: v1.3.0 → v1.4.0](https://github.com/asottile/yesqa/compare/v1.3.0...v1.4.0)\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* autopep8\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Omer Katz ", "code": "def test_listproxy(self):\n v = []\n x = Proxy(lambda: v)\n x.append(1)\n x.extend([2, 3, 4])\n assert x[0] == 1\n assert x[:-1] == [1, 2, 3]\n del (x[-1])\n assert x[:-1] == [1, 2]\n x[0] = 10\n assert x[0] == 10\n assert 10 in x\n assert len(x) == 3\n assert iter(x)\n x[0:2] = [1, 2]\n del (x[0:2])\n assert str(x)\n", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 166, "n_words": 55, "vocab_size": 31, "complexity": 1, "nloc": 17, "token_counts": 133, "n_ast_nodes": 200, "n_identifiers": 10, "random_cut": "def test_listproxy(self):\n v = []\n x = Proxy(lambda: v)\n x.append(1)\n " }, { "id": 98309, "commit_id": "c407626bafad657529022fcc11ea7915d71e0c61", "repo": "sentry", "path": "tests/acceptance/test_onboarding.py", "file_name": "test_onboarding.py", "fun_name": "test_onboarding", "commit_message": "feat(onboarding): remove welcome page experiment and use new experience (#33616)\n\nThis PR copies the welcome page component from the targeted onboarding flow into the default onboarding flow and removes the TargetedOnboardingWelcomePageExperimentV2 experiment. 
There are some minor differences to handle the different prop types but everything else is the same.", "code": "def test_onboarding(self, generate_api_key):\n self.browser.get(\"/onboarding/%s/\" % self.org.slug)\n\n # Welcome step\n self.browser.wait_until('[data-test-id=\"onboarding-step-welcome\"]')\n self.browser.snapshot(name=\"onboarding - welcome\")\n\n # Platform selection step\n self.browser.click('[aria-label=\"Start\"]')\n self.browser.wait_until('[data-test-id=\"onboarding-step-select-platform\"]')\n\n self.browser.snapshot(name=\"onboarding - select platform\")\n\n # Select and create node JS project\n self.browser.click('[data-test-id=\"platform-node\"]')\n self.browser.wait_until_not('[data-test-id=\"platform-select-next\"][aria-disabled=\"true\"]')\n self.browser.wait_until('[data-test-id=\"platform-select-next\"][aria-disabled=\"false\"]')\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 116, "n_words": 33, "vocab_size": 28, "complexity": 1, "nloc": 20, "token_counts": 177, "n_ast_nodes": 164, "n_identifiers": 12, "random_cut": "def test_onboarding(self, generate_api_key):\n self.browser.get(\"/onboarding/%s/\" % self.org.slug)\n\n # Welcome step\n self.browser.wait_until('[data-test-id=\"onboarding-step-welcome\"]')\n self.browser.snapshot(name=\"onboarding - welcome\")\n\n # Platform selection step\n self.browser.click('[aria-label=\"Start\"]')\n self.brow" }, { "id": 163582, "commit_id": "388ecf3d0804d7596876b53d96eb34de5bdcf8a3", "repo": "pandas", "path": "scripts/validate_min_versions_in_sync.py", "file_name": "validate_min_versions_in_sync.py", "fun_name": "get_versions_from_code", "commit_message": "MISC: Check that min versions are aligned in CI and import_optional_dependency (#45219)", "code": "def get_versions_from_code() -> dict[str, str]:\n install_map = _optional.INSTALL_MAPPING\n versions = _optional.VERSIONS\n return {\n install_map.get(k, k).casefold(): v\n for k, v in versions.items()\n if k != \"pytest\"\n }\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 58, "n_words": 26, "vocab_size": 24, "complexity": 3, "nloc": 8, "token_counts": 52, "n_ast_nodes": 82, "n_identifiers": 13, "random_cut": "def get_versions_from_code() -> dict[str, str]:\n install_map = _optional.INSTALL_MAPPING\n versions = _optional" }, { "id": 39114, "commit_id": "fd2bf8bfb71aa94fe70e1fe462d317b9bbaa6c52", "repo": "recommenders", "path": "recommenders/models/ncf/dataset.py", "file_name": "dataset.py", "fun_name": "_create_test_file", "commit_message": "fix static analysis", "code": "def _create_test_file(self):\n\n logger.info(\"Creating full leave-one-out test file {} ...\".format(self.test_file_full))\n\n # create empty csv\n pd.DataFrame(\n columns=[self.col_user, self.col_item, self.col_rating, self.col_test_batch]\n ).to_csv(self.test_file_full, index=False)\n\n batch_idx = 0\n\n with self.train_datafile as train_datafile:\n with self.test_datafile as test_datafile:\n for user in test_datafile.users:\n if user in train_datafile.users:\n user_test_data = test_datafile.load_data(user)\n user_train_data = train_datafile.load_data(user)\n # for leave-one-out evaluation, exclude items seen in both training and test sets\n # when sampling negatives\n user_positive_item_pool = set(\n user_test_data[self.col_item].unique()).union(user_train_data[self.col_item].unique()\n )\n 
user_negative_item_pool = self._get_user_negatives_pool(user_positive_item_pool)\n n_samples = self.n_neg_test\n n_samples = self._check_sample_size(user, n_samples, user_negative_item_pool, training=False)\n\n user_examples_dfs = []\n # sample n_neg_test negatives for each positive example and assign a batch index\n for positive_example in np.array_split(user_test_data, user_test_data.shape[0]):\n negative_examples = self._get_negative_examples(user, user_negative_item_pool, n_samples)\n examples = pd.concat([positive_example, negative_examples])\n examples[self.col_test_batch] = batch_idx\n user_examples_dfs.append(examples)\n batch_idx += 1\n\n # append user test data to file\n user_examples = pd.concat(user_examples_dfs)\n user_examples.to_csv(self.test_file_full, mode='a', index=False, header=False)\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 734, "n_words": 130, "vocab_size": 95, "complexity": 4, "nloc": 27, "token_counts": 248, "n_ast_nodes": 392, "n_identifiers": 46, "random_cut": "def _create_test_file(self):\n\n logger.info(\"Creating full leave-one-out test file {} ...\".format(self.test_file_full))\n\n # create empty csv\n pd.DataFrame(\n columns=[self.col_user, self.col_item, self.col_rating, self.col_test_batch]\n ).to_csv(self.test_file_full, index=False)\n\n batch_idx = 0\n\n with self.train_datafile as train_datafile:\n with self.test_datafile as test_datafile:\n for user in test_datafile.users:\n if user in train_datafile.users:\n user_test_data = test_datafile.load_data(user)\n user_train_data = train_datafile.load_data(user)\n # for leave-one-out evaluation, exclude items seen in both training and test sets\n # when sampling negatives\n user_positive_item_pool = set(\n user_test_data[self.col_item].unique()).union(user_train_data[self.col_item].unique()\n )\n user_negative_item_pool = self._get_user_negatives_pool(user_positive_item_pool)\n n_samples = self.n_neg_test\n n_samples = self._check_sample_size(user, n_samples, user_negative_item_pool, training=False)\n\n user_examples_dfs = []\n # sample n_neg_test negatives for each positive example and assign a batch index\n for positive_example in np.array_split(user_test_data, user_test_data.shape[0]):\n negative_examples = self._get_negative_examples(user, user_negative_item_pool, n_samples)\n examples = pd.concat([positive_example, negative_examples])\n examples[self.col_test_batch] = batch_idx\n user_examples_dfs.append(examples)\n " }, { "id": 255069, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/backend/test/case/node/softmaxcrossentropy.py", "file_name": "softmaxcrossentropy.py", "fun_name": "export_softmaxcrossentropy_sum_log_prob", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: 
Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def export_softmaxcrossentropy_sum_log_prob() -> None:\n # Define operator attributes.\n reduction = 'sum'\n\n # Create operator.\n node = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z', 'log_prob'],\n reduction=reduction)\n\n # Define operator inputs.\n np.random.seed(0)\n x = np.random.rand(3, 5).astype(np.float32)\n labels = np.random.randint(0, high=5, size=(3, )).astype(np.int64)\n\n # Compute SoftmaxCrossEntropyLoss\n loss, log_prob = softmaxcrossentropy(x, labels, reduction='sum', get_log_prob=True)\n\n # Check results\n expect(node, inputs=[x, labels], outputs=[loss, log_prob], name='test_sce_sum_log_prob')\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 247, "n_words": 56, "vocab_size": 46, "complexity": 1, "nloc": 11, "token_counts": 136, "n_ast_nodes": 218, "n_identifiers": 26, "random_cut": "def export_softmaxcrossentropy_sum_log_prob() -> None:\n # Define operator attributes.\n reduction = 'sum'\n\n # Create operator.\n node = onnx.helper.make_node('SoftmaxCrossEntropyLoss',\n inputs=['x', 'y'],\n outputs=['z', 'log_prob'],\n reduction=reduction)\n\n # Define operator inputs.\n np.random.seed(0)\n x = np.random.rand(3, 5).astype(np.float32)\n labels = np.random.randint(0, high=5, size=(3, )).astype(np.int64)\n\n # Compute SoftmaxCrossEntropyLoss\n loss, log_prob = softmaxcrossentropy(x, labels, reduction='sum', get_log_prob=True)\n\n # Check results\n expect(node, inputs=[x, labels], outputs=[loss, log_prob], name='test_sce_sum_log_prob')\n" }, { "id": 65046, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/sales_invoice/sales_invoice.py", "file_name": "sales_invoice.py", "fun_name": "get_mode_of_payment_info", "commit_message": "style: format code with black", "code": "def get_mode_of_payment_info(mode_of_payment, company):\n\treturn frappe.db.sql(\n\t\t,\n\t\t(company, mode_of_payment),\n\t\tas_dict=1,\n\t)\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 4, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 9, "token_counts": 27, "n_ast_nodes": 51, "n_identifiers": 8, "random_cut": "def get_mode_of_payment_info(mode_of_payment, company):\n\treturn frappe.db.sql(\n\t\t,\n\t\t(company, mode_of_payment" }, { "id": 314637, "commit_id": "bc33818b20d145cba370247f5bb3b69d078cd9f3", "repo": "core", "path": "homeassistant/components/egardia/alarm_control_panel.py", "file_name": "alarm_control_panel.py", "fun_name": "should_poll", "commit_message": "Use attributes in egardia alarm (#74098)", "code": "def should_poll(self) -> bool:\n \n if not self._rs_enabled:\n return True\n return False\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 43, "n_words": 11, "vocab_size": 10, "complexity": 2, "nloc": 5, "token_counts": 18, "n_ast_nodes": 32, "n_identifiers": 4, "random_cut": "def should_poll(self) -> 
bool:\n \n " }, { "id": 241548, "commit_id": "7fa1aebcc99297e4d7eb8dcf2deb22e6da814edf", "repo": "lightning", "path": "tests/profiler/test_profiler.py", "file_name": "test_profiler.py", "fun_name": "test_pytorch_profiler_trainer_ddp", "commit_message": "Remove `profile(\"training_step_and_backward\")` (#11222)", "code": "def test_pytorch_profiler_trainer_ddp(tmpdir, pytorch_profiler):\n \n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n enable_progress_bar=False,\n max_epochs=1,\n limit_train_batches=5,\n limit_val_batches=5,\n profiler=pytorch_profiler,\n strategy=\"ddp\",\n gpus=2,\n )\n trainer.fit(model)\n expected = {\"[Strategy]DDPStrategy.validation_step\"}\n if not _KINETO_AVAILABLE:\n expected |= {\n \"[Strategy]DDPStrategy.training_step\",\n \"[Strategy]DDPStrategy.backward\",\n }\n for name in expected:\n assert sum(e.name == name for e in pytorch_profiler.function_events), name\n\n files = set(os.listdir(pytorch_profiler.dirpath))\n expected = f\"fit-profiler-{trainer.local_rank}.txt\"\n assert expected in files\n\n path = pytorch_profiler.dirpath / expected\n assert path.read_text(\"utf-8\")\n\n if _KINETO_AVAILABLE:\n files = os.listdir(pytorch_profiler.dirpath)\n files = [file for file in files if file.endswith(\".json\")]\n assert len(files) == 2, files\n local_rank = trainer.local_rank\n assert any(f\"{local_rank}-optimizer_step_with_closure_\" in f for f in files)\n assert any(f\"{local_rank}-[Strategy]DDPStrategy.validation_step\" in f for f in files)\n\n\n@pytest.mark.parametrize(\"fast_dev_run\", [1, 2, 3, 4, 5])\n@pytest.mark.parametrize(\"boring_model_cls\", [ManualOptimBoringModel, BoringModel])", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"fast_dev_run\", [1, 2, 3, 4, 5])\n@pytest.mark.parametrize(\"boring_model_cls\", [ManualOptimBoringModel, BoringModel])", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 289, "n_words": 108, "vocab_size": 64, "complexity": 9, "nloc": 34, "token_counts": 199, "n_ast_nodes": 378, "n_identifiers": 39, "random_cut": "def test_pytorch_profiler_trainer_ddp(tmpdir, pytorch_profiler):\n \n model = BoringModel()\n trainer = Trainer(\n default_root_dir=tmpdir,\n enable_progress_bar=False,\n max_epochs=1,\n limit_train_batches=5,\n limit_val_batches=5,\n profiler=pytorch_profiler,\n strategy=\"ddp\",\n gpus=2,\n )\n trainer.fit(model)\n expected = {\"[Strategy]DDPStrategy.validation_step\"}\n if not _KINETO_AVAILABLE:\n expected |= {\n \"[Strategy]DDPStrategy.training_step\",\n \"[Strategy]DDPStrategy.backward\",\n }\n for name in expected:\n assert sum(e.name == name for e in pytorch_profiler.function_events), name\n\n files = set(os.listdir(pytorch_profiler.dirpath))\n expected = f\"fit-profiler-{trainer.local_rank}.txt\"\n assert expected in files\n\n path = pytorch_profiler.dirpath / expected\n assert path.read_text(\"utf-8\")\n\n if _KINETO_AVAILABLE:\n files = os.listdir(pytorch_profiler.dirpath)\n files = [fi" }, { "id": 289566, "commit_id": "e84e5f134ee6ccd04ad098a16c41dd2ed141371c", "repo": "core", "path": "tests/components/recorder/test_websocket_api.py", "file_name": "test_websocket_api.py", "fun_name": "test_statistics_during_period", "commit_message": "Use US_CUSTOMARY_SYSTEM in tests (#80658)\n\n* Use US_CUSTOMARY_SYSTEM in tests\r\n\r\n* Don't update test_unit_system", "code": "async def test_statistics_during_period(recorder_mock, hass, hass_ws_client):\n \n now = dt_util.utcnow()\n\n hass.config.units = 
US_CUSTOMARY_SYSTEM\n await async_setup_component(hass, \"sensor\", {})\n await async_recorder_block_till_done(hass)\n hass.states.async_set(\"sensor.test\", 10, attributes=POWER_SENSOR_KW_ATTRIBUTES)\n await async_wait_recording_done(hass)\n\n do_adhoc_statistics(hass, start=now)\n await async_wait_recording_done(hass)\n\n client = await hass_ws_client()\n await client.send_json(\n {\n \"id\": 1,\n \"type\": \"recorder/statistics_during_period\",\n \"start_time\": now.isoformat(),\n \"end_time\": now.isoformat(),\n \"statistic_ids\": [\"sensor.test\"],\n \"period\": \"hour\",\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {}\n\n await client.send_json(\n {\n \"id\": 2,\n \"type\": \"recorder/statistics_during_period\",\n \"start_time\": now.isoformat(),\n \"statistic_ids\": [\"sensor.test\"],\n \"period\": \"5minute\",\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\n \"sensor.test\": [\n {\n \"statistic_id\": \"sensor.test\",\n \"start\": now.isoformat(),\n \"end\": (now + timedelta(minutes=5)).isoformat(),\n \"mean\": approx(10),\n \"min\": approx(10),\n \"max\": approx(10),\n \"last_reset\": None,\n \"state\": None,\n \"sum\": None,\n }\n ]\n }\n\n\n@pytest.mark.parametrize(\n \"attributes, state, value, custom_units, converted_value\",\n [\n (DISTANCE_SENSOR_M_ATTRIBUTES, 10, 10, {\"distance\": \"cm\"}, 1000),\n (DISTANCE_SENSOR_M_ATTRIBUTES, 10, 10, {\"distance\": \"m\"}, 10),\n (DISTANCE_SENSOR_M_ATTRIBUTES, 10, 10, {\"distance\": \"in\"}, 10 / 0.0254),\n (POWER_SENSOR_KW_ATTRIBUTES, 10, 10, {\"power\": \"W\"}, 10000),\n (POWER_SENSOR_KW_ATTRIBUTES, 10, 10, {\"power\": \"kW\"}, 10),\n (PRESSURE_SENSOR_HPA_ATTRIBUTES, 10, 10, {\"pressure\": \"Pa\"}, 1000),\n (PRESSURE_SENSOR_HPA_ATTRIBUTES, 10, 10, {\"pressure\": \"hPa\"}, 10),\n (PRESSURE_SENSOR_HPA_ATTRIBUTES, 10, 10, {\"pressure\": \"psi\"}, 1000 / 6894.757),\n (SPEED_SENSOR_KPH_ATTRIBUTES, 10, 10, {\"speed\": \"m/s\"}, 2.77778),\n (SPEED_SENSOR_KPH_ATTRIBUTES, 10, 10, {\"speed\": \"km/h\"}, 10),\n (SPEED_SENSOR_KPH_ATTRIBUTES, 10, 10, {\"speed\": \"mph\"}, 6.21371),\n (TEMPERATURE_SENSOR_C_ATTRIBUTES, 10, 10, {\"temperature\": \"°C\"}, 10),\n (TEMPERATURE_SENSOR_C_ATTRIBUTES, 10, 10, {\"temperature\": \"°F\"}, 50),\n (TEMPERATURE_SENSOR_C_ATTRIBUTES, 10, 10, {\"temperature\": \"K\"}, 283.15),\n (VOLUME_SENSOR_M3_ATTRIBUTES, 10, 10, {\"volume\": \"m³\"}, 10),\n (VOLUME_SENSOR_M3_ATTRIBUTES, 10, 10, {\"volume\": \"ft³\"}, 353.14666),\n ],\n)", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"attributes, state, value, custom_units, converted_value\",\n [\n (DISTANCE_SENSOR_M_ATTRIBUTES, 10, 10, {\"distance\": \"cm\"}, 1000),\n (DISTANCE_SENSOR_M_ATTRIBUTES, 10, 10, {\"distance\": \"m\"}, 10),\n (DISTANCE_SENSOR_M_ATTRIBUTES, 10, 10, {\"distance\": \"in\"}, 10 / 0.0254),\n (POWER_SENSOR_KW_ATTRIBUTES, 10, 10, {\"power\": \"W\"}, 10000),\n (POWER_SENSOR_KW_ATTRIBUTES, 10, 10, {\"power\": \"kW\"}, 10),\n (PRESSURE_SENSOR_HPA_ATTRIBUTES, 10, 10, {\"pressure\": \"Pa\"}, 1000),\n (PRESSURE_SENSOR_HPA_ATTRIBUTES, 10, 10, {\"pressure\": \"hPa\"}, 10),\n (PRESSURE_SENSOR_HPA_ATTRIBUTES, 10, 10, {\"pressure\": \"psi\"}, 1000 / 6894.757),\n (SPEED_SENSOR_KPH_ATTRIBUTES, 10, 10, {\"speed\": \"m/s\"}, 2.77778),\n (SPEED_SENSOR_KPH_ATTRIBUTES, 10, 10, {\"speed\": \"km/h\"}, 10),\n (SPEED_SENSOR_KPH_ATTRIBUTES, 10, 10, {\"speed\": \"mph\"}, 
6.21371),\n (TEMPERATURE_SENSOR_C_ATTRIBUTES, 10, 10, {\"temperature\": \"°C\"}, 10),\n (TEMPERATURE_SENSOR_C_ATTRIBUTES, 10, 10, {\"temperature\": \"°F\"}, 50),\n (TEMPERATURE_SENSOR_C_ATTRIBUTES, 10, 10, {\"temperature\": \"K\"}, 283.15),\n (VOLUME_SENSOR_M3_ATTRIBUTES, 10, 10, {\"volume\": \"m³\"}, 10),\n (VOLUME_SENSOR_M3_ATTRIBUTES, 10, 10, {\"volume\": \"ft³\"}, 353.14666),\n ],\n)", "n_ast_errors": 1, "ast_levels": 18, "n_whitespaces": 719, "n_words": 217, "vocab_size": 113, "complexity": 1, "nloc": 49, "token_counts": 263, "n_ast_nodes": 863, "n_identifiers": 35, "random_cut": "async def test_statistics_during_period(recorder_mock, hass, hass_ws_client):\n \n now = dt_util.utcnow()\n\n hass.config.units = US_CUSTOMARY_SYSTEM\n await async_setup_component(hass, \"sensor\", {})\n await async_recorder_block_till_done(hass)\n hass.states.async_set(\"sensor.test\", 10, attributes=POWER_SENSOR_KW_ATTRIBUTES)\n await async_wait_recording_done(hass)\n\n do_adhoc_statistics(hass, start=now)\n await async_wait_recording_done(hass)\n\n client = await hass_ws_client()\n await client.send_json(\n {\n \"id\": 1,\n \"type\": \"recorder/statistics_during_period\",\n \"start_time\": now.isoformat(),\n \"end_time\": now.isoformat(),\n \"statistic_ids\": [\"sensor.test\"],\n \"period\": \"hour\",\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {}\n\n await client.send_json(\n {\n \"id\": 2,\n \"type\": \"recorder/statistics_during_period\",\n \"start_time\": now.isoformat(),\n \"statistic_ids\": [\"sensor.test\"],\n \"period\": \"5minute\",\n " }, { "id": 309021, "commit_id": "8915b73f724b58e93284a823c0d2e99fbfc13e84", "repo": "core", "path": "homeassistant/components/mazda/sensor.py", "file_name": "sensor.py", "fun_name": "_front_right_tire_pressure_value", "commit_message": "Use SensorEntityDescription in Mazda integration (#63423)\n\n* Use SensorEntityDescription in Mazda integration\r\n\r\n* Change lambdas to functions\r\n\r\n* Minor fixes\r\n\r\n* Address review comments", "code": "def _front_right_tire_pressure_value(data, unit_system):\n \n return round(data[\"status\"][\"tirePressure\"][\"frontRightTirePressurePsi\"])\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 22, "n_ast_nodes": 41, "n_identifiers": 4, "random_cut": "def _front_right_tire_pressure_value(data, unit_system):\n \n return round(data[\"status\"][\"tirePressure\"][\"frontRightTirePressurePsi\"])\n\n" }, { "id": 210497, "commit_id": "4984ff0ffe6ce0996907f1a6b47bbdfbd4b1a879", "repo": "PaddleDetection", "path": "deploy/python/preprocess.py", "file_name": "preprocess.py", "fun_name": "apply_image", "commit_message": "add YOLOX codes (#5727)", "code": "def apply_image(self, image, offsets, im_size, size):\n x, y = offsets\n im_h, im_w = im_size\n h, w = size\n canvas = np.ones((h, w, 3), dtype=np.float32)\n canvas *= np.array(self.fill_value, dtype=np.float32)\n canvas[y:y + im_h, x:x + im_w, :] = image.astype(np.float32)\n return canvas\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 87, "n_words": 39, "vocab_size": 30, "complexity": 1, "nloc": 8, "token_counts": 92, "n_ast_nodes": 133, "n_identifiers": 20, "random_cut": "def apply_image(self, image, offsets, im_size, 
size):\n x, y = offsets\n im_h, im_w = im_size\n h, w = size\n canvas = np.ones((h, w, 3), dtype=np.float32)\n canvas *= " }, { "id": 285905, "commit_id": "7fd72d9ee1e8847717195859bf6d608268a94e2f", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/forecast/test_forecast_controller.py", "file_name": "test_forecast_controller.py", "fun_name": "test_models", "commit_message": "Forecasting Menu [Work in Progress] (#1933)\n\n* Gave forecasting memory\r\n\r\n* Fixed scripts, refactored\r\n\r\n* FIxed poetry lock\r\n\r\n* edge case check for forecast target\r\n\r\n* Improved combine and load functionality\r\n\r\n* Cleaned up translations\r\n\r\n* Fixed issue with covariates\r\n\r\n* Fixed issue checking covariates\r\n\r\n* Another covariates check fix\r\n\r\n* Ignored regr and linregr warnings\r\n\r\n* Fixed covariate issues\r\n\r\n* switched from forecasting to forecast\r\n\r\n* Finished transition to forecast\r\n\r\n* Can add entire dataset with one command\r\n\r\n* Improved combine description\r\n\r\n* Removed naming covariates\r\n\r\n* Created new installation\r\n\r\n* typo\r\n\r\n* Make plot show dates if available\r\n\r\n* Added better handling or users without the menu\r\n\r\n* Removed unused file\r\n\r\n* Fix\r\n\r\n* Better handling for nontraditional datasets\r\n\r\n* Fixed black and pylint\r\n\r\n* Fixed tests\r\n\r\n* Added darts install to main tests\r\n\r\n* Working on darts with CI\r\n\r\n* Added back test file\r\n\r\n* Made large tables print better\r\n\r\n* naive baseline\r\n\r\n* typo\r\n\r\n* Finished naive\r\n\r\n* no dollar on prediction\r\n\r\n* fixed positive MAPE bug\r\n\r\n* quick refactoring\r\n\r\n* Fixed two different args for same thing\r\n\r\n* added extra patience\r\n\r\n* linreg mape fix\r\n\r\n* info fix\r\n\r\n* Refactored API, bumped to Darts 0.21.0\r\n\r\n* Added fixes\r\n\r\n* Increased verbosity for wrong column\r\n\r\n* Updated dependencies\r\n\r\n* Hid warnings\r\n\r\n* Fixed importing\r\n\r\n* Fixed tests\r\n\r\n* Fixed ugly seasonal plotting\r\n\r\n* Fixed forecast line color\r\n\r\n* Switched chart output to blue\r\n\r\n* Simplified lambda_price_prediction_color\r\n\r\n* fixed residuals\r\n\r\n* Chnage\r\n\r\n* Removed darts from CI per Chavi\r\n\r\n* Added fixes to tests\r\n\r\n* Added knnfix\r\n\r\n* Fixed issue where n!= o\r\n\r\n* Added changes\r\n\r\n* Added changes\r\n\r\n* Imrpoved forecast dash\r\n\r\n* Added Theo notebook\r\n\r\n* Added enhancements to dash\r\n\r\n* Added notebook\r\n\r\n* Added fix for jupyter lab\r\n\r\n* Added debug stuff\r\n\r\n* Change\r\n\r\n* Updated docs\r\n\r\n* Fixed formatting\r\n\r\n* Fixed formatting\r\n\r\n* Removed prints\r\n\r\n* Filtered some info\r\n\r\n* Added button to run model\r\n\r\n* Improved api\r\n\r\n* Added secret feautr (no peeking Martin)\r\n\r\n* Cleaned code\r\n\r\n* Fixed tests\r\n\r\n* Added test fixes\r\n\r\n* Added fixes\r\n\r\n* Fixes\r\n\r\n* FIxes for pres\r\n\r\n* Remove bad tests\r\n\r\n* Removed knn\r\n\r\n* Fixed issues with removing mc\r\n\r\n* doc for conda\r\n\r\n* Added forecast improvements\r\n\r\n* Added streamlit support\r\n\r\n* Fixed issues\r\n\r\n* fix expo with streamlit due to quantile()\r\n\r\n* fixed performance issues with streamlit for now..\r\n\r\n* clean up historical forecast with new trainer\r\n\r\n* quick fix for regression trainer params\r\n\r\n* Added fixes\r\n\r\n* quick fix for other fix for regression trainer params\r\n\r\n* table formatting for timestamp\r\n\r\n* potential fix for inf in feature engineered datasets\r\n\r\n* Basic working in new 
format\r\n\r\n* dw\r\n\r\n* Trying\r\n\r\n* Fixed issues\r\n\r\n* Improved graphing\r\n\r\n* fixing trainer for LR and formatting\r\n\r\n* doge and linting\r\n\r\n* page break\r\n\r\n* automatic cleaning of datasets\r\n\r\n* automatic cleaning of datasets- fix\r\n\r\n* Fixed forecast dates\r\n\r\n* Made dashboard prettier\r\n\r\n* Added fixes\r\n\r\n* Added fixes\r\n\r\n* Added options\r\n\r\n* Fixed error\r\n\r\n* remove caching\r\n\r\n* adding in spinner\r\n\r\n* Added vairable n_predict in streamlit\r\n\r\n* Added mypy fix\r\n\r\n* renaming and range change\r\n\r\n* new index for n predict\r\n\r\n* check positive float for window size\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* renaming\r\n\r\n* reorg files\r\n\r\n* Update _index.md\r\n\r\n* hidden which command for versions\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* which: ns parser\r\n\r\n* hugo for: which\r\n\r\n* hugo for: forecasting fix\r\n\r\n* formatting black\r\n\r\n* update stock controller test\r\n\r\n* Lay groundwork for better residual plotting\r\n\r\n* improved delete to allow for periods in title\r\n\r\n* improved automatic cleaning of inf\r\n\r\n* Added new API\r\n\r\n* Added new API\r\n\r\n* Added new API\r\n\r\n* formatting for black\r\n\r\n* Updated our testing CI\r\n\r\n* Reverted changes\r\n\r\n* Added forecast docs\r\n\r\n* Fixed mypy issues\r\n\r\n* Fixes tests\r\n\r\n* Did some refactoring, added a report\r\n\r\n* new api in streamlit\r\n\r\n* Added integrated tests\r\n\r\n* Update _index.md\r\n\r\n* improved loading in custom dataset\r\n\r\n* menu spacing\r\n\r\n* installer fixes\r\n\r\n* Added docs fixes\r\n\r\n* Adding comments to test if commit working\r\n\r\n* Fixed report\r\n\r\n* naming conventions\r\n\r\n* formatting\r\n\r\n* removing unused var\r\n\r\n* Made last report imporvements\r\n\r\n* Update README.md\r\n\r\n* Added fix\r\n\r\n* Switched to warning\r\n\r\n* Added fixes\r\n\r\n* Added fixes\r\n\r\n* Added fixes\r\n\r\n* Added fixes\r\n\r\n* Update economy av view test\r\n\r\n* Remove forgotten print statement\r\n\r\n* Update depencencies\r\n\r\n* Added verbosity to pytest\r\n\r\n* Added fixes\r\n\r\n* Fixed pylint\r\n\r\n* Fixed actions checkout\r\n\r\n* Added fixes\r\n\r\nCo-authored-by: colin99d \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: James Simmons \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: Theodore Aptekarev ", "code": "def test_models(mocker, opt, func):\n mocker.patch(base + \"helpers.check_parser_input\", return_value=True)\n mocker.patch(base + func)\n cont = fc.ForecastController()\n cont.datasets = {\"data\": df}\n getattr(cont, f\"call_{opt}\")([\"data\"])\n\n\n@pytest.mark.parametrize(\n \"opt\",\n [\n \"expo\",\n \"theta\",\n \"rnn\",\n \"nbeats\",\n \"tcn\",\n \"regr\",\n \"linregr\",\n \"brnn\",\n \"trans\",\n \"tft\",\n ],\n)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"opt\",\n [\n \"expo\",\n \"theta\",\n \"rnn\",\n \"nbeats\",\n \"tcn\",\n \"regr\",\n \"linregr\",\n \"brnn\",\n \"trans\",\n \"tft\",\n ],\n)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 126, "n_words": 35, "vocab_size": 32, "complexity": 1, "nloc": 6, "token_counts": 57, "n_ast_nodes": 
161, "n_identifiers": 16, "random_cut": "def test_models(mocker, opt, func):\n mocker.patch(base + \"helpers.check_parser_input\", return_valu" }, { "id": 150469, "commit_id": "6834db11f3ec4d0b9d9a6540633e1b363c11c889", "repo": "freqtrade", "path": "freqtrade/rpc/replicate/serializer.py", "file_name": "serializer.py", "fun_name": "_deserialize", "commit_message": "minor improvements and pairlist data transmission", "code": "def _deserialize(self, data):\n # The WebSocketSerializer gives bytes not string\n return json.loads(data)\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 25, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 23, "n_identifiers": 5, "random_cut": "def _deserialize(self, data):\n # The WebSocketSerializer gives bytes not string\n return json.lo" }, { "id": 201910, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/builtin_server/tests.py", "file_name": "tests.py", "fun_name": "write", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def write(self, data):\n \n\n assert isinstance(data, bytes), \"write() argument must be bytestring\"\n\n if not self.status:\n raise AssertionError(\"write() before start_response()\")\n\n elif not self.headers_sent:\n # Before the first output, send the stored headers\n self.bytes_sent = len(data) # make sure we know content-length\n self.send_headers()\n else:\n self.bytes_sent += len(data)\n\n # XXX check Content-Length and truncate if too many bytes written?\n data = BytesIO(data)\n for chunk in iter(lambda: data.read(MAX_SOCKET_CHUNK_SIZE), b\"\"):\n self._write(chunk)\n self._flush()\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 200, "n_words": 66, "vocab_size": 58, "complexity": 4, "nloc": 13, "token_counts": 92, "n_ast_nodes": 157, "n_identifiers": 18, "random_cut": "def write(self, data):\n \n\n assert isinstance(data, bytes), \"write() argument must be bytestring\"\n\n if not self.status:\n raise AssertionError(\"write() before start_response()\")\n\n elif not self.headers_sent:\n # Before the first output, send the stored headers\n self.bytes_sent = len(data) # make sure we know content-length\n self.send_headers()\n else:\n self.bytes_sent += len(data)\n\n " }, { "id": 179632, "commit_id": "8e1577e6debd76caffac1b1102a00f94348d7a3f", "repo": "gradio", "path": "gradio/state.py", "file_name": "state.py", "fun_name": "__setattr__", "commit_message": "state fixes; deprecation", "code": "def __setattr__(self, name, value):\n if name.startswith(\"_\"):\n self.__dict__[name] = value\n else:\n StateHolder.state_dict[(self.__id, name)] = value\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 49, "n_words": 14, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 41, "n_ast_nodes": 64, "n_identifiers": 9, "random_cut": "def __setattr__(self, name, value):\n if name.startswith(\"_\"):\n self.__dict__" }, { "id": 147088, "commit_id": "aaf47b2493beb985bfbc52dbdf1f52fc48377d74", "repo": "ray", "path": "python/ray/serve/tests/test_cli.py", "file_name": "test_cli.py", "fun_name": "test_run_deployment_node", "commit_message": "[serve] Implement `serve.run()` and `Application` (#23157)\n\nThese changes expose `Application` as a public 
API. They also introduce a new public method, `serve.run()`, which allows users to deploy their `Applications` or `DeploymentNodes`. Additionally, the Serve CLI's `run` command and Serve's REST API are updated to use `Applications` and `serve.run()`.\r\n\r\nCo-authored-by: Edward Oakes ", "code": "def test_run_deployment_node(ray_start_stop):\n # Tests serve run with specified args and kwargs\n\n # Deploy via import path\n p = subprocess.Popen(\n [\n \"serve\",\n \"run\",\n \"--address=auto\",\n \"ray.serve.tests.test_cli.molly_macaw\",\n ]\n )\n wait_for_condition(lambda: ping_endpoint(\"Macaw\") == \"Molly is green!\", timeout=10)\n p.send_signal(signal.SIGINT)\n p.wait()\n assert ping_endpoint(\"Macaw\") == \"connection error\"\n\n\n@serve.deployment", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@serve.deployment", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 121, "n_words": 41, "vocab_size": 38, "complexity": 1, "nloc": 13, "token_counts": 57, "n_ast_nodes": 113, "n_identifiers": 14, "random_cut": "def test_run_deployment_node(ray_start_stop):\n # Tests serve run with specified args and kwargs\n\n # Deploy via import path\n p = subprocess.Popen(\n [\n \"serve\",\n \"run\",\n \"--address=auto\",\n \"ray.serve.tests.test_cli.molly_macaw\",\n ]\n )\n wait_for_condition(lambda: ping_endpoint(\"Macaw\") == \"Molly is green!\", timeout=10)\n p.send_signal(signal.SIGINT)\n p.wait()\n assert ping_endpoint(\"Macaw\") == \"connection error\"\n\n\n@serve.deployment" }, { "id": 84776, "commit_id": "bd9a1dc9710293e36d2d47d970d7afb95100c2e6", "repo": "zulip", "path": "zerver/tests/test_widgets.py", "file_name": "test_widgets.py", "fun_name": "test_poll_command_extra_data", "commit_message": "tests: Consistently JSON-encode ‘to’ parameter\n\nAlthough our POST /messages handler accepts the ‘to’ parameter with or\nwithout JSON encoding, there are two problems with passing it as an\nunencoded string.\n\nFirstly, you’d fail to send a message to a stream named ‘true’ or\n‘false’ or ‘null’ or ‘2022’, as the JSON interpretation is prioritized\nover the plain string interpretation.\n\nSecondly, and more importantly for our tests, it violates our OpenAPI\nschema, which requires the parameter to be JSON-encoded. 
This is\nbecause OpenAPI has no concept of a parameter that’s “optionally\nJSON-encoded”, nor should it: such a parameter cannot be unambiguously\ndecoded for the reason above.\n\nOur version of openapi-core doesn’t currently detect this schema\nviolation, but after the next upgrade it will.\n\nSigned-off-by: Anders Kaseorg ", "code": "def test_poll_command_extra_data(self) -> None:\n sender = self.example_user(\"cordelia\")\n stream_name = \"Verona\"\n # We test for both trailing and leading spaces, along with blank lines\n # for the poll options.\n content = \"/poll What is your favorite color?\\n\\nRed\\nGreen \\n\\n Blue\\n - Yellow\"\n\n payload = dict(\n type=\"stream\",\n to=orjson.dumps(stream_name).decode(),\n topic=\"whatever\",\n content=content,\n )\n result = self.api_post(sender, \"/api/v1/messages\", payload)\n self.assert_json_success(result)\n\n message = self.get_last_message()\n self.assertEqual(message.content, content)\n\n expected_submessage_content = dict(\n widget_type=\"poll\",\n extra_data=dict(\n options=[\"Red\", \"Green\", \"Blue\", \"Yellow\"],\n question=\"What is your favorite color?\",\n ),\n )\n\n submessage = SubMessage.objects.get(message_id=message.id)\n self.assertEqual(submessage.msg_type, \"widget\")\n self.assertEqual(orjson.loads(submessage.content), expected_submessage_content)\n\n # Now don't supply a question.\n\n content = \"/poll\"\n payload[\"content\"] = content\n result = self.api_post(sender, \"/api/v1/messages\", payload)\n self.assert_json_success(result)\n\n expected_submessage_content = dict(\n widget_type=\"poll\",\n extra_data=dict(\n options=[],\n question=\"\",\n ),\n )\n\n message = self.get_last_message()\n self.assertEqual(message.content, content)\n submessage = SubMessage.objects.get(message_id=message.id)\n self.assertEqual(submessage.msg_type, \"widget\")\n self.assertEqual(orjson.loads(submessage.content), expected_submessage_content)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 489, "n_words": 121, "vocab_size": 77, "complexity": 1, "nloc": 40, "token_counts": 263, "n_ast_nodes": 445, "n_identifiers": 33, "random_cut": "def test_poll_command_extra_data(self) -> None:\n sender = self.example_user(\"cordelia\")\n stream_name = \"Verona\"\n # We test for both trailing and leading spaces, alo" }, { "id": 283255, "commit_id": "b71abcfbf4d7e8ac1855522aff0378e13c8b5362", "repo": "OpenBBTerminal", "path": "openbb_terminal/custom/prediction_techniques/pred_controller.py", "file_name": "pred_controller.py", "fun_name": "update_runtime_choices", "commit_message": "Updating some names (#1575)\n\n* quick econ fix\r\n\r\n* black\r\n\r\n* keys and feature flags\r\n\r\n* terminal name :eyes:\r\n\r\n* some more replacements\r\n\r\n* some more replacements\r\n\r\n* edit pyproject\r\n\r\n* gst -> openbb\r\n\r\n* add example portfolios back to git\r\n\r\n* Update api from gst\r\n\r\n* sorry. 
skipping some tests\r\n\r\n* another round of names\r\n\r\n* another round of test edits\r\n\r\n* Missed some .gst refs and update timezone\r\n\r\n* water mark stuff\r\n\r\n* Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS\r\n\r\n* fix more GST to OpenBB Terminal\r\n\r\n* Logging : merge conflicts with main\r\n\r\n* Revert wrong files\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA ", "code": "def update_runtime_choices(self):\n \n if session and obbff.USE_PROMPT_TOOLKIT:\n self.choices[\"pick\"] = {c: None for c in list(self.df.columns)}\n self.completer = NestedCompleter.from_nested_dict(self.choices)\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 49, "n_words": 17, "vocab_size": 16, "complexity": 4, "nloc": 4, "token_counts": 48, "n_ast_nodes": 79, "n_identifiers": 13, "random_cut": "def update_runtime_choices(self):\n \n if session and obbff.USE_PROMPT_TOOLKIT:\n self.choices[\"pick\"] = {c: None for c in list(self.df." }, { "id": 281020, "commit_id": "d5d581b59b614d45f105f3bda91645667ad623b8", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/stocks/stocks_controller.py", "file_name": "stocks_controller.py", "fun_name": "call_reset", "commit_message": "improve usage of timezone in terminal (#1126)\n\n* improve usage of timezone in terminal\r\n\r\n* lint\r\n\r\n* update dependencies\r\n\r\n* address James review comments\r\n\r\n* remove seconds from time on cmd line\r\n\r\n* skip test", "code": "def call_reset(self, _):\n \n if self.ticker:\n if self.suffix:\n self.queue.insert(0, f\"load {self.ticker}.{self.suffix}\")\n else:\n self.queue.insert(0, f\"load {self.ticker}\")\n self.queue.insert(0, \"stocks\")\n self.queue.insert(0, \"reset\")\n self.queue.insert(0, \"quit\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 107, "n_words": 20, "vocab_size": 14, "complexity": 3, "nloc": 9, "token_counts": 72, "n_ast_nodes": 141, "n_identifiers": 7, "random_cut": "def call_reset(self, _):\n \n if self.ticker:\n if self.suffix:\n self.queue.insert(0, f\"load {self.ticker}.{self.suffix}\")\n else:\n self.queue.insert(0, f\"load {self.ticker}\")\n self.queue.in" }, { "id": 211310, "commit_id": "10e7fe232c83dacee0f517d78644b705e5d24a57", "repo": "PaddleDetection", "path": "deploy/python/utils.py", "file_name": "utils.py", "fun_name": "argsparser", "commit_message": "[deploy] alter save coco format json in deploy/python/infer.py (#6705)", "code": "def argsparser():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\n \"--model_dir\",\n type=str,\n default=None,\n help=(\"Directory include:'model.pdiparams', 'model.pdmodel', \"\n \"'infer_cfg.yml', created by tools/export_model.py.\"),\n required=True)\n parser.add_argument(\n \"--image_file\", type=str, default=None, help=\"Path of image file.\")\n parser.add_argument(\n \"--image_dir\",\n type=str,\n default=None,\n help=\"Dir of image file, `image_file` has a higher priority.\")\n parser.add_argument(\n \"--batch_size\", type=int, default=1, help=\"batch_size for inference.\")\n parser.add_argument(\n \"--video_file\",\n type=str,\n default=None,\n help=\"Path of video file, `video_file` or `camera_id` has a highest priority.\"\n )\n parser.add_argument(\n \"--camera_id\",\n type=int,\n default=-1,\n 
help=\"device id of camera to predict.\")\n parser.add_argument(\n \"--threshold\", type=float, default=0.5, help=\"Threshold of score.\")\n parser.add_argument(\n \"--output_dir\",\n type=str,\n default=\"output\",\n help=\"Directory of output visualization files.\")\n parser.add_argument(\n \"--run_mode\",\n type=str,\n default='paddle',\n help=\"mode of running(paddle/trt_fp32/trt_fp16/trt_int8)\")\n parser.add_argument(\n \"--device\",\n type=str,\n default='cpu',\n help=\"Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU.\"\n )\n parser.add_argument(\n \"--use_gpu\",\n type=ast.literal_eval,\n default=False,\n help=\"Deprecated, please use `--device`.\")\n parser.add_argument(\n \"--run_benchmark\",\n type=ast.literal_eval,\n default=False,\n help=\"Whether to predict a image_file repeatedly for benchmark\")\n parser.add_argument(\n \"--enable_mkldnn\",\n type=ast.literal_eval,\n default=False,\n help=\"Whether use mkldnn with CPU.\")\n parser.add_argument(\n \"--enable_mkldnn_bfloat16\",\n type=ast.literal_eval,\n default=False,\n help=\"Whether use mkldnn bfloat16 inference with CPU.\")\n parser.add_argument(\n \"--cpu_threads\", type=int, default=1, help=\"Num of threads with CPU.\")\n parser.add_argument(\n \"--trt_min_shape\", type=int, default=1, help=\"min_shape for TensorRT.\")\n parser.add_argument(\n \"--trt_max_shape\",\n type=int,\n default=1280,\n help=\"max_shape for TensorRT.\")\n parser.add_argument(\n \"--trt_opt_shape\",\n type=int,\n default=640,\n help=\"opt_shape for TensorRT.\")\n parser.add_argument(\n \"--trt_calib_mode\",\n type=bool,\n default=False,\n help=\"If the model is produced by TRT offline quantitative \"\n \"calibration, trt_calib_mode need to set True.\")\n parser.add_argument(\n '--save_images',\n action='store_true',\n default=False,\n help='Save visualization image results.')\n parser.add_argument(\n '--save_mot_txts',\n action='store_true',\n help='Save tracking results (txt).')\n parser.add_argument(\n '--save_mot_txt_per_img',\n action='store_true',\n help='Save tracking results (txt) for each image.')\n parser.add_argument(\n '--scaled',\n type=bool,\n default=False,\n help=\"Whether coords after detector outputs are scaled, False in JDE YOLOv3 \"\n \"True in general detector.\")\n parser.add_argument(\n \"--tracker_config\", type=str, default=None, help=(\"tracker donfig\"))\n parser.add_argument(\n \"--reid_model_dir\",\n type=str,\n default=None,\n help=(\"Directory include:'model.pdiparams', 'model.pdmodel', \"\n \"'infer_cfg.yml', created by tools/export_model.py.\"))\n parser.add_argument(\n \"--reid_batch_size\",\n type=int,\n default=50,\n help=\"max batch_size for reid model inference.\")\n parser.add_argument(\n '--use_dark',\n type=ast.literal_eval,\n default=True,\n help='whether to use darkpose to get better keypoint position predict ')\n parser.add_argument(\n \"--action_file\",\n type=str,\n default=None,\n help=\"Path of input file for action recognition.\")\n parser.add_argument(\n \"--window_size\",\n type=int,\n default=50,\n help=\"Temporal size of skeleton feature for action recognition.\")\n parser.add_argument(\n \"--random_pad\",\n type=ast.literal_eval,\n default=False,\n help=\"Whether do random padding for action recognition.\")\n parser.add_argument(\n \"--save_results\",\n action='store_true',\n default=False,\n help=\"Whether save detection result to file using coco format\")\n parser.add_argument(\n '--use_coco_category',\n action='store_true',\n default=False,\n help='Whether to use the 
coco format dictionary `clsid2catid`')\n parser.add_argument(\n \"--slice_infer\",\n action='store_true',\n help=\"Whether to slice the image and merge the inference results for small object detection.\"\n )\n parser.add_argument(\n '--slice_size',\n nargs='+',\n type=int,\n default=[640, 640],\n help=\"Height of the sliced image.\")\n parser.add_argument(\n \"--overlap_ratio\",\n nargs='+',\n type=float,\n default=[0.25, 0.25],\n help=\"Overlap height ratio of the sliced image.\")\n parser.add_argument(\n \"--combine_method\",\n type=str,\n default='nms',\n help=\"Combine method of the sliced images' detection results, choose in ['nms', 'nmm', 'concat'].\"\n )\n parser.add_argument(\n \"--match_threshold\",\n type=float,\n default=0.6,\n help=\"Combine method matching threshold.\")\n parser.add_argument(\n \"--match_metric\",\n type=str,\n default='iou',\n help=\"Combine method matching metric, choose in ['iou', 'ios'].\")\n return parser\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 1542, "n_words": 433, "vocab_size": 238, "complexity": 1, "nloc": 183, "token_counts": 739, "n_ast_nodes": 1210, "n_identifiers": 19, "random_cut": "def argsparser():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\n \"--model_dir\",\n type=str,\n default=None,\n help=(\"Directory include:'model.pdiparams', 'model.pdmodel', \"\n \"'infer_cfg.yml', created by tools/export_model.py.\"),\n required=True)\n parser.add_argument(\n \"--image_file\", type=str, default=None, help=\"Path of image file.\")\n parser.add_argument(\n \"--image_dir\",\n type=str,\n default=None,\n help=\"Dir of image file, `image_file` has a higher priority.\")\n parser.add_argument(\n \"--batch_size\", type=int, default=1, help=\"batch_size for inference.\")\n parser.add_argument(\n \"--video_file\",\n type=str,\n default=None,\n help=\"Path of video file, `video_file` or `camera_id` has a highest priority.\"\n )\n parser.add_argument(\n \"--camera_id\",\n type=int,\n default=-1,\n help=\"device id of camera to predict.\")\n parser.add_argument(\n \"--threshold\", type=float, default=0.5, help=\"Threshold of score.\")\n parser.add_argument(\n \"--output_dir\",\n type=str,\n default=\"output\",\n help=\"Directory of output visualization files.\")\n parser.add_argument(\n \"--run_mode\",\n type=str,\n default='paddle',\n help=\"mode of running(paddle/trt_fp32/trt_fp16/trt_int8)\")\n parser.add_argument(\n \"--device\",\n type=str,\n default='cpu',\n help=\"Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU.\"\n )\n parser.add_argument(\n \"--use_gpu\",\n type=ast.literal_eval,\n default=False,\n help=\"Deprecated, please use `--device`.\")\n parser.add_argument(\n \"--run_benchmark\",\n type=ast.literal_eval,\n default=False,\n help=\"Whether to predict a image_file repeatedly for benchmark\")\n parser.add_argument(\n \"--enable_mkldnn\",\n type=ast.literal_eval,\n default=False,\n help=\"Whether use mkldnn with CPU.\")\n parser.add_argument(\n \"--enable_mkldnn_bfloat16\",\n type=ast.literal_eval,\n default=False,\n help=\"Whether use mkldnn bfloat16 inference with CPU.\")\n parser.add_argument(\n \"--cpu_threads\", type=int, default=1, help=\"Num of threads with CPU.\")\n parser.add_argument(\n \"--trt_min_shape\", type=int, default=1, help=\"min_shape for TensorRT.\")\n parser.add_argument(\n \"--trt_max_shape\",\n type=int,\n default=1280,\n help=\"max_shape for 
TensorRT.\")\n parser.add_argument(\n \"--trt_opt_shape\",\n type=int,\n default=640,\n help=\"opt_shape for TensorRT.\")\n parser.add_argument(\n \"--trt_calib_mode\",\n type=bool,\n default=False,\n help=\"If the model is " }, { "id": 86151, "commit_id": "1449643f60404c3ec50ec4eab11bc1c3b3bfe1ab", "repo": "sentry", "path": "tests/sentry/models/test_groupsnooze.py", "file_name": "test_groupsnooze.py", "fun_name": "test_user_rate_without_test", "commit_message": "fix(tests): Use `RedisSnubaTSDB` by default in all tests (#39297)\n\n`RedisSnubaTSDB` has been the default in productions. To make our tests\r\nreflect production we should use it there as well.\r\n\r\nRemoved most uses of `tsdb.incr` from the tests. The only ones left are\r\nplaces that are actually still using `tsdb.incr`.", "code": "def test_user_rate_without_test(self):\n snooze = GroupSnooze.objects.create(group=self.group, count=100, window=60)\n assert snooze.is_valid(test_rates=False)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 22, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 36, "n_ast_nodes": 55, "n_identifiers": 11, "random_cut": "def test_user_rate_without_test(self):\n sno" }, { "id": 7237, "commit_id": "aa0c63bf2ed825eb3ca8eff8a002d5ccbe395173", "repo": "ludwig", "path": "tests/ludwig/utils/test_defaults.py", "file_name": "test_defaults.py", "fun_name": "test_merge_with_defaults_early_stop", "commit_message": "feat: Added model type GBM (LightGBM tree learner), as an alternative to ECD (#2027)", "code": "def test_merge_with_defaults_early_stop(use_train, use_hyperopt_scheduler):\n all_input_features = [\n binary_feature(),\n category_feature(),\n number_feature(),\n text_feature(),\n ]\n all_output_features = [\n category_feature(),\n sequence_feature(),\n vector_feature(),\n ]\n\n # validate config with all features\n config = {\n \"input_features\": all_input_features,\n \"output_features\": all_output_features,\n HYPEROPT: HYPEROPT_CONFIG,\n }\n config = copy.deepcopy(config)\n\n if use_train:\n config[TRAINER] = {\"batch_size\": 42}\n\n if use_hyperopt_scheduler:\n # hyperopt scheduler cannot be used with early stopping\n config[HYPEROPT][\"executor\"][SCHEDULER] = SCHEDULER_DICT\n\n merged_config = merge_with_defaults(config)\n\n expected = -1 if use_hyperopt_scheduler else ECDTrainerConfig().early_stop\n assert merged_config[TRAINER][\"early_stop\"] == expected\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 200, "n_words": 71, "vocab_size": 54, "complexity": 4, "nloc": 25, "token_counts": 123, "n_ast_nodes": 199, "n_identifiers": 24, "random_cut": "def test_merge_with_defaults_early_stop(use_train, use_hyperopt_scheduler):\n all_input_features = [\n binary_feature(),\n category_feature(),\n number_feature(),\n text_feature(),\n ]\n all_output_features = [\n category_feature(),\n sequence_feature(),\n vector_feature(),\n ]\n\n # validate config with all features\n config = {\n \"input_features\": all_input_features,\n \"output_features\": all_output_features,\n HYPEROPT: HYPEROPT_CONFIG,\n }\n config = copy.deepcopy(config)\n\n if use_train:\n config[TRAINER] = {\"batch_size\": 42}\n\n if use_hyperopt_scheduler:\n # hyperopt sched" }, { "id": 61098, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": 
".venv/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/candidates.py", "file_name": "candidates.py", "fun_name": "make_install_req_from_dist", "commit_message": "upd; format", "code": "def make_install_req_from_dist(dist, template):\n # type: (Distribution, InstallRequirement) -> InstallRequirement\n project_name = canonicalize_name(dist.project_name)\n if template.req:\n line = str(template.req)\n elif template.link:\n line = f\"{project_name} @ {template.link.url}\"\n else:\n line = f\"{project_name}=={dist.parsed_version}\"\n ireq = install_req_from_line(\n line,\n user_supplied=template.user_supplied,\n comes_from=template.comes_from,\n use_pep517=template.use_pep517,\n isolated=template.isolated,\n constraint=template.constraint,\n options=dict(\n install_options=template.install_options,\n global_options=template.global_options,\n hashes=template.hash_options,\n ),\n )\n ireq.satisfied_by = dist\n return ireq\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 184, "n_words": 48, "vocab_size": 40, "complexity": 3, "nloc": 23, "token_counts": 111, "n_ast_nodes": 192, "n_identifiers": 25, "random_cut": "def make_install_req_from_dist(dist, template):\n # type: (Distribution, InstallRequirement) -> InstallRequirement\n project_name = canonicalize_name(dist.project_name)\n if template.req:\n line = str(template.req)\n elif template.link:\n line = f\"{project_name} @ {template.link.url}\"\n else:\n line = f\"{project_name}=={dist.parsed_version}\"\n ireq = install_req_from_line(\n line,\n user_supplied=template.user_supplied,\n comes_from=template.comes_from,\n use_pep517=t" }, { "id": 99954, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_environments.py", "file_name": "test_organization_environments.py", "fun_name": "test_project_filter", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_project_filter(self):\n other_project = self.create_project()\n project_env = self.create_environment(name=\"project\", project=self.project)\n other_project_env = self.create_environment(name=\"other\", project=other_project)\n\n response = self.get_success_response(\n self.project.organization.slug, project=[self.project.id]\n )\n assert response.data == serialize([project_env])\n response = self.get_success_response(\n self.project.organization.slug, project=[other_project.id]\n )\n assert response.data == serialize([other_project_env])\n response = self.get_success_response(\n self.project.organization.slug, project=[self.project.id, other_project.id]\n )\n assert response.data == serialize([other_project_env, project_env])\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 161, "n_words": 45, "vocab_size": 26, "complexity": 1, "nloc": 16, "token_counts": 151, "n_ast_nodes": 233, "n_identifiers": 16, "random_cut": "def test_project_filter(self):\n other_project = self.create_project()\n project_env = self.create_environment(name=\"project\", project=self.project)\n other_project_env = self.create_environment(name=\"other\", project=other_project)\n\n response = self.get_success_response(\n self.project.organization.slug, project=[self.project.id]\n )\n assert r" }, { "id": 68739, "commit_id": "62857e3e080b3888f40a09112be63238974dd175", "repo": "erpnext", "path": 
"erpnext/manufacturing/doctype/bom_update_log/bom_update_log.py", "file_name": "bom_update_log.py", "fun_name": "resume_bom_cost_update_jobs", "commit_message": "feat: Track progress in Log Batch/Job wise\n\n- This was done due to stale reads while the background jobs tried updating status of the log\n- Added a table where all bom jobs within log will be tracked with what level they are processing\n- Cron job will check if table jobs are all processed every 5 mins\n- If yes, it will prepare parents and call `process_boms_cost_level_wise` to start next level\n- If pending jobs, do nothing\n- Current BOM Level is being tracked that helps adding rows to the table\n- Individual bom cost jobs (that are queued) will process and update boms > will update BOM Update Batch table row with list of updated BOMs", "code": "def resume_bom_cost_update_jobs():\n\t\n\n\tin_progress_logs = frappe.db.get_all(\n\t\t\"BOM Update Log\",\n\t\t{\"update_type\": \"Update Cost\", \"status\": \"In Progress\"},\n\t\t[\"name\", \"processed_boms\", \"current_level\"],\n\t)\n\tif not in_progress_logs:\n\t\treturn\n\n\tfor log in in_progress_logs:\n\t\t# check if all log batches of current level are processed\n\t\tbom_batches = frappe.db.get_all(\n\t\t\t\"BOM Update Batch\", {\"parent\": log.name, \"level\": log.current_level}, [\"name\", \"boms_updated\"]\n\t\t)\n\t\tincomplete_level = any(not row.get(\"boms_updated\") for row in bom_batches)\n\t\tif not bom_batches or incomplete_level:\n\t\t\tcontinue\n\n\t\t# Prep parent BOMs & updated processed BOMs for next level\n\t\tcurrent_boms, processed_boms = get_processed_current_boms(log, bom_batches)\n\t\tparent_boms = get_next_higher_level_boms(child_boms=current_boms, processed_boms=processed_boms)\n\n\t\tset_values_in_log(\n\t\t\tlog.name,\n\t\t\tvalues={\n\t\t\t\t\"processed_boms\": json.dumps(processed_boms),\n\t\t\t\t\"parent_boms\": json.dumps(parent_boms),\n\t\t\t\t\"status\": \"Completed\" if not parent_boms else \"In Progress\",\n\t\t\t},\n\t\t\tcommit=True,\n\t\t)\n\n\t\tif parent_boms: # there is a next level to process\n\t\t\tprocess_boms_cost_level_wise(update_doc=frappe.get_doc(\"BOM Update Log\", log.name))\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 87, "n_words": 116, "vocab_size": 80, "complexity": 8, "nloc": 28, "token_counts": 180, "n_ast_nodes": 311, "n_identifiers": 27, "random_cut": "def resume_bom_cost_update_jobs():\n\t\n\n\tin_progress_logs = frappe.db.get_all(\n\t\t\"BOM Update Log\",\n\t\t{\"update_type\": \"Update Cost\", \"status\": \"In Progress\"},\n\t\t[\"name\", \"processed_boms\", \"current_level\"],\n\t)\n\tif not in_progress_logs:\n\t\treturn\n\n\tfor log in in_progress_logs:\n\t\t# check if all log batches of current level are processed\n\t\tbom_batches = frappe.db.get_all(\n\t\t\t\"BOM Update Batch\", {\"parent\": log.name, \"level\": log.current_level}, [\"name\", \"boms_updated\"]\n\t\t)\n\t\tincomplete_level = any(not row.get(\"boms_updated\") for row in bom_batches)\n\t\tif not bom_batches or incomplete_level:\n\t\t\tcontinue\n\n\t\t# Prep parent BOMs & updated processed BOMs for next level\n\t\tcurrent_boms, processed_boms = get_processed_current_boms(log, bom_batches)\n\t\tparent_boms = get_next_higher_level_boms(child_boms=current_boms, processed_boms=processed_boms)\n\n\t\tset_values_in_log(\n\t\t\tlog.name,\n\t\t\tvalues={\n\t\t\t\t\"processed_boms\": json.dumps(processed_boms),\n\t\t\t\t\"parent_boms\": 
json.dumps(parent_boms),\n\t\t\t\t\"status\": \"Completed\" if not parent_boms else \"In Progress\",\n\t\t\t},\n\t\t\tcommit=True,\n\t\t)\n\n\t\tif parent_boms: # there is a next l" }, { "id": 46966, "commit_id": "6933022e94acf139b2dea9a589bb8b25c62a5d20", "repo": "airflow", "path": "airflow/providers/docker/operators/docker_swarm.py", "file_name": "docker_swarm.py", "fun_name": "on_kill", "commit_message": "Fix new MyPy errors in main (#22884)\n\nThose MyPe errors are side effect of some new dependencies.", "code": "def on_kill(self) -> None:\n if self.cli is not None and self.service is not None:\n self.log.info('Removing docker service: %s', self.service['ID'])\n self.cli.remove_service(self.service['ID'])\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 48, "n_words": 20, "vocab_size": 17, "complexity": 3, "nloc": 4, "token_counts": 50, "n_ast_nodes": 82, "n_identifiers": 7, "random_cut": "def on_kill(self) -> None:\n " }, { "id": 102720, "commit_id": "89f15f591cc3cc3e8ae40e95ffc802f7f2561ece", "repo": "chia-blockchain", "path": "tests/core/full_node/test_mempool.py", "file_name": "test_mempool.py", "fun_name": "test_agg_sig_mixed", "commit_message": "Merge standalone wallet into main (#9793)\n\n* wallet changes from pac\r\n\r\n* cat changes\r\n\r\n* pool tests\r\n\r\n* pooling tests passing\r\n\r\n* offers\r\n\r\n* lint\r\n\r\n* mempool_mode\r\n\r\n* black\r\n\r\n* linting\r\n\r\n* workflow files\r\n\r\n* flake8\r\n\r\n* more cleanup\r\n\r\n* renamed\r\n\r\n* remove obsolete test, don't cast announcement\r\n\r\n* memos are not only bytes32\r\n\r\n* trade renames\r\n\r\n* fix rpcs, block_record\r\n\r\n* wallet rpc, recompile settlement clvm\r\n\r\n* key derivation\r\n\r\n* clvm tests\r\n\r\n* lgtm issues and wallet peers\r\n\r\n* stash\r\n\r\n* rename\r\n\r\n* mypy linting\r\n\r\n* flake8\r\n\r\n* bad initializer\r\n\r\n* flaky tests\r\n\r\n* Make CAT wallets only create on verified hints (#9651)\r\n\r\n* fix clvm tests\r\n\r\n* return to log lvl warn\r\n\r\n* check puzzle unhardened\r\n\r\n* public key, not bytes. 
api caching change\r\n\r\n* precommit changes\r\n\r\n* remove unused import\r\n\r\n* mypy ci file, tests\r\n\r\n* ensure balance before creating a tx\r\n\r\n* Remove CAT logic from full node test (#9741)\r\n\r\n* Add confirmations and sleeps for wallet (#9742)\r\n\r\n* use pool executor\r\n\r\n* rever merge mistakes/cleanup\r\n\r\n* Fix trade test flakiness (#9751)\r\n\r\n* remove precommit\r\n\r\n* older version of black\r\n\r\n* lint only in super linter\r\n\r\n* Make announcements in RPC be objects instead of bytes (#9752)\r\n\r\n* Make announcements in RPC be objects instead of bytes\r\n\r\n* Lint\r\n\r\n* misc hint'ish cleanup (#9753)\r\n\r\n* misc hint'ish cleanup\r\n\r\n* unremove some ci bits\r\n\r\n* Use main cached_bls.py\r\n\r\n* Fix bad merge in main_pac (#9774)\r\n\r\n* Fix bad merge at 71da0487b9cd5564453ec24b76f1ac773c272b75\r\n\r\n* Remove unused ignores\r\n\r\n* more unused ignores\r\n\r\n* Fix bad merge at 3b143e705057d6c14e2fb3e00078aceff0552d7e\r\n\r\n* One more byte32.from_hexstr\r\n\r\n* Remove obsolete test\r\n\r\n* remove commented out\r\n\r\n* remove duplicate payment object\r\n\r\n* remove long sync\r\n\r\n* remove unused test, noise\r\n\r\n* memos type\r\n\r\n* bytes32\r\n\r\n* make it clear it's a single state at a time\r\n\r\n* copy over asset ids from pacr\r\n\r\n* file endl linter\r\n\r\n* Update chia/server/ws_connection.py\r\n\r\nCo-authored-by: dustinface <35775977+xdustinface@users.noreply.github.com>\r\n\r\nCo-authored-by: Matt Hauff \r\nCo-authored-by: Kyle Altendorf \r\nCo-authored-by: dustinface <35775977+xdustinface@users.noreply.github.com>", "code": "def test_agg_sig_mixed(self):\n npc_list = [\n NPC(self.h1, self.h2, [(self.ASM, [ConditionWithArgs(self.ASM, [bytes(self.pk1), b\"msg1\"])])]),\n NPC(self.h1, self.h2, [(self.ASU, [ConditionWithArgs(self.ASU, [bytes(self.pk2), b\"msg2\"])])]),\n ]\n pks, msgs = pkm_pairs(npc_list, b\"foobar\")\n assert [bytes(pk) for pk in pks] == [bytes(self.pk1), bytes(self.pk2)]\n assert msgs == [b\"msg1\" + self.h1 + b\"foobar\", b\"msg2\"]\n", "url": "https://github.com/Chia-Network/chia-blockchain.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 97, "n_words": 41, "vocab_size": 33, "complexity": 2, "nloc": 8, "token_counts": 144, "n_ast_nodes": 211, "n_identifiers": 16, "random_cut": "def test_agg_sig_mixed(self):\n npc_list = [\n NPC(self.h1, self.h2, [(self.ASM, [ConditionWithArgs(self.ASM, [bytes(self.pk1), b\"msg1\"])])]),\n NPC(self.h1, self.h2, [(self.ASU, [ConditionWithArgs(self.ASU, [bytes(self.pk2), b\"msg2\"])])]),\n ]\n pks, msgs = pkm_pairs(npc_list, b\"foobar\")" }, { "id": 315032, "commit_id": "0a65f53356e124592cae37ea1f1873b789e0726b", "repo": "core", "path": "homeassistant/components/life360/device_tracker.py", "file_name": "device_tracker.py", "fun_name": "entity_picture", "commit_message": "Convert life360 integration to entity based (#72461)\n\n* Convert life360 integration to entity based\r\n\r\n* Improve config_flow.py type checking\r\n\r\n* Add tests for config flow\r\n\r\nFix form defaults for reauth flow.\r\n\r\n* Cover reauth when config entry loaded\r\n\r\n* Update per review (except for dataclasses)\r\n\r\n* Restore check for missing location information\r\n\r\nThis is in current code but was accidentally removed in this PR.\r\n\r\n* Fix updates from review\r\n\r\n* Update tests per review changes\r\n\r\n* Change IntegData to a dataclass\r\n\r\n* Use dataclasses to represent fetched Life360 data\r\n\r\n* Always add extra 
attributes\r\n\r\n* Update per review take 2\r\n\r\n* Tweak handling of bad last_seen or location_accuracy\r\n\r\n* Fix type of Life360Member.gps_accuracy\r\n\r\n* Update per review take 3\r\n\r\n* Update .coveragerc\r\n\r\n* Parametrize successful reauth flow test\r\n\r\n* Fix test coverage failure\r\n\r\n* Update per review take 4\r\n\r\n* Fix config schema", "code": "def entity_picture(self) -> str | None:\n \n if self.available:\n self._attr_entity_picture = self._data.entity_picture\n return super().entity_picture\n\n # All of the following will only be called if self.available is True.\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 61, "n_words": 26, "vocab_size": 25, "complexity": 2, "nloc": 5, "token_counts": 30, "n_ast_nodes": 52, "n_identifiers": 7, "random_cut": "def entity_picture(self) -> str | None:\n \n if self.available:\n self._attr_entity_picture = self._data.entity_picture\n return super().entity_picture\n\n # All of the following will only be called if self.available is True" }, { "id": 131772, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/test_resource_demand_scheduler.py", "file_name": "test_resource_demand_scheduler.py", "fun_name": "test_packing", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_packing(self):\n provider = MockProvider()\n scheduler = ResourceDemandScheduler(\n provider, TYPES_A, 10, head_node_type=\"p2.8xlarge\"\n )\n\n provider.create_node({}, {TAG_RAY_USER_NODE_TYPE: \"p2.8xlarge\"}, 1)\n # At this point our cluster has 1 p2.8xlarge instances (8 GPUs) and is\n # fully idle.\n nodes = provider.non_terminated_nodes({})\n\n resource_demands = [{\"GPU\": 1}] * 2\n pending_placement_groups = [\n PlacementGroupTableData(\n state=PlacementGroupTableData.PENDING,\n strategy=PlacementStrategy.STRICT_PACK,\n bundles=[Bundle(unit_resources={\"GPU\": 2})] * 3,\n ),\n ]\n # The 2 resource demand gpus should still be packed onto the same node\n # as the 6 GPU placement group.\n to_launch, rem = scheduler.get_nodes_to_launch(\n nodes, {}, resource_demands, {}, pending_placement_groups, {}\n )\n assert to_launch == {}\n assert not rem\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 294, "n_words": 94, "vocab_size": 78, "complexity": 1, "nloc": 20, "token_counts": 127, "n_ast_nodes": 202, "n_identifiers": 26, "random_cut": "def test_packing(self):\n provider = MockProvider()\n scheduler = ResourceDemandScheduler(\n provider, TYPES_A, 10, head_node_type=\"p2.8xlarge\"\n )\n\n provider.create_node({}, {TAG_RAY_USER_NODE_TYPE: \"p2.8xlarge\"}, 1)\n # At this point our cluster has 1 p2.8xlarge i" }, { "id": 88511, "commit_id": "fef9c695a1a7d3384fb3ce7ec6c264632e77061d", "repo": "sentry", "path": "tests/sentry/auth/test_access.py", "file_name": "test_access.py", "fun_name": "test_superuser", "commit_message": "feature(hybrid-cloud): Access with silo tests (#41305)\n\nGoal of this PR is implement a secondary interface for creating `Access`\r\nobjects that work on service dataclasses only. 
It validates that\r\nsecondary interface by running the access test suite against both\r\nimplementations *in all silo modes* ensuring full compatibility.\r\n\r\nNotably, while most of the org member access logic is left untouched,\r\nsome parts of existing logic have been slightly refactored:\r\n\r\n1. Organizationless Access objects no longer need the DB, and act on\r\nshared logic from the service layer.\r\n2. sso state and permissions querying is now extracted into the service\r\nlayer, and even the existing access uses that.", "code": "def test_superuser(self):\n request = self.make_request(user=self.superuser, is_superuser=False)\n result = self.from_request(request)\n assert not result.has_permission(\"test.permission\")\n\n request = self.make_request(user=self.superuser, is_superuser=True)\n result = self.from_request(request)\n assert result.has_permission(\"test.permission\")\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 62, "n_words": 21, "vocab_size": 12, "complexity": 1, "nloc": 7, "token_counts": 68, "n_ast_nodes": 111, "n_identifiers": 10, "random_cut": "def test_superuser(self):\n request = self.make_request(user=self.superuser, is_superuser=False)\n result = self.from_request(request)\n assert not result.has_permission(\"test.permission\")\n\n request = self.make_request(user=self.superuser, is_superuser=True)\n result = self.from_request(request)\n assert result.has_permission(\"te" }, { "id": 101600, "commit_id": "98d01760e469fd2108eed8d0b0a1ba6297c3177c", "repo": "faceswap", "path": "plugins/extract/recognition/vgg_face2_keras.py", "file_name": "vgg_face2_keras.py", "fun_name": "_integer_iterator", "commit_message": "Overhaul sort:\n - Standardize image data reading and writing\n - Optimize loading (just one pass required)\n - Make all sort groups binnable (to greater or lesser results)\n - Add sort by pitch\n - Deprecate multiple options\n - linting, docs + locales", "code": "def _integer_iterator(cls) -> Generator[int, None, None]:\n \n i = -1\n while True:\n i += 1\n yield i\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 59, "n_words": 16, "vocab_size": 14, "complexity": 2, "nloc": 6, "token_counts": 27, "n_ast_nodes": 45, "n_identifiers": 5, "random_cut": "def _integer_iterator(cls) -> Generator[int, None, None]:\n \n i = -1\n while True:\n i += 1\n yield i\n" }, { "id": 150159, "commit_id": "40f00196ebe4abc91b9987bf4365ea43f48c0eee", "repo": "freqtrade", "path": "freqtrade/freqai/data_drawer.py", "file_name": "data_drawer.py", "fun_name": "load_historic_predictions_from_disk", "commit_message": "use cloudpickle in place of pickle. 
define Paths once in data_drawer.", "code": "def load_historic_predictions_from_disk(self):\n \n exists = self.historic_predictions_path.is_file() # resolve().exists()\n if exists:\n with open(self.historic_predictions_path, \"rb\") as fp:\n self.historic_predictions = cloudpickle.load(fp)\n logger.info(\n f\"Found existing historic predictions at {self.full_path}, but beware \"\n \"that statistics may be inaccurate if the bot has been offline for \"\n \"an extended period of time.\"\n )\n elif not self.follow_mode:\n logger.info(\"Could not find existing historic_predictions, starting from scratch\")\n else:\n logger.warning(\n f\"Follower could not find historic predictions at {self.full_path} \"\n \"sending null values back to strategy\"\n )\n\n return exists\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 276, "n_words": 77, "vocab_size": 64, "complexity": 3, "nloc": 18, "token_counts": 73, "n_ast_nodes": 151, "n_identifiers": 15, "random_cut": "def load_historic_predictions_from_disk(self):\n \n exists = self.historic_predictions_path.is_file() # resolve().exists()\n if exists:\n with open(self.historic_predictions_path, \"rb\") as fp:\n self.historic_predictions = cloudpickle.load(fp)\n logger.info(\n f\"Found existing historic predictions at {self.full_path}, but beware \"\n \"that statistics may be inaccurate if the bot has been offline for \"\n \"an extended period of time.\"\n )\n elif not self.follow_mode:\n logger.info(\"Could not find existing historic_predictions, starting from scratch\")\n else:\n logger.warning(\n" }, { "id": 103763, "commit_id": "26b8ab9adf28dd2cab8614ec223d0cb4519763fa", "repo": "kitty", "path": "kitty_tests/datatypes.py", "file_name": "datatypes.py", "fun_name": "test_bracketed_paste_sanitizer", "commit_message": "Use a regex for bracketed paste sanitization", "code": "def test_bracketed_paste_sanitizer(self):\n from kitty.utils import sanitize_for_bracketed_paste\n for x in ('\\x1b[201~ab\\x9b201~cd', '\\x1b[201\\x1b[201~~ab'):\n q = sanitize_for_bracketed_paste(x.encode('utf-8'))\n self.assertNotIn(b'\\x1b[201~', q)\n self.assertNotIn('\\x9b201~'.encode('utf-8'), q)\n", "url": "https://github.com/kovidgoyal/kitty.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 64, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 6, "token_counts": 53, "n_ast_nodes": 98, "n_identifiers": 9, "random_cut": "def test_bracketed_paste_sanitizer(self):\n from kitty.utils import sanitize_for_bracketed_paste\n for x " }, { "id": 11385, "commit_id": "ae6df58f80d20fe4d8a11dbd3927593f228e990f", "repo": "jina", "path": "tests/integration/reduce/test_reduce.py", "file_name": "test_reduce.py", "fun_name": "test_reduce_needs", "commit_message": "fix: remove return_results (#4347)", "code": "def test_reduce_needs():\n flow = (\n Flow(port_expose=exposed_port)\n .add(uses=Executor1, name='pod0')\n .add(uses=Executor2, needs='gateway', name='pod1')\n .add(uses=Executor3, needs='gateway', name='pod2')\n .add(needs=['pod0', 'pod1', 'pod2'], name='pod3')\n )\n\n with flow as f:\n da = DocumentArray([Document() for _ in range(5)])\n resp = Client(port=exposed_port, return_responses=True).post('/', inputs=da)\n\n assert len(resp[0].docs) == 5\n for doc in resp[0].docs:\n assert doc.text == 'exec1'\n assert doc.tags == {'a': 'b'}\n assert doc.modality == 'image'\n assert (doc.embedding == np.zeros(3)).all()\n\n", "url": 
"https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 151, "n_words": 60, "vocab_size": 46, "complexity": 3, "nloc": 17, "token_counts": 176, "n_ast_nodes": 294, "n_identifiers": 34, "random_cut": "def test_reduce_needs():\n flow = (\n Flow(port_expose=exposed_port)\n .add(uses=Executor1, name='pod0')\n .add(uses=Executor2" }, { "id": 59154, "commit_id": "7092f0403a97154d3c3909e3fcd95e7db5776246", "repo": "prefect", "path": "tests/test_serializers.py", "file_name": "test_serializers.py", "fun_name": "test_picklelib_is_used", "commit_message": "Remove deep serialization from `PickleSerializer` and add tests (#7044)", "code": "def test_picklelib_is_used(self, monkeypatch):\n dumps = MagicMock(return_value=b\"test\")\n loads = MagicMock(return_value=\"test\")\n monkeypatch.setattr(\"pickle.dumps\", dumps)\n monkeypatch.setattr(\"pickle.loads\", loads)\n serializer = PickleSerializer(picklelib=\"pickle\")\n serializer.dumps(\"test\")\n dumps.assert_called_once_with(\"test\")\n serializer.loads(b\"test\")\n loads.assert_called_once_with(base64.decodebytes(b\"test\"))\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 82, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 10, "token_counts": 79, "n_ast_nodes": 140, "n_identifiers": 14, "random_cut": "def test_picklelib_is_used(self, monkeypatch):\n " }, { "id": 177098, "commit_id": "28f78cfa9a386620ee1179582fda1db5ffc59f84", "repo": "networkx", "path": "networkx/algorithms/tests/test_distance_measures.py", "file_name": "test_distance_measures.py", "fun_name": "test_bound_center_weight_attr", "commit_message": "Add weight distance metrics (#5305)\n\nAdds the weight keyword argument to allow users to compute weighted distance metrics\r\ne.g. diameter, eccentricity, periphery, etc. The kwarg works in the same fashion as the\r\nweight param for shortest paths - i.e. if a string, look up with edge attr by key, if callable,\r\ncompute the weight via the function. 
Default is None, meaning return unweighted result\r\nwhich is the current behavior.\r\n\r\nCo-authored-by: Dan Schult \r\nCo-authored-by: Ross Barnowski ", "code": "def test_bound_center_weight_attr(self):\n result = {0}\n assert (\n set(nx.center(self.G, usebounds=True, weight=\"weight\"))\n == set(nx.center(self.G, usebounds=True, weight=\"cost\"))\n == result\n )\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 70, "n_words": 17, "vocab_size": 13, "complexity": 1, "nloc": 7, "token_counts": 54, "n_ast_nodes": 84, "n_identifiers": 9, "random_cut": "def test_bound_center_weight_attr(self):\n result = {0}\n assert (\n set(nx.center(self.G, usebounds=True, weight=\"weight\"))\n == set(nx.ce" }, { "id": 40109, "commit_id": "5dfa6b0782803cb0635119ee1dcf8775dd76c8a7", "repo": "dash", "path": "dash/testing/browser.py", "file_name": "browser.py", "fun_name": "find_element", "commit_message": ":hocho: deprecated find_element(s)_by_css_selector", "code": "def find_element(self, selector):\n \n return self.driver.find_element(By.CSS_SELECTOR, selector)\n", "url": "https://github.com/plotly/dash.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 34, "n_identifiers": 6, "random_cut": "def find_element(self, selector):\n \n return self.driv" }, { "id": 304733, "commit_id": "635eda584dc8f932af235b72bb36ad76e74662f5", "repo": "core", "path": "tests/components/risco/test_sensor.py", "file_name": "test_sensor.py", "fun_name": "test_error_on_login", "commit_message": "Support for local push in Risco integration (#75874)\n\n* Local config flow\r\n\r\n* Local entities\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Address code review comments\r\n\r\n* More type hints\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* More annotations\r\n\r\n* Even more annonations\r\n\r\n* New entity naming\r\n\r\n* Move fixtures to conftest\r\n\r\n* Improve state tests for local\r\n\r\n* Remove mutable default arguments\r\n\r\n* Remove assertions for lack of state\r\n\r\n* Add missing file\r\n\r\n* Switch setup to fixtures\r\n\r\n* Use error fixtures in test_config_flow\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "async def test_error_on_login(hass, login_with_error, cloud_config_entry):\n \n await hass.config_entries.async_setup(cloud_config_entry.entry_id)\n await hass.async_block_till_done()\n\n registry = er.async_get(hass)\n for id in ENTITY_IDS.values():\n assert not registry.async_is_registered(id)\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 41, "n_words": 19, "vocab_size": 18, "complexity": 2, "nloc": 6, "token_counts": 52, "n_ast_nodes": 87, "n_identifiers": 15, "random_cut": "async def test_error_on_login(hass, login_with_error, cloud_config_entry):\n \n await hass.config_entries.async_setup(cloud_config_entry.entry_id)\n await hass.async_block_till_done()\n\n registry = er.async_get(hass)\n for id in ENT" }, { "id": 244375, "commit_id": "9c5b3331ac8edbfa328922fbab45c382380da540", "repo": "mmdetection", "path": "mmdet/models/roi_heads/standard_roi_head.py", "file_name": 
"standard_roi_head.py", "fun_name": "aug_test", "commit_message": "Simplify api of one-stage detector", "code": "def aug_test(self, x, proposal_list, aug_batch_img_metas, rescale=False):\n \n det_bboxes, det_labels = self.aug_test_bboxes(x, aug_batch_img_metas,\n proposal_list,\n self.test_cfg)\n if rescale:\n _det_bboxes = det_bboxes\n else:\n _det_bboxes = det_bboxes.clone()\n _det_bboxes[:, :4] *= det_bboxes.new_tensor(\n aug_batch_img_metas[0][0]['scale_factor'])\n bbox_results = bbox2result(_det_bboxes, det_labels,\n self.bbox_head.num_classes)\n\n # det_bboxes always keep the original scale\n if self.with_mask:\n segm_results = self.aug_test_mask(x, aug_batch_img_metas,\n det_bboxes, det_labels)\n return [(bbox_results, segm_results)]\n else:\n return [bbox_results]\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 375, "n_words": 53, "vocab_size": 40, "complexity": 3, "nloc": 18, "token_counts": 120, "n_ast_nodes": 180, "n_identifiers": 20, "random_cut": "def aug_test(self, x, proposal_list, aug_batch_img_metas, rescale=False):\n \n det_bboxes, det_labels = self.aug_test_bboxes(x, aug_batch_img_metas,\n proposal_list,\n self.test_cfg)\n if rescale:\n _det_bboxes = det_bboxes\n else:\n _det_bboxes = det_bboxes.clone()\n _det_bboxes[:, :4] *= det_bboxes.new_tensor(\n aug_batch_img_metas[0][0]['scale_factor'])\n bbox_results = bbox2result(_det_bboxes, det_labels,\n self.bbox_head.num_classes)\n\n # det_bboxes always keep the original scale\n if self.with_mask:\n segm_results = self.aug_test_mask(x, aug_batch_img_metas,\n det_bboxes, det_labels)\n " }, { "id": 309854, "commit_id": "cb89c23c0ffd7beba1ecc0cb84d80e8842f9a571", "repo": "core", "path": "tests/components/zwave_js/test_init.py", "file_name": "test_init.py", "fun_name": "test_null_name", "commit_message": "Avoid removing zwave_js devices for non-ready nodes (#59964)\n\n* Only replace a node if the mfgr id / prod id / prod type differ\n\n* Prefer original device name for unready node\n\n* move register_node_in_dev_reg into async_setup_entry\n\n* simplify get_device_id_ext\n\n* Don't need hex ids\n\n* Revert \"move register_node_in_dev_reg into async_setup_entry\"\n\nThis reverts commit f900e5fb0c67cc81657a1452b51c313bccb6f9e1.\n\n* Revert Callable change\n\n* Revert device backup name\n\n* Add test fixtures\n\n* Update existing not ready test with new fixture data\n\n* Check device properties after node added event\n\n* Add entity check\n\n* Check for extended device id\n\n* better device info checks\n\n* Use receive_event to properly setup components\n\n* Cleanup tests\n\n* improve test_replace_different_node\n\n* improve test_replace_same_node\n\n* add test test_node_model_change\n\n* Clean up long comments and strings\n\n* Format\n\n* Reload integration to detect node device config changes\n\n* update assertions\n\n* Disable entities on \"value removed\" event\n\n* Disable node status sensor on node replacement\n\n* Add test for disabling entities on remove value event\n\n* Add test for disabling node status sensor on node replacement\n\n* disable entity -> remove entity\n\nCo-authored-by: Martin Hjelmare ", "code": "async def test_null_name(hass, client, null_name_check, integration):\n \n node = null_name_check\n assert hass.states.get(f\"switch.node_{node.node_id}\")\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 20, 
"n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 25, "n_ast_nodes": 48, "n_identifiers": 9, "random_cut": "async def test_null_name(hass, client, null_name_check, integration):\n \n node = null_name_check\n assert hass.states.get(f\"switch.node_{node.node_id}\")\n\n" }, { "id": 214851, "commit_id": "5d210c14f5b903291cde509d34142c220c06de9e", "repo": "flair", "path": "tests/model_test_utils.py", "file_name": "model_test_utils.py", "fun_name": "build_model", "commit_message": "refactor sequence tagger", "code": "def build_model(self, embeddings, label_dict, **kwargs):\n model_args = dict(self.model_args)\n for k in kwargs.keys():\n if k in model_args:\n del model_args[k]\n return self.model_cls(\n embeddings=embeddings,\n label_dictionary=label_dict,\n label_type=self.train_label_type,\n **model_args,\n **kwargs,\n )\n", "url": "https://github.com/flairNLP/flair.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 134, "n_words": 26, "vocab_size": 24, "complexity": 3, "nloc": 12, "token_counts": 65, "n_ast_nodes": 95, "n_identifiers": 13, "random_cut": "def build_model(self, embeddings, label_dict, **kwargs):\n model_args = dict(self.model_args)\n for k in kwargs.keys():\n if k in model_args:\n del model_args[k]\n return self.model_cls(\n embeddings=embeddings,\n label_dictionary=label_dict,\n label_type=self.train_label_type,\n **model_args,\n **kwargs,\n " }, { "id": 31932, "commit_id": "fbc7598babd06a49797db7142016f0029cdc41b2", "repo": "transformers", "path": "src/transformers/models/mobilevit/modeling_mobilevit.py", "file_name": "modeling_mobilevit.py", "fun_name": "_prune_heads", "commit_message": "add MobileViT model (#17354)\n\n* add MobileViT\r\n\r\n* fixup\r\n\r\n* Update README.md\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* remove empty line\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* use clearer variable names\r\n\r\n* rename to MobileViTTransformerLayer\r\n\r\n* no longer inherit from nn.Sequential\r\n\r\n* fixup\r\n\r\n* fixup\r\n\r\n* not sure why this got added twice\r\n\r\n* rename organization for checkpoints\r\n\r\n* fix it up\r\n\r\n* Update src/transformers/models/mobilevit/__init__.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/mobilevit/configuration_mobilevit.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/mobilevit/configuration_mobilevit.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/mobilevit/configuration_mobilevit.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update tests/models/mobilevit/test_modeling_mobilevit.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/mobilevit/modeling_mobilevit.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/mobilevit/modeling_mobilevit.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/mobilevit/modeling_mobilevit.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update 
src/transformers/models/mobilevit/modeling_mobilevit.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* code style improvements\r\n\r\n* fixup\r\n\r\n* Update docs/source/en/model_doc/mobilevit.mdx\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update docs/source/en/model_doc/mobilevit.mdx\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/mobilevit/configuration_mobilevit.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/mobilevit/configuration_mobilevit.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* download labels from hub\r\n\r\n* rename layers\r\n\r\n* rename more layers\r\n\r\n* don't compute loss in separate function\r\n\r\n* remove some nn.Sequential\r\n\r\n* replace nn.Sequential with new MobileViTTransformer class\r\n\r\n* replace nn.Sequential with MobileViTMobileNetLayer\r\n\r\n* fix pruning since model structure changed\r\n\r\n* fixup\r\n\r\n* fix doc comment\r\n\r\n* remove custom resize from feature extractor\r\n\r\n* fix ONNX import\r\n\r\n* add to doc tests\r\n\r\n* use center_crop from image_utils\r\n\r\n* move RGB->BGR flipping into image_utils\r\n\r\n* fix broken tests\r\n\r\n* wrong type hint\r\n\r\n* small tweaks\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def _prune_heads(self, heads_to_prune):\n \n for layer_index, heads in heads_to_prune.items():\n mobilevit_layer = self.encoder.layer[layer_index]\n if isinstance(mobilevit_layer, MobileViTLayer):\n for transformer_layer in mobilevit_layer.transformer.layer:\n transformer_layer.attention.prune_heads(heads)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 89, "n_words": 19, "vocab_size": 17, "complexity": 4, "nloc": 6, "token_counts": 54, "n_ast_nodes": 85, "n_identifiers": 15, "random_cut": "def _prune_heads(self, heads_to_prune):\n \n for layer_index, heads in heads_to_prune.items():\n mobilevit_layer = self.encoder.layer[lay" }, { "id": 62080, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/metadata.py", "file_name": "metadata.py", "fun_name": "provides", "commit_message": "upd; format", "code": "def provides(self, value):\n if self._legacy:\n self._legacy['Provides-Dist'] = value\n else:\n self._data['provides'] = value\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 47, "n_words": 12, "vocab_size": 10, "complexity": 2, "nloc": 5, "token_counts": 30, "n_ast_nodes": 51, "n_identifiers": 5, "random_cut": "def provides(self, value):\n if self._legacy:\n self._legacy['" }, { "id": 149782, "commit_id": "29c2d1d1891f7e804a133908702f435ff4fd8f32", "repo": "freqtrade", "path": "freqtrade/freqai/data_handler.py", "file_name": "data_handler.py", "fun_name": "compute_distances", "commit_message": "use logger in favor of print", "code": "def compute_distances(self) -> float:\n logger.info(\"computing average mean distance for all training points\")\n pairwise = 
pairwise_distances(self.data_dictionary[\"train_features\"], n_jobs=-1)\n avg_mean_dist = pairwise.mean(axis=1).mean()\n logger.info(\"avg_mean_dist\", avg_mean_dist)\n\n return avg_mean_dist\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 57, "n_words": 23, "vocab_size": 21, "complexity": 1, "nloc": 6, "token_counts": 53, "n_ast_nodes": 90, "n_identifiers": 12, "random_cut": "def compute_distances(self) -> float:\n logger.info(\"computing average mean distance for all training points\")\n pairwise = pairwise_distances(self.data_dictionary[\"train_features\"], n_jobs=-1)\n avg_mean_dist = pairwise.mean(axis=1).mean()\n logger.info(\"avg_mean_dist\", avg_mean_dist)\n\n return avg_mean_dist\n" }, { "id": 55379, "commit_id": "b9f2761989e5b324beb9a5b88688f9a75c50312b", "repo": "prefect", "path": "tests/fixtures/database.py", "file_name": "database.py", "fun_name": "block_schema", "commit_message": "Blocks Refactor (PrefectHQ/orion#1670)\n\n* Rename BlockSpec to BlockSchema\n* Renames API Block to Block Document", "code": "async def block_schema(session):\n block_schema = await models.block_schemas.create_block_schema(\n session=session,\n block_schema=schemas.core.BlockSchema(\n name=\"x\",\n version=\"1.0\",\n type=\"abc\",\n ),\n )\n await session.commit()\n return block_schema\n\n\n@pytest.fixture", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 83, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 11, "token_counts": 49, "n_ast_nodes": 89, "n_identifiers": 14, "random_cut": "async def block_schema(session):\n block_schema = await models.block_schemas.cr" }, { "id": 259355, "commit_id": "d76f87c8eb5a50da917cab8ea87ed0bfdfb7dd3c", "repo": "scikit-learn", "path": "sklearn/linear_model/tests/test_ridge.py", "file_name": "test_ridge.py", "fun_name": "test_ridge_fit_intercept_sparse_sag", "commit_message": "Fix Ridge sparse + sample_weight + intercept (#22899)\n\nCo-authored-by: Olivier Grisel ", "code": "def test_ridge_fit_intercept_sparse_sag(with_sample_weight, global_random_seed):\n X, y = _make_sparse_offset_regression(\n n_features=5, n_samples=20, random_state=global_random_seed, X_offset=5.0\n )\n if with_sample_weight:\n rng = np.random.RandomState(global_random_seed)\n sample_weight = 1.0 + rng.uniform(size=X.shape[0])\n else:\n sample_weight = None\n X_csr = sp.csr_matrix(X)\n\n params = dict(\n alpha=1.0, solver=\"sag\", fit_intercept=True, tol=1e-10, max_iter=100000\n )\n dense_ridge = Ridge(**params)\n sparse_ridge = Ridge(**params)\n dense_ridge.fit(X, y, sample_weight=sample_weight)\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", UserWarning)\n sparse_ridge.fit(X_csr, y, sample_weight=sample_weight)\n assert_allclose(dense_ridge.intercept_, sparse_ridge.intercept_, rtol=1e-4)\n assert_allclose(dense_ridge.coef_, sparse_ridge.coef_, rtol=1e-4)\n with pytest.warns(UserWarning, match='\"sag\" solver requires.*'):\n Ridge(solver=\"sag\").fit(X_csr, y)\n\n\n@pytest.mark.parametrize(\"return_intercept\", [False, True])\n@pytest.mark.parametrize(\"sample_weight\", [None, np.ones(1000)])\n@pytest.mark.parametrize(\"arr_type\", [np.array, sp.csr_matrix])\n@pytest.mark.parametrize(\n \"solver\", [\"auto\", \"sparse_cg\", \"cholesky\", \"lsqr\", \"sag\", \"saga\", \"lbfgs\"]\n)", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": 
"Python", "ast_errors": "@pytest.mark.parametrize(\"return_intercept\", [False, True])\n@pytest.mark.parametrize(\"sample_weight\", [None, np.ones(1000)])\n@pytest.mark.parametrize(\"arr_type\", [np.array, sp.csr_matrix])\n@pytest.mark.parametrize(\n \"solver\", [\"auto\", \"sparse_cg\", \"cholesky\", \"lsqr\", \"sag\", \"saga\", \"lbfgs\"]\n)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 181, "n_words": 86, "vocab_size": 71, "complexity": 2, "nloc": 23, "token_counts": 214, "n_ast_nodes": 447, "n_identifiers": 47, "random_cut": "def test_ridge_fit_intercept_sparse_sag(with_sample_weight, global_random_seed):\n X, y = _make_sparse_offset_regression(\n n_features=5, n_samples=20, random_state=global_random_seed, X_offset=5.0\n )\n if with_sample_weight:\n rng = np.random.RandomState(global_random_seed)\n sample_weight = 1.0 + rng.uniform(size=X.shape[0])\n else:\n sample_weight = None\n X_csr = sp.csr_matrix(X)\n\n params = dict(\n alpha=1.0, solver=\"sag\", fit_intercept=True, tol=1e-10, max_iter=100000\n )\n dense_ridge = Ridge(**params)\n sparse_ridge = Ridge(**params)\n dense_ridge.fit(X, y, sample_weight=sample_weight)\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", UserWarning)\n sparse_ridge.fit(X_csr, y, sample_weight=sample_weight" }, { "id": 206638, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/encoding.py", "file_name": "encoding.py", "fun_name": "force_bytes", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def force_bytes(s, encoding=\"utf-8\", strings_only=False, errors=\"strict\"):\n \n # Handle the common case first for performance reasons.\n if isinstance(s, bytes):\n if encoding == \"utf-8\":\n return s\n else:\n return s.decode(\"utf-8\", errors).encode(encoding, errors)\n if strings_only and is_protected_type(s):\n return s\n if isinstance(s, memoryview):\n return bytes(s)\n return str(s).encode(encoding, errors)\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 110, "n_words": 42, "vocab_size": 32, "complexity": 6, "nloc": 11, "token_counts": 86, "n_ast_nodes": 141, "n_identifiers": 12, "random_cut": "def force_bytes(s, encoding=\"utf-8\", strings_only=False, errors=\"strict\"):\n \n # Handle the common case first for performance reasons.\n if isinstance(s, bytes):\n if encoding == \"utf-8\":\n " }, { "id": 163813, "commit_id": "49bddad8b16d7c881a3440340035b1b83854e55e", "repo": "pandas", "path": "pandas/tests/util/test_assert_frame_equal.py", "file_name": "test_assert_frame_equal.py", "fun_name": "test_assert_frame_equal_checking_allow_dups_flag", "commit_message": "REGR: check_flags not respected in assert_frame_equal (#45565)", "code": "def test_assert_frame_equal_checking_allow_dups_flag():\n # GH#45554\n left = DataFrame([[1, 2], [3, 4]])\n left.flags.allows_duplicate_labels = False\n\n right = DataFrame([[1, 2], [3, 4]])\n right.flags.allows_duplicate_labels = True\n tm.assert_frame_equal(left, right, check_flags=False)\n\n with pytest.raises(AssertionError, match=\"allows_duplicate_labels\"):\n tm.assert_frame_equal(left, right, check_flags=True)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 58, "n_words": 31, "vocab_size": 22, "complexity": 1, "nloc": 8, "token_counts": 90, "n_ast_nodes": 137, "n_identifiers": 13, "random_cut": "def 
test_assert_frame_equal_checking_allow_dups_flag():\n # GH#45554\n left = DataFrame([[1, 2], [3, 4]])\n left.flags.allows_duplicate_labels = False\n\n right = DataFrame([[1, 2], [3, 4]])\n right.flags.allows_duplicate_labels = True\n tm.assert_" }, { "id": 322196, "commit_id": "621357338437ee420eabbbf5ab19065bc85e73a5", "repo": "PaddleNLP", "path": "paddlenlp/taskflow/knowledge_mining.py", "file_name": "knowledge_mining.py", "fun_name": "_load_task_resources", "commit_message": "Update neural search readme and Add Paddle Serving Support (#1558)\n\n* add recall inference similarity\r\n\r\n* update examples\r\n\r\n* updatea readme\r\n\r\n* update dir name\r\n\r\n* update neural search readme\r\n\r\n* update milvus readme\r\n\r\n* update domain adaptive pretraining readme\r\n\r\n* fix the mistakes\r\n\r\n* update readme\r\n\r\n* add recall Paddle Serving Support\r\n\r\n* update readme\r\n\r\n* update readme and format the code\r\n\r\n* reformat the files\r\n\r\n* move the files\r\n\r\n* reformat the code\r\n\r\n* remove redundant code\r\n\r\nCo-authored-by: Zeyu Chen \r\nCo-authored-by: tianxin ", "code": "def _load_task_resources(self):\n if self._tag_path is None:\n self._tag_path = os.path.join(self._task_path, \"tags.txt\")\n self._tags_to_index, self._index_to_tags, self._all_tags = self._load_labels(\n self._tag_path)\n if self._term_schema_path is None:\n self._term_schema_path = os.path.join(self._task_path,\n \"termtree_type.csv\")\n if self._term_data_path is None:\n self._term_data_path = os.path.join(self._task_path,\n \"termtree_data\")\n if self._linking is True:\n self._termtree = TermTree.from_dir(\n self._term_schema_path, self._term_data_path, self._linking)\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 242, "n_words": 42, "vocab_size": 25, "complexity": 5, "nloc": 14, "token_counts": 121, "n_ast_nodes": 190, "n_identifiers": 17, "random_cut": "def _load_task_resources(self):\n if self._tag_path is None:\n self._tag_path = os.path.join(self._task_path, \"tags.txt\")\n self._tags_to_index, self._index_to_tags, self._all_tags = self._load_labels(\n self._tag_path)\n if self._term_schema_path is None:\n self._term_schema_path = os.path.join(self._task_path,\n \"termtree_type.csv\")\n if self._term_data_path is None:\n self._term_data_path = os.path.join(self._task_path,\n \"termtree_data\")\n if self._linking" }, { "id": 299414, "commit_id": "a9ca774e7ed1d8fe502a53d5b765c1d9b393a524", "repo": "core", "path": "tests/components/insteon/test_api_device.py", "file_name": "test_api_device.py", "fun_name": "test_cancel_add_device", "commit_message": "Insteon Device Control Panel (#70834)\n\nCo-authored-by: Paulus Schoutsen ", "code": "async def test_cancel_add_device(hass, hass_ws_client):\n \n\n ws_client, devices, _, _ = await _async_setup(hass, hass_ws_client)\n\n with patch.object(insteon.api.aldb, \"devices\", devices):\n await ws_client.send_json(\n {\n ID: 2,\n TYPE: \"insteon/device/add/cancel\",\n }\n )\n msg = await ws_client.receive_json()\n assert msg[\"success\"]\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 120, "n_words": 31, "vocab_size": 28, "complexity": 1, "nloc": 11, "token_counts": 68, "n_ast_nodes": 113, "n_identifiers": 17, "random_cut": "async def test_cancel_add_device(hass, hass_ws_client):\n \n\n ws_client, devices, _, _ = await _async_setup(hass, 
hass_ws_client)\n\n with patch.object(insteon.api.aldb, \"dev" }, { "id": 250255, "commit_id": "652d1669c5a103b1c20478770c4aaf18849c09a3", "repo": "synapse", "path": "tests/handlers/test_appservice.py", "file_name": "test_appservice.py", "fun_name": "test_application_services_receive_local_to_device", "commit_message": "Add missing type hints to tests.handlers. (#14680)\n\nAnd do not allow untyped defs in tests.handlers.", "code": "def test_application_services_receive_local_to_device(self) -> None:\n \n interested_appservice = self._register_application_service(\n namespaces={\n ApplicationService.NS_USERS: [\n {\n \"regex\": \"@exclusive_as_user:.+\",\n \"exclusive\": True,\n }\n ],\n },\n )\n\n # Have local_user send a to-device message to exclusive_as_user\n message_content = {\"some_key\": \"some really interesting value\"}\n chan = self.make_request(\n \"PUT\",\n \"/_matrix/client/r0/sendToDevice/m.room_key_request/3\",\n content={\n \"messages\": {\n self.exclusive_as_user: {\n self.exclusive_as_user_device_id: message_content\n }\n }\n },\n access_token=self.local_user_token,\n )\n self.assertEqual(chan.code, 200, chan.result)\n\n # Have exclusive_as_user send a to-device message to local_user\n chan = self.make_request(\n \"PUT\",\n \"/_matrix/client/r0/sendToDevice/m.room_key_request/4\",\n content={\n \"messages\": {\n self.local_user: {self.local_user_device_id: message_content}\n }\n },\n access_token=self.exclusive_as_user_token,\n )\n self.assertEqual(chan.code, 200, chan.result)\n\n # Check if our application service - that is interested in exclusive_as_user - received\n # the to-device message as part of an AS transaction.\n # Only the local_user -> exclusive_as_user to-device message should have been forwarded to the AS.\n #\n # The uninterested application service should not have been notified at all.\n self.send_mock.assert_called_once()\n (\n service,\n _events,\n _ephemeral,\n to_device_messages,\n _otks,\n _fbks,\n _device_list_summary,\n ) = self.send_mock.call_args[0]\n\n # Assert that this was the same to-device message that local_user sent\n self.assertEqual(service, interested_appservice)\n self.assertEqual(to_device_messages[0][\"type\"], \"m.room_key_request\")\n self.assertEqual(to_device_messages[0][\"sender\"], self.local_user)\n\n # Additional fields 'to_user_id' and 'to_device_id' specifically for\n # to-device messages via the AS API\n self.assertEqual(to_device_messages[0][\"to_user_id\"], self.exclusive_as_user)\n self.assertEqual(\n to_device_messages[0][\"to_device_id\"], self.exclusive_as_user_device_id\n )\n self.assertEqual(to_device_messages[0][\"content\"], message_content)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 871, "n_words": 187, "vocab_size": 119, "complexity": 1, "nloc": 59, "token_counts": 262, "n_ast_nodes": 423, "n_identifiers": 31, "random_cut": "def test_application_services_receive_local_to_device(self) -> None:\n \n interested_appservice = self._register_application_service(\n namespaces={\n ApplicationService.NS_USERS: [\n {\n \"regex\": \"@exclusive_as_user:.+\",\n \"exclusive\": True,\n }\n ],\n },\n )\n\n # Have local_user send a to-device message to exclusive_as_user\n message_content = {\"some_key\": \"some really interesting value\"}\n chan = self.make_request(\n \"PUT\",\n \"/_matrix/client/r0/sendToDevice/m.room_key_request/3\",\n content={\n \"messages\": {\n self.exclusive_as_user: {\n 
self.exclusive_as_user_device_id: message_content\n }\n }\n },\n access_token=self.local_user_token,\n )\n self.assertEqual(chan.code, 200, chan.result)\n\n # Have exclusive_as_user send a to-device message to local_user\n chan = self.make_request(\n \"PUT\",\n \"/_matrix/client/r0/sendToDevice/m.room_key_request/4\",\n content={\n \"messages\": {\n self.local_user: {self.local_user_device_id: message_content}\n }\n },\n access_token=self.exclusive_as_user_token,\n )\n self.assertEqual(chan.code, 200, chan.result)\n\n # Check if our application service - that is interested in exclusive_as_user - received\n # the to-device message as part of an AS transaction.\n # Only the local_user -> exclusive_as_user to-device message should have been forwarded to the AS.\n #\n # The uninterested application service should not have been notified at all.\n self.send_mock.assert_called_once()\n (\n service,\n _events,\n _ephemeral,\n to_device_messages,\n _otks,\n _fbks,\n _device_list_summary,\n ) = self.send_mock.call_args[0]\n\n # Assert that this was the same to-device message that local_user sent\n self.assertEqual(service, interested_appservice)\n self.assertEqual(to_device_messages[0][\"type\"], \"m.room_key_request\")\n s" }, { "id": 69242, "commit_id": "ff5cad1cd617a23d6ffc9903f29d713a8db8d949", "repo": "erpnext", "path": "erpnext/assets/doctype/asset/depreciation.py", "file_name": "depreciation.py", "fun_name": "reset_depreciation_schedule", "commit_message": "fix: calculate depreciation properly on asset sale entry and scrap entry", "code": "def reset_depreciation_schedule(asset, date):\n\tasset.flags.ignore_validate_update_after_submit = True\n\n\t# recreate original depreciation schedule of the asset\n\tasset.prepare_depreciation_data(date_of_return=date)\n\n\tmodify_depreciation_schedule_for_asset_repairs(asset)\n\tasset.save()\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 11, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 5, "token_counts": 31, "n_ast_nodes": 52, "n_identifiers": 9, "random_cut": "def reset_depreciation_schedule(asset, date):\n\tasset.flags.ignore_validate_update_after_submit = True\n\n\t# recreate original depreciation sche" }, { "id": 243073, "commit_id": "e2158344a0b4b4016a39dcf40c7220aa77b60579", "repo": "Pillow", "path": "Tests/test_imagefont.py", "file_name": "test_imagefont.py", "fun_name": "test_multiline_width", "commit_message": "update test_imagefont to use textbbox", "code": "def test_multiline_width(self):\n ttf = self.get_font()\n im = Image.new(mode=\"RGB\", size=(300, 100))\n draw = ImageDraw.Draw(im)\n\n assert (\n draw.textbbox((0, 0), \"longest line\", font=ttf)[2]\n == draw.multiline_textbbox((0, 0), \"longest line\\nline\", font=ttf)[2]\n )\n with pytest.warns(DeprecationWarning) as log:\n assert (\n draw.textsize(\"longest line\", font=ttf)[0]\n == draw.multiline_textsize(\"longest line\\nline\", font=ttf)[0]\n )\n assert len(log) == 2\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 167, "n_words": 45, "vocab_size": 31, "complexity": 1, "nloc": 14, "token_counts": 127, "n_ast_nodes": 202, "n_identifiers": 22, "random_cut": "def test_multiline_width(self):\n ttf = self.get_font()\n im = Image.new(mode=\"RGB\", size=(300, 100))\n draw = ImageDraw.Draw(im)\n\n assert (\n " }, { "id": 320228, "commit_id": 
"4333bd58cfeec5c613a8b9b5d3a3b713964f5c8e", "repo": "paperless-ngx", "path": "src/paperless/serialisers.py", "file_name": "serialisers.py", "fun_name": "get_permissions", "commit_message": "feat: add users and groups API routes", "code": "def get_permissions(self, obj):\n # obj.get_user_permissions() returns more permissions than desired\n permission_natural_keys = []\n permissions = obj.user_permissions.all()\n for permission in permissions:\n permission_natural_keys.append(\n permission.natural_key()[1] + \".\" + permission.natural_key()[0],\n )\n return permission_natural_keys\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 100, "n_words": 29, "vocab_size": 25, "complexity": 2, "nloc": 8, "token_counts": 52, "n_ast_nodes": 85, "n_identifiers": 10, "random_cut": "def get_permissions(self, obj):\n # obj.get_user_permissions() returns more perm" }, { "id": 46779, "commit_id": "4ffd4f09532fceb67675fce4c1f5cd383eff992e", "repo": "airflow", "path": "dev/breeze/src/airflow_breeze/shell/shell_params.py", "file_name": "shell_params.py", "fun_name": "md5sum_cache_dir", "commit_message": "Prepare Breeze2 for prime time :) (#22713)\n\nThis is a review and clean-up for all the parameters and\r\ncommands for Breeze2 in order to prepare it for being\r\nused by the contribugors.\r\n\r\nThere are various small fixes here and there, removal\r\nof duplicated code, refactoring and moving code around\r\nas well as cleanup and review all the parameters used\r\nfor all implemented commands.\r\n\r\nThe parameters, default values and their behaviours were\r\nupdated to match \"new\" life of Breeze rather than old\r\none.\r\n\r\nSome improvements are made to the autocomplete and\r\nclick help messages printed. Full list of choices is\r\nalways displayed, parameters are groups according to\r\ntheir target audience, and they were sorted according\r\nto importance and frequency of use.\r\n\r\nVarious messages have been colourised according to their\r\nmeaning - warnings as yellow, errors as red and\r\ninformational messages as bright_blue.\r\n\r\nThe `dry-run` option has been added to just show what\r\nwould have been run without actually running some\r\npotentially \"write\" commands (read commands are still\r\nexecuted) so that you can easily verify and manually\r\ncopy and execute the commands with option to modify\r\nthem before. 
The `dry_run` and `verbose` options are\r\nnow used for all commands.\r\n\r\nThe \"main\" command now runs \"shell\" by default similarly\r\nas the original Breeze.\r\n\r\nAll \"shortcut\" parameters have been standardized - i.e\r\ncommon options (verbose/dry run/help) have one and all\r\ncommon flags that are likely to be used often have an\r\nassigned shortcute.\r\n\r\nThe \"stop\" and \"cleanup\" command have been added\r\nas they are necessary for average user to complete the\r\nregular usage cycle.\r\n\r\nDocumentation for all the important methods have been\r\nupdated.", "code": "def md5sum_cache_dir(self) -> Path:\n cache_dir = Path(BUILD_CACHE_DIR, self.airflow_branch, self.python, self.the_image_type)\n return cache_dir\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 25, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 27, "n_ast_nodes": 40, "n_identifiers": 8, "random_cut": "def md5sum_cache_dir(self) -> Path:\n cache_dir = Path(BUILD_CACHE_DIR, self.airflow_branch, self.python, self.the_image_type)\n r" }, { "id": 37492, "commit_id": "57e6464ac9a31156f1c93e59107323e6ec01309e", "repo": "transformers", "path": "src/transformers/testing_utils.py", "file_name": "testing_utils.py", "fun_name": "require_tokenizers", "commit_message": "Update all require decorators to use skipUnless when possible (#16999)", "code": "def require_tokenizers(test_case):\n \n return unittest.skipUnless(is_tokenizers_available(), \"test requires tokenizers\")(test_case)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 37, "n_identifiers": 5, "random_cut": "def require_tokenizers(test_case):\n \n return unittest.skipUnless(is_tokenizers_available(), \"test requires tokenizers\")(test_case)\n\n" }, { "id": 187901, "commit_id": "d82184af1d8dfddd5e4ddcf4ee5f141e2e398d5e", "repo": "streamlink", "path": "src/streamlink/stream/ffmpegmux.py", "file_name": "ffmpegmux.py", "fun_name": "command", "commit_message": "stream.ffmpegmux: validate FFmpeg version\n\nand log FFmpeg version output on the debug logging level", "code": "def command(cls, session):\n with _lock_resolve_command:\n return cls._resolve_command(\n session.options.get(\"ffmpeg-ffmpeg\"),\n not session.options.get(\"ffmpeg-no-validation\"),\n )\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 69, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 6, "token_counts": 35, "n_ast_nodes": 60, "n_identifiers": 7, "random_cut": "def command(cls, session):\n with _lock_resolve_command:\n " }, { "id": 162176, "commit_id": "be1f331f2103e6c89c8d25e47e1b445072b498dd", "repo": "yt-dlp", "path": "yt_dlp/extractor/tiktok.py", "file_name": "tiktok.py", "fun_name": "_parse_aweme_video_app", "commit_message": "[TikTok] Misc fixes (#2271)\n\nCloses #2265\r\nAuthored by: MinePlayersPE", "code": "def _parse_aweme_video_app(self, aweme_detail):\n aweme_id = aweme_detail['aweme_id']\n video_info = aweme_detail['video']\n", "url": "https://github.com/yt-dlp/yt-dlp.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, "n_words": 9, "vocab_size": 8, "complexity": 20, "nloc": 103, "token_counts": 
838, "n_ast_nodes": 33, "n_identifiers": 5, "random_cut": "def _parse_aweme_video_app(self, aweme_detail):\n aweme_id = aweme_detail['aweme_id']\n video_info = aweme_detail['video']\n" }, { "id": 257021, "commit_id": "834f8c49024063ce17a63e50a9d7cff12f1c4f91", "repo": "haystack", "path": "test/test_file_converter.py", "file_name": "test_file_converter.py", "fun_name": "test_convert", "commit_message": "Change return types of indexing pipeline nodes (#2342)\n\n* Change return types of file converters\r\n\r\n* Change return types of preprocessor\r\n\r\n* Change return types of crawler\r\n\r\n* Adapt utils to functions to new return types\r\n\r\n* Adapt __init__.py to new method names\r\n\r\n* Prevent circular imports\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Let DocStores' run method accept Documents\r\n\r\n* Adapt tests to new return types\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Put \"# type: ignore\" to right place\r\n\r\n* Remove id_hash_keys property from Document primitive\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Adapt tests to new return types and missing id_hash_keys property\r\n\r\n* Fix mypy\r\n\r\n* Fix mypy\r\n\r\n* Adapt PDFToTextOCRConverter\r\n\r\n* Remove id_hash_keys from RestAPI tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Rename tests\r\n\r\n* Remove redundant setting of content_type=\"text\"\r\n\r\n* Add DeprecationWarning\r\n\r\n* Add id_hash_keys to elasticsearch_index_to_document_store\r\n\r\n* Change document type from dict to Docuemnt in PreProcessor test\r\n\r\n* Fix file path in Tutorial 5\r\n\r\n* Remove added output in Tutorial 5\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix file_paths in Tutorial 9 + fix gz files in fetch_archive_from_http\r\n\r\n* Adapt tutorials to new return types\r\n\r\n* Adapt tutorial 14 to new return types\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Change assertions to HaystackErrors\r\n\r\n* Import HaystackError correctly\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def test_convert(Converter):\n converter = Converter()\n document = converter.convert(file_path=SAMPLES_PATH / \"pdf\" / \"sample_pdf_1.pdf\")[0]\n pages = document.content.split(\"\\f\")\n assert len(pages) == 4 # the sample PDF file has four pages.\n assert pages[0] != \"\" # the page 1 of PDF contains text.\n assert pages[2] == \"\" # the page 3 of PDF file is empty.\n # assert text is retained from the document.\n # As whitespace can differ (\\n,\" \", etc.), we standardize all to simple whitespace\n page_standard_whitespace = \" \".join(pages[0].split())\n assert \"Adobe Systems made the PDF specification available free of charge in 1993.\" in page_standard_whitespace\n\n\n@pytest.mark.tika\n@pytest.mark.parametrize(\"Converter\", [PDFToTextConverter, TikaConverter])", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "@pytest.mark.tika\n@pytest.mark.parametrize(\"Converter\", [PDFToTextConverter, TikaConverter])", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 127, "n_words": 97, "vocab_size": 68, "complexity": 1, "nloc": 9, "token_counts": 77, "n_ast_nodes": 174, "n_identifiers": 19, "random_cut": "def test_convert(Converter):\n converter = Converter()\n document = converter.convert(file_path=SAMPLES_PATH / \"pdf\" / \"sample_pdf_1.pdf\")[0]\n pages = document.content.split(\"\\f\")\n assert len(pages) == 4 # the sample PDF file has four pages.\n assert pages[0] != \"\" # t" }, { "id": 83356, 
"commit_id": "21cd1c10b3f12467f8f7d9b98b0589f31c2da852", "repo": "zulip", "path": "analytics/tests/test_counts.py", "file_name": "test_counts.py", "fun_name": "test_bad_fill_to_time", "commit_message": "docs: Add missing space in “time zone”.\n\nSigned-off-by: Anders Kaseorg ", "code": "def test_bad_fill_to_time(self) -> None:\n stat = self.make_dummy_count_stat(\"test stat\")\n with self.assertRaises(ValueError):\n process_count_stat(stat, installation_epoch() + 65 * self.MINUTE)\n with self.assertRaises(TimeZoneNotUTCException):\n process_count_stat(stat, installation_epoch().replace(tzinfo=None))\n\n # This tests the LoggingCountStat branch of the code in do_delete_counts_at_hour.\n # It is important that do_delete_counts_at_hour not delete any of the collected\n # logging data!", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 97, "n_words": 46, "vocab_size": 39, "complexity": 1, "nloc": 6, "token_counts": 60, "n_ast_nodes": 106, "n_identifiers": 12, "random_cut": "def test_bad_fill_to_time(self) -> None:\n stat = self.make_dummy_count_stat(\"test stat\")\n with self.assertRaises(ValueError):\n process_count_stat(stat, installation_epoch() + 65 * self.MINUTE)\n with se" }, { "id": 60220, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/examples/pycaffe/tools.py", "file_name": "tools.py", "fun_name": "deprocess", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def deprocess(self, im):\n \n im = im.transpose(1, 2, 0)\n im /= self.scale\n im += self.mean\n im = im[:, :, ::-1] # change to RGB\n\n return np.uint8(im)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 68, "n_words": 25, "vocab_size": 21, "complexity": 1, "nloc": 6, "token_counts": 50, "n_ast_nodes": 80, "n_identifiers": 8, "random_cut": "def deprocess(self, im):\n \n im = im.transpose(1, 2, 0)\n im /= self.scale\n im += self.mean\n im = im[:, :, ::-1" }, { "id": 274628, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/metrics/base_metric.py", "file_name": "base_metric.py", "fun_name": "update_state", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def update_state(self, y_true, y_pred, sample_weight=None):\n \n y_true = tf.cast(y_true, self._dtype)\n y_pred = tf.cast(y_pred, self._dtype)\n [\n y_true,\n y_pred,\n ], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values(\n [y_true, y_pred], sample_weight\n )\n y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(\n y_pred, y_true\n )\n\n ag_fn = tf.__internal__.autograph.tf_convert(\n self._fn, tf.__internal__.autograph.control_status_ctx()\n )\n matches = ag_fn(y_true, y_pred, **self._fn_kwargs)\n return super().update_state(matches, sample_weight=sample_weight)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 184, "n_words": 45, "vocab_size": 29, "complexity": 1, "nloc": 17, "token_counts": 121, "n_ast_nodes": 181, "n_identifiers": 21, "random_cut": "def update_state(self, y_true, y_pred, sample_weight=None):\n \n y_true = tf.cast(y_true, self._dtype)\n y_pred = tf.cast(y_pred, self._dtype)\n [\n y_true,\n y_pred,\n ], sample_weight = metric" }, { 
"id": 281015, "commit_id": "8f8147c3af76f03223943fe630a94dfb326b13c7", "repo": "OpenBBTerminal", "path": "tests/gamestonk_terminal/stocks/screener/test_yahoofinance_view.py", "file_name": "test_yahoofinance_view.py", "fun_name": "test_historical_no_d_signals", "commit_message": "Tests : Stocks > Research + Screener (#1131)\n\n* Updating tests : stocks/research\r\n\r\n* Updating tests : stocks/screener\r\n\r\n* Updating tests : stocks/screener", "code": "def test_historical_no_d_signals(mocker):\n # FORCE SINGLE THREADING\n yf_download = yahoofinance_view.yf.download\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 14, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 32, "token_counts": 146, "n_ast_nodes": 21, "n_identifiers": 6, "random_cut": "def test_historical_no_d_signals(mocker):\n # FORCE SINGLE THREADING\n yf_download = yahoofinance_view.yf.download\n" }, { "id": 46158, "commit_id": "3452f7ce45607af04bade5e5edebaa18fdc13819", "repo": "airflow", "path": "airflow/utils/db.py", "file_name": "db.py", "fun_name": "print_happy_cat", "commit_message": "Enhance `db upgrade` args (#22102)\n\nMake `db upgrade` args more like `db downgrade`.\r\n\r\n```\r\nusage: airflow db upgrade [-h] [--from-revision FROM_REVISION] [--from-version FROM_VERSION] [-r REVISION]\r\n [-s] [-n VERSION]\r\n\r\nUpgrade the schema of the metadata database. To print but not execute commands, use option ``--show-sql-only``. If using options ``--from-revision`` or ``--from-version``, you must also use ``--show-sql-only``, because if actually *running* migrations, we should only migrate from the *current* revision.\r\n\r\noptional arguments:\r\n -h, --help show this help message and exit\r\n --from-revision FROM_REVISION\r\n (Optional) If generating sql, may supply a *from* revision\r\n --from-version FROM_VERSION\r\n (Optional) If generating sql, may supply a *from* version\r\n -r REVISION, --revision REVISION\r\n (Optional) The airflow revision to upgrade to. Note: must provide either `--revision` or `--version`.\r\n -s, --show-sql-only Don't actually run migrations; just print out sql scripts for offline migration. Required if using either `--from-version` or `--from-version`.\r\n -n VERSION, --version VERSION\r\n (Optional) The airflow version to upgrade to. 
Note: must provide either `--revision` or `--version`.\r\n```", "code": "def print_happy_cat(message):\n if sys.stdout.isatty():\n size = os.get_terminal_size().columns\n else:\n size = 0\n print(message.center(size))\n print(.center(size))\n print(.center(size))\n print(.center(size))\n print(.center(size))\n return\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 54, "n_words": 17, "vocab_size": 12, "complexity": 2, "nloc": 11, "token_counts": 74, "n_ast_nodes": 135, "n_identifiers": 11, "random_cut": "def print_happy_cat(message):\n if sy" }, { "id": 99994, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_teams.py", "file_name": "test_organization_teams.py", "fun_name": "test_missing_permission", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_missing_permission(self):\n user = self.create_user()\n self.login_as(user=user)\n\n self.get_error_response(self.organization.slug, status_code=403)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 28, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 34, "n_ast_nodes": 55, "n_identifiers": 9, "random_cut": "def test_missing_permission(self):\n user = self.create_user()\n self.login_as" }, { "id": 294227, "commit_id": "83983bc875445d7147cb98e70f1214c6ed270da9", "repo": "core", "path": "homeassistant/components/motion_blinds/cover.py", "file_name": "cover.py", "fun_name": "set_cover_position", "commit_message": "Motion request update till stop (#68580)\n\n* update untill stop\r\n\r\n* fixes\r\n\r\n* fix spelling", "code": "def set_cover_position(self, **kwargs):\n \n position = kwargs[ATTR_POSITION]\n self._blind.Set_position(100 - position)\n self.request_position_till_stop()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 38, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 51, "n_identifiers": 8, "random_cut": "def set_cover_position(self, **kwargs):\n \n position = kwargs[ATTR_POSITION]\n self._blind.Set_position(100 - position)\n se" }, { "id": 73473, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/settings/forms.py", "file_name": "forms.py", "fun_name": "media", "commit_message": "Reformat with black", "code": "def media(self):\n return forms.Media(\n js=[\n versioned_static(\"wagtailsettings/js/site-switcher.js\"),\n ]\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 58, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 6, "token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 6, "random_cut": "def media(self):\n return forms.Media(\n js=[\n versioned_static(\"wagtailsettings/js/site-switcher.js\"),\n ]\n )\n" }, { "id": 12410, "commit_id": "0a8a4fa6d9aeddc2a1271b7db16c8cac8b66b2b5", "repo": "jina", "path": "tests/unit/orchestrate/flow/flow-construct/test_flow.py", "file_name": "test_flow.py", "fun_name": "test_dry_run_with_two_pathways_diverging_at_non_gateway", "commit_message": "test: fix tests because join disappeared (#4832)", "code": "def 
test_dry_run_with_two_pathways_diverging_at_non_gateway():\n f = (\n Flow()\n .add(name='r1')\n .add(name='r2')\n .add(name='r3', needs='r1')\n .needs(['r2', 'r3'])\n )\n\n with f:\n _validate_flow(f)\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 66, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 10, "token_counts": 52, "n_ast_nodes": 97, "n_identifiers": 7, "random_cut": "def test_dry_run_with_two_pathways_diverging_at_non_gateway():\n f = (\n Flow()\n " }, { "id": 223594, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/_header_value_parser.py", "file_name": "_header_value_parser.py", "fun_name": "fold", "commit_message": "add python 3.10.4 for windows", "code": "def fold(self, policy):\n # message-id tokens may not be folded.\n return str(self) + policy.linesep\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 26, "n_identifiers": 5, "random_cut": "def fold(self, policy):\n # message-id tokens may not be folded.\n " }, { "id": 255848, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/test/shape_inference_test.py", "file_name": "shape_inference_test.py", "fun_name": "test_einsum_sum_along_dim", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def test_einsum_sum_along_dim(self) -> None:\n graph = self._make_graph(\n [('x', TensorProto.FLOAT, (3, 4))],\n [make_node('Einsum', ['x'], ['y'], equation='i j->i ')],\n [],)\n self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (None, ))]) # type: ignore\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 73, "n_words": 26, "vocab_size": 25, "complexity": 1, "nloc": 6, "token_counts": 74, "n_ast_nodes": 117, "n_identifiers": 10, "random_cut": "def test_einsum_sum_along_dim(self) -> None:\n graph = self._make_graph(\n [('x', TensorProto.FLOAT, (3, 4))],\n [make_node('Einsum', ['x'], ['y'], equation='i j->i ')],\n [],)\n self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto." 
}, { "id": 167746, "commit_id": "f65417656ba8c59438d832b6e2a431f78d40c21c", "repo": "pandas", "path": "pandas/core/computation/ops.py", "file_name": "ops.py", "fun_name": "__call__", "commit_message": "TYP: more return annotations in core/ (#47618)\n\n* TYP: more return annotations in core/\r\n\r\n* from __future__ import annotations\r\n\r\n* more __future__", "code": "def __call__(self, env) -> MathCall:\n operand = self.operand(env)\n # error: Cannot call function of unknown type\n return self.func(operand) # type: ignore[operator]\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 42, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 40, "n_identifiers": 6, "random_cut": "def __call__(self, env) -> MathCall:\n operand = self.operand(env)\n # error: Cannot call function of unknown type\n return self.func(operand) #" }, { "id": 258086, "commit_id": "b694c7b5cbf612926fea3b0bf79ac9b12b136a2e", "repo": "haystack", "path": "test/document_stores/test_base.py", "file_name": "test_base.py", "fun_name": "test_get_all_documents_with_incorrect_filter_value", "commit_message": "Document Store test refactoring (#3449)\n\n* add new marker\r\n\r\n* start using test hierarchies\r\n\r\n* move ES tests into their own class\r\n\r\n* refactor test workflow\r\n\r\n* job steps\r\n\r\n* add more tests\r\n\r\n* move more tests\r\n\r\n* more tests\r\n\r\n* test labels\r\n\r\n* add more tests\r\n\r\n* Update tests.yml\r\n\r\n* Update tests.yml\r\n\r\n* fix\r\n\r\n* typo\r\n\r\n* fix es image tag\r\n\r\n* map es ports\r\n\r\n* try\r\n\r\n* fix\r\n\r\n* default port\r\n\r\n* remove opensearch from the markers sorcery\r\n\r\n* revert\r\n\r\n* skip new tests in old jobs\r\n\r\n* skip opensearch_faiss", "code": "def test_get_all_documents_with_incorrect_filter_value(self, ds, documents):\n ds.write_documents(documents)\n result = ds.get_all_documents(filters={\"year\": [\"nope\"]})\n assert len(result) == 0\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 33, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 38, "n_ast_nodes": 63, "n_identifiers": 9, "random_cut": "def test_get_all_documents_with_incorrect_filter_value(self, ds, documents):\n ds.write_documents(documents)\n result = ds.get_all_documents(filt" }, { "id": 279744, "commit_id": "e3e3a428f0a7955040c8a8fb8b2ad6f3e16d29eb", "repo": "keras", "path": "keras/saving/experimental/serialization_lib_test.py", "file_name": "serialization_lib_test.py", "fun_name": "test_simple_objects", "commit_message": "Remaster serialization logic.\n\nThere were several significant flaws, most prominently:\n\n- We had 2 separate serialization systems partially overlapping and interacting with each other: the JSON encoder/decoder one, and serialize/deserialize_keras_objects. 
The new system is fully standalone.\n- We ignored objects passed via `custom_objects` most of the time.\n\nPiperOrigin-RevId: 473794783", "code": "def test_simple_objects(self, obj):\n serialized, _, reserialized = self.roundtrip(obj)\n self.assertEqual(serialized, reserialized)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 27, "n_ast_nodes": 41, "n_identifiers": 8, "random_cut": "def test_simple_objects(self, obj):\n serialized, _, reserialized = self.roundtrip(obj)\n self.assertEqual(s" }, { "id": 114433, "commit_id": "e641c0c6b79558388d5f0d019fd9015f0ed17f8f", "repo": "mindsdb", "path": "tests/integration_tests/flows/test_http.py", "file_name": "test_http.py", "fun_name": "test_7_utils", "commit_message": "test file upload", "code": "def test_7_utils(self):\n \n\n response = requests.get(f'{root}/util/ping')\n assert response.status_code == 200\n\n response = requests.get(f'{root}/util/ping_native')\n assert response.status_code == 200\n\n response = requests.get(f'{root}/config/vars')\n assert response.status_code == 200\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 72, "n_words": 23, "vocab_size": 11, "complexity": 1, "nloc": 7, "token_counts": 51, "n_ast_nodes": 97, "n_identifiers": 7, "random_cut": "def test_7_utils(self):\n \n\n response = requests.get(f'{root}/util/ping')\n assert response.status_code == 200\n\n response = requests.get(f'{root}/util/ping_native')\n assert response.status_code == 200\n\n res" }, { "id": 300937, "commit_id": "a4c1bcefb9d2a6f2aa0bc189fca496d46c78e3b0", "repo": "core", "path": "tests/components/recorder/test_util.py", "file_name": "test_util.py", "fun_name": "test_setup_connection_for_dialect_sqlite", "commit_message": "Tune sqlite based on configured settings (#72016)", "code": "def test_setup_connection_for_dialect_sqlite(sqlite_version, db_supports_row_number):\n \n instance_mock = MagicMock(_db_supports_row_number=True)\n execute_args = []\n close_mock = MagicMock()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 24, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 22, "token_counts": 143, "n_ast_nodes": 44, "n_identifiers": 8, "random_cut": "def test_setup_connection_for_dialect_sqlite(sqlite_version, db_supports_row_number):\n \n instance_mock = MagicMock(_db_supports_row_number=True)\n execute_args = []\n close_mock = MagicMock()\n" }, { "id": 41830, "commit_id": "fefd94023aa2238a6971a4cbe3a37362e3205bc6", "repo": "seaborn", "path": "seaborn/_marks/lines.py", "file_name": "lines.py", "fun_name": "_plot", "commit_message": "Differentiate Line/Path and add Lines/Paths alternatives (#2822)\n\n* Add lines module and differentiate Path/Line\r\n\r\n* Add markers to Line/Path and add Lines/Paths\r\n\r\n* Implement unstatisfying but workable approach to keep_na\r\n\r\n* Add tests for Line(s)/Path(s)\r\n\r\n* Add backcompat for matplotlib<3.3.0", "code": "def _plot(self, split_gen, scales, orient):\n\n for keys, data, ax in split_gen(keep_na=not self._sort):\n\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n 
vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n if self._sort:\n data = data.sort_values(orient)\n\n line = mpl.lines.Line2D(\n data[\"x\"].to_numpy(),\n data[\"y\"].to_numpy(),\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **self.artist_kws,\n )\n ax.add_line(line)\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 375, "n_words": 64, "vocab_size": 49, "complexity": 4, "nloc": 24, "token_counts": 222, "n_ast_nodes": 352, "n_identifiers": 33, "random_cut": "def _plot(self, split_gen, scales, orient):\n\n for keys, data, ax in split_gen(keep_na=not self._sort):\n\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fil" }, { "id": 19847, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_internal/commands/completion.py", "file_name": "completion.py", "fun_name": "add_options", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def add_options(self) -> None:\n self.cmd_opts.add_option(\n \"--bash\",\n \"-b\",\n action=\"store_const\",\n const=\"bash\",\n dest=\"shell\",\n help=\"Emit completion code for bash\",\n )\n self.cmd_opts.add_option(\n \"--zsh\",\n \"-z\",\n action=\"store_const\",\n const=\"zsh\",\n dest=\"shell\",\n help=\"Emit completion code for zsh\",\n )\n self.cmd_opts.add_option(\n \"--fish\",\n \"-f\",\n action=\"store_const\",\n const=\"fish\",\n dest=\"shell\",\n help=\"Emit completion code for fish\",\n )\n\n self.parser.insert_option_group(0, self.cmd_opts)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 288, "n_words": 42, "vocab_size": 26, "complexity": 1, "nloc": 26, "token_counts": 100, "n_ast_nodes": 174, "n_identifiers": 10, "random_cut": "def add_options(self) -> None:\n self.cmd_opts.add_option(\n \"--bash\",\n \"-b\",\n action=\"store_const\",\n const=\"bash\",\n dest=\"shell\",\n help=\"Emit completion code for bash\",\n )\n self.cmd_opts.add_option(\n \"--zsh\",\n \"-z\",\n action=\"store_const\",\n const=\"zsh\",\n dest=\"shell\",\n help=\"Emit completion code for zsh\",\n )\n self.cmd_opts.add_option(\n \"--fish\",\n \"-f\",\n action=\"store_const\",\n const=\"fish\",\n dest=\"shell\",\n help=\"Emit completion co" }, { "id": 286794, "commit_id": "2ef3f86b835f31d71c4349d97fdd4bd1dadc2736", "repo": "OpenBBTerminal", "path": 
"openbb_terminal/portfolio/portfolio_optimization/po_view.py", "file_name": "po_view.py", "fun_name": "display_heat", "commit_message": "Portfolio optimization controller/sdk fixes (#3604)\n\n* fix plot and show\r\n\r\n* clean duplicated code\r\n\r\n* fix msg\r\n\r\n* fix if no portfolios\r\n\r\n* improve error msg\r\n\r\n* fix msg and add integration test\r\n\r\n* final fixes\r\n\r\n* Portfolio/po | alloc : fix paths\r\n\r\n* Linting\r\n\r\nCo-authored-by: Chavithra PARANA ", "code": "def display_heat(**kwargs):\n\n weights = kwargs.get(\"weights\", None)\n data = kwargs.get(\"data\", None)\n category = kwargs.get(\"category\", None)\n title = kwargs.get(\"title\", \"\")\n external_axes = kwargs.get(\"external_axes\", None)\n\n if len(weights) == 1:\n console.print(f\"Heatmap needs at least two values for '{category}'.\")\n return\n\n if external_axes is None:\n _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)\n else:\n ax = external_axes[0]\n\n if len(weights) <= 3:\n number_of_clusters = len(weights)\n else:\n number_of_clusters = None\n\n ax = rp.plot_clusters(\n returns=data,\n codependence=\"pearson\",\n linkage=\"ward\",\n k=number_of_clusters,\n max_k=10,\n leaf_order=True,\n dendrogram=True,\n cmap=\"RdYlBu\",\n # linecolor='tab:purple',\n ax=ax,\n )\n\n ax = ax.get_figure().axes\n ax[0].grid(False)\n ax[0].axis(\"off\")\n\n if category is None:\n # Vertical dendrogram\n l, b, w, h = ax[4].get_position().bounds\n l1 = l * 0.5\n w1 = w * 0.2\n b1 = h * 0.05\n ax[4].set_position([l - l1, b + b1, w * 0.8, h * 0.95])\n # Heatmap\n l, b, w, h = ax[1].get_position().bounds\n ax[1].set_position([l - l1 - w1, b + b1, w * 0.8, h * 0.95])\n w2 = w * 0.2\n # colorbar\n l, b, w, h = ax[2].get_position().bounds\n ax[2].set_position([l - l1 - w1 - w2, b, w, h])\n # Horizontal dendrogram\n l, b, w, h = ax[3].get_position().bounds\n ax[3].set_position([l - l1 - w1, b, w * 0.8, h])\n else:\n # Vertical dendrogram\n l, b, w, h = ax[4].get_position().bounds\n l1 = l * 0.5\n w1 = w * 0.4\n b1 = h * 0.2\n ax[4].set_position([l - l1, b + b1, w * 0.6, h * 0.8])\n # Heatmap\n l, b, w, h = ax[1].get_position().bounds\n ax[1].set_position([l - l1 - w1, b + b1, w * 0.6, h * 0.8])\n w2 = w * 0.05\n # colorbar\n l, b, w, h = ax[2].get_position().bounds\n ax[2].set_position([l - l1 - w1 - w2, b, w, h])\n # Horizontal dendrogram\n l, b, w, h = ax[3].get_position().bounds\n ax[3].set_position([l - l1 - w1, b, w * 0.6, h])\n\n title = \"Portfolio - \" + title + \"\\n\"\n title += ax[3].get_title(loc=\"left\")\n ax[3].set_title(title)\n\n if external_axes is None:\n theme.visualize_output(force_tight_layout=True)\n\n\n@log_start_end(log=logger)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@log_start_end(log=logger)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 707, "n_words": 303, "vocab_size": 104, "complexity": 6, "nloc": 62, "token_counts": 657, "n_ast_nodes": 970, "n_identifiers": 54, "random_cut": "def display_heat(**kwargs):\n\n weights = kwargs.get(\"weights\", None)\n data = kwargs.get(\"data\", None)\n category = kwargs.get(\"category\", None)\n title = kwargs.get(\"title\", \"\")\n external_axes = kwargs.get(\"external_axes\", None)\n\n if len(weights) == 1:\n console.print(f\"Heatmap needs at least two values for '{category}'.\")\n return\n\n if external_axes is None:\n _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)\n else:\n ax = external_axes[0]\n\n if len(weights) <= 3:\n number_of_clusters = len(weights)\n 
else:\n number_of_clusters = None\n\n ax = rp.plot_clusters(\n returns=data,\n codependence=\"pearson\",\n linkage=\"ward\",\n k=number_of_clusters,\n max_k=10,\n leaf_order=True,\n dendrogram=True,\n cmap=\"RdYlBu\",\n # linecolor='tab:purple',\n ax=ax,\n )\n\n ax = ax.get_figure().axes\n ax[0].grid(False)\n ax[0].axis(\"off\")\n\n if category is None:\n # Vertical de" }, { "id": 10560, "commit_id": "cea300655ed8be70d74c390ca12e8b09fb741665", "repo": "jina", "path": "jina/jaml/parsers/__init__.py", "file_name": "__init__.py", "fun_name": "_get_flow_parser", "commit_message": "refactor: use absolute imports (#4167)", "code": "def _get_flow_parser():\n from jina.jaml.parsers.flow.legacy import LegacyParser\n from jina.jaml.parsers.flow.v1 import V1Parser\n\n return [V1Parser, LegacyParser], V1Parser\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 22, "n_words": 14, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 36, "n_ast_nodes": 49, "n_identifiers": 9, "random_cut": "def _get_flow_parser():\n from jina.jaml.parsers.flow.legacy import LegacyParser\n from jina.jaml.parsers.flow.v1 import V1Parser\n\n return [V1Parser, LegacyParser], V" }, { "id": 71603, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/pages/test_edit_page.py", "file_name": "test_edit_page.py", "fun_name": "_create_page", "commit_message": "Reformat with black", "code": "def _create_page(self, parent):\n response = self.client.post(\n reverse(\"wagtailadmin_pages:add\", args=(\"tests\", \"simplepage\", parent.pk)),\n {\n \"title\": \"Hello, world!\",\n \"content\": \"Some content\",\n \"slug\": \"hello-world\",\n \"action-publish\": \"publish\",\n },\n follow=True,\n )\n self.assertRedirects(\n response, reverse(\"wagtailadmin_explore\", args=(parent.pk,))\n )\n page = SimplePage.objects.get()\n self.assertTrue(page.live)\n return response, page\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 199, "n_words": 36, "vocab_size": 32, "complexity": 1, "nloc": 17, "token_counts": 97, "n_ast_nodes": 164, "n_identifiers": 17, "random_cut": "def _create_page(self, parent):\n response = self.client.post(\n reverse(\"wagtailadmin_pages:add\", args=(\"tests\", \"simplepage\", parent.pk)),\n {\n \"title\": \"Hello, world!\",\n \"content\": \"Some content\",\n \"slug" }, { "id": 29853, "commit_id": "decd505f55d02c616ce5b804c06a71e120d15c15", "repo": "saleor", "path": "saleor/graphql/plugins/schema.py", "file_name": "schema.py", "fun_name": "resolve_plugin", "commit_message": "Add plugin manager promise (#11414)", "code": "def resolve_plugin(_root, info, manager, **data):\n return resolve_plugin(data.get(\"id\"), manager)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 24, "n_ast_nodes": 37, "n_identifiers": 6, "random_cut": "def resolve_plugin(_root, info, manager, **data):\n " }, { "id": 257779, "commit_id": "92046ce5b54ddd0cc21ee98bff30ba507ec9d054", "repo": "haystack", "path": "test/document_stores/test_opensearch.py", "file_name": "test_opensearch.py", "fun_name": "test__create_document_index_no_index_no_mapping_faiss", "commit_message": "feat: FAISS in OpenSearch: Support HNSW for dot product and l2 
(#3029)\n\n* support faiss hnsw\r\n\r\n* blacken\r\n\r\n* update docs\r\n\r\n* improve similarity check\r\n\r\n* add tests\r\n\r\n* update schema\r\n\r\n* set ef_search param correctly\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Agnieszka Marzec <97166305+agnieszka-m@users.noreply.github.com>\r\n\r\n* regenerate docs\r\n\r\nCo-authored-by: Massimiliano Pippi \r\nCo-authored-by: Agnieszka Marzec <97166305+agnieszka-m@users.noreply.github.com>", "code": "def test__create_document_index_no_index_no_mapping_faiss(self, mocked_document_store):\n mocked_document_store.client.indices.exists.return_value = False\n mocked_document_store.knn_engine = \"faiss\"\n mocked_document_store._create_document_index(self.index_name)\n _, kwargs = mocked_document_store.client.indices.create.call_args\n assert kwargs[\"body\"] == {\n \"mappings\": {\n \"dynamic_templates\": [\n {\"strings\": {\"mapping\": {\"type\": \"keyword\"}, \"match_mapping_type\": \"string\", \"path_match\": \"*\"}}\n ],\n \"properties\": {\n \"content\": {\"type\": \"text\"},\n \"embedding\": {\n \"dimension\": 768,\n \"method\": {\n \"engine\": \"faiss\",\n \"name\": \"hnsw\",\n \"parameters\": {\"ef_construction\": 512, \"m\": 16},\n \"space_type\": \"innerproduct\",\n },\n \"type\": \"knn_vector\",\n },\n \"name\": {\"type\": \"keyword\"},\n },\n },\n \"settings\": {\"analysis\": {\"analyzer\": {\"default\": {\"type\": \"standard\"}}}, \"index\": {\"knn\": True}},\n }\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 501, "n_words": 72, "vocab_size": 58, "complexity": 1, "nloc": 27, "token_counts": 174, "n_ast_nodes": 330, "n_identifiers": 14, "random_cut": "def test__create_document_index_no_index_no_mapping_faiss(self, mocked_document_store):\n mocked_document_store.client.indices.exists.return_value = False\n mocked_document_store.knn_engine = \"faiss\"\n mocked_document_store._create_document_index(self.index_name)\n _, kwargs = mocked_document_store.client.indices.create.call_args\n assert kwargs[\"body\"] == {\n \"mappings\": {\n \"dynamic_templates\": [\n {\"strings\": {\"mapping\": {\"type\": \"keyword\"}, \"match_mapping_type\": \"string\", \"path_match\": \"*\"}}\n ],\n \"properties\": {\n \"content\": {\"type\": \"text\"},\n \"embedding\": {\n \"dimension\": 768,\n \"method\": {\n \"engine\": \"faiss\",\n \"name\": \"hnsw\",\n \"parameters\": {\"ef_construction\": 512, \"m\": 16},\n \"space" }, { "id": 200959, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/aggregation_regress/tests.py", "file_name": "tests.py", "fun_name": "test_annotation_with_value", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_annotation_with_value(self):\n values = (\n Book.objects.filter(\n name=\"Practical Django Projects\",\n )\n .annotate(\n discount_price=F(\"price\") * 2,\n )\n .values(\n \"discount_price\",\n )\n .annotate(sum_discount=Sum(\"discount_price\"))\n )\n self.assertSequenceEqual(\n values,\n [{\"discount_price\": Decimal(\"59.38\"), \"sum_discount\": Decimal(\"59.38\")}],\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 198, "n_words": 27, "vocab_size": 23, "complexity": 1, "nloc": 17, "token_counts": 74, "n_ast_nodes": 130, "n_identifiers": 14, "random_cut": "def test_annotation_with_value(self):\n values = (\n Book.objects.filter(\n 
name=\"Practical Django Projects\",\n )\n .annotate(\n discount_price=F(\"price\") * 2,\n )\n .values(\n \"discount_price\",\n )\n .annotate(sum_discount=Sum(\"discount_price\"))\n )\n self.assertSequenceEqual(\n values,\n [{\"discount_pric" }, { "id": 258135, "commit_id": "2bb81331b75aec68de0d45c4cb116170d265f1fe", "repo": "haystack", "path": "test/document_stores/test_base.py", "file_name": "test_base.py", "fun_name": "test_in_filters", "commit_message": "feat: add SQLDocumentStore tests (#3517)\n\n* port SQL tests\r\n\r\n* cleanup document_store_tests.py from sql tests\r\n\r\n* leftover\r\n\r\n* Update .github/workflows/tests.yml\r\n\r\nCo-authored-by: Sara Zan \r\n\r\n* review comments\r\n\r\n* Update test/document_stores/test_base.py\r\n\r\nCo-authored-by: bogdankostic \r\n\r\nCo-authored-by: Sara Zan \r\nCo-authored-by: bogdankostic ", "code": "def test_in_filters(self, ds, documents):\n ds.write_documents(documents)\n\n result = ds.get_all_documents(filters={\"year\": {\"$in\": [\"2020\", \"2021\", \"n.a.\"]}})\n assert len(result) == 6\n result = ds.get_all_documents(filters={\"year\": [\"2020\", \"2021\", \"n.a.\"]})\n assert len(result) == 6\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 60, "n_words": 26, "vocab_size": 17, "complexity": 1, "nloc": 6, "token_counts": 73, "n_ast_nodes": 127, "n_identifiers": 9, "random_cut": "def test_in_filters(self, ds, documents):\n ds.write_documents(documents)\n\n result = ds.get_all_documents(filters={\"year\": {\"$in\": [\"2020\", \"2021\", \"n.a.\"]}})\n assert len(result) == 6\n result = ds.get_all_documents(filters={\"year\": [\"2020\", \"2021\"" }, { "id": 73588, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/table_block/tests.py", "file_name": "tests.py", "fun_name": "test_empty_table_block_is_not_rendered", "commit_message": "Reformat with black", "code": "def test_empty_table_block_is_not_rendered(self):\n \n value = None\n block = TableBlock()\n result = block.render(value)\n expected = \"\"\n\n self.assertHTMLEqual(result, expected)\n self.assertNotIn(\"None\", result)\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 67, "n_words": 18, "vocab_size": 15, "complexity": 1, "nloc": 7, "token_counts": 41, "n_ast_nodes": 73, "n_identifiers": 10, "random_cut": "def test_empty_table_block_is_not_rendered(self):\n \n value = None\n block = TableBlock()\n result = block.render(value)\n expected = \"\"\n\n self.assertHTMLEqual(result, expected)\n self.assertNotIn(\"None\", result)\n\n" }, { "id": 115336, "commit_id": "acc5b7c65d4f0356f7c68e343ef43735f2f33593", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/mongodb_handler/mongodb_handler.py", "file_name": "mongodb_handler.py", "fun_name": "flatten", "commit_message": "fixes", "code": "def flatten(self, row, level=0):\n # move sub-keys to upper level\n # TODO is disabled now\n\n if level <= 0:\n return row\n\n add = {}\n del_keys = []\n for k, v in row.items():\n if isinstance(v, dict):\n for k2, v2 in self.flatten(v, level=level - 1).items():\n add[f'{k}.{k2}'] = v2\n del_keys.append(k)\n if add:\n row.update(add)\n for key in del_keys:\n del row[key]\n\n return row\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 213, "n_words": 58, 
"vocab_size": 45, "complexity": 7, "nloc": 15, "token_counts": 101, "n_ast_nodes": 166, "n_identifiers": 16, "random_cut": "def flatten(self, row, level=0):\n # move sub-keys to upper level\n # TODO is disabled now\n\n if level <= 0:\n return row\n\n add = {}\n del_keys = []\n for k, v in row.items():\n if isinstance(v, dict):\n for k2, v2 in self.flatten(v, level=level - 1).items():\n add[f'{k}.{k2}'] = v2\n del_keys.ap" }, { "id": 175535, "commit_id": "42a64c03ec5c443f2a5c2ee4284622f5d1f5326c", "repo": "cpython", "path": "Lib/test/test_enum.py", "file_name": "test_enum.py", "fun_name": "test_format", "commit_message": "Revert \"bpo-40066: [Enum] update str() and format() output (GH-30582)\" (GH-30632)\n\nThis reverts commit acf7403f9baea3ae1119fc6b4a3298522188bf96.", "code": "def test_format(self):\n Perm = self.Perm\n self.assertEqual(format(Perm.R, ''), 'R')\n self.assertEqual(format(Perm.R | Perm.X, ''), 'R|X')\n", "url": "https://github.com/python/cpython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 33, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 44, "n_ast_nodes": 75, "n_identifiers": 7, "random_cut": "def test_format(self):\n Perm = self.Perm\n self.assertE" }, { "id": 132962, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/client/server/server_stubs.py", "file_name": "server_stubs.py", "fun_name": "__reduce__", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def __reduce__(self):\n remote_obj = self.get_remote_obj()\n if remote_obj is None:\n return (self.__class__, (self.client_id, self.id))\n return (identity, (remote_obj,))\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 47, "n_words": 16, "vocab_size": 14, "complexity": 2, "nloc": 5, "token_counts": 42, "n_ast_nodes": 63, "n_identifiers": 8, "random_cut": "def __reduce__(self):\n remote_obj = self.get_remote_obj()\n if remote_obj is None:\n return (self.__class__, (self.client_id, self.id))\n return (identi" }, { "id": 289594, "commit_id": "b35cfe711a7032bc1e41b685ea180277abc99edb", "repo": "core", "path": "homeassistant/components/google_travel_time/config_flow.py", "file_name": "config_flow.py", "fun_name": "async_step_user", "commit_message": "Move default option handling to config_flow for google_travel_time (#80607)\n\nMove default option handling to config_flow", "code": "async def async_step_user(self, user_input=None) -> FlowResult:\n \n errors = {}\n user_input = user_input or {}\n if user_input:\n try:\n await self.hass.async_add_executor_job(\n validate_config_entry,\n self.hass,\n user_input[CONF_API_KEY],\n user_input[CONF_ORIGIN],\n user_input[CONF_DESTINATION],\n )\n return self.async_create_entry(\n title=user_input.get(CONF_NAME, DEFAULT_NAME),\n data=user_input,\n options=default_options(self.hass),\n )\n except InvalidApiKeyException:\n errors[\"base\"] = \"invalid_auth\"\n except UnknownException:\n errors[\"base\"] = \"cannot_connect\"\n\n return self.async_show_form(\n step_id=\"user\",\n data_schema=vol.Schema(\n {\n vol.Required(\n CONF_NAME, default=user_input.get(CONF_NAME, DEFAULT_NAME)\n ): cv.string,\n vol.Required(CONF_API_KEY): cv.string,\n vol.Required(CONF_DESTINATION): cv.string,\n vol.Required(CONF_ORIGIN): cv.string,\n }\n ),\n errors=errors,\n )\n", "url": 
"https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 572, "n_words": 63, "vocab_size": 50, "complexity": 5, "nloc": 36, "token_counts": 183, "n_ast_nodes": 282, "n_identifiers": 30, "random_cut": "async def async_step_user(self, user_input=None) -> FlowResult:\n \n errors = {}\n user_input = user_input or {}\n if user_input:\n try:\n await self.hass.async_add_executor_job(\n validate_config_entry,\n self.hass,\n user_input[CONF_API_KEY],\n user_input[CONF_ORIGIN],\n user_input[CONF_DESTINATION],\n )\n return self.async_create_entry(\n title=user_input.get(CONF_NAME, DEFAULT_NAME),\n data=user_input,\n options=default_options(self.hass),\n )\n except InvalidApiKeyException:\n errors[\"base\"] = \"invalid_auth\"\n except UnknownException:\n errors[\"base\"] = \"cannot_connect\"\n\n return self.async_show_form(\n step_id=\"user\",\n data_schema=vol.Schema(\n {\n vol.Required(\n CONF_NAME, default=user_input.get(CONF_NAME, DEFAULT_NAME)\n ): cv.string,\n vol.Required(CONF_API_KEY): cv.string,\n vol.Required(CONF_DESTINATION): cv.string,\n vol.Required(CONF_ORIGIN): cv.string,\n }\n )" }, { "id": 25845, "commit_id": "a4f2c7976dae1f9608b1bc130e497d558169848f", "repo": "saleor", "path": "saleor/checkout/tests/test_checkout.py", "file_name": "test_checkout.py", "fun_name": "test_change_address_in_checkout_to_same", "commit_message": "Fix incorrect handling of unavailable products in checkout (#9058)\n\n* Fix incorrect handling of unavailable products in CheckoutComplete\r\n\r\n* Fix incorrect handling of unavailable products in CheckoutPaymentCreate\r\n\r\n* Refactor fetch_checkout_lines methods - return list of correct lines and invalid variant ids\r\n\r\n* Raise validation error when completing checkout with empty lines\r\n\r\n* Raise ValidationError when creating payment for checkout with empty lines", "code": "def test_change_address_in_checkout_to_same(checkout, address):\n checkout.shipping_address = address\n checkout.billing_address = address.get_copy()\n checkout.save(update_fields=[\"shipping_address\", \"billing_address\"])\n shipping_address_id = checkout.shipping_address.id\n billing_address_id = checkout.billing_address.id\n\n manager = get_plugins_manager()\n lines, _ = fetch_checkout_lines(checkout)\n checkout_info = fetch_checkout_info(checkout, lines, [], manager)\n change_shipping_address_in_checkout(\n checkout_info,\n address,\n lines,\n [],\n manager,\n checkout.channel.shipping_method_listings.all(),\n )\n change_billing_address_in_checkout(checkout, address)\n\n checkout.refresh_from_db()\n assert checkout.shipping_address.id == shipping_address_id\n assert checkout.billing_address.id == billing_address_id\n assert checkout_info.shipping_address == address\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 139, "n_words": 53, "vocab_size": 35, "complexity": 1, "nloc": 22, "token_counts": 130, "n_ast_nodes": 202, "n_identifiers": 24, "random_cut": "def test_change_address_in_checkout_to_same(checkout, address):\n checkout.shipping_address = address\n checkout.billing_address = address.get_copy()\n checkout.save(update_fields=[\"shipping_address\", \"billing_addres" }, { "id": 337220, "commit_id": "a0995e1ccb81cea86a065d05c520112b156079d8", "repo": "accelerate", "path": "src/accelerate/accelerator.py", "file_name": "accelerator.py", "fun_name": "prepare_model", "commit_message": "make deepspeed 
optimizer match parameters of passed optimizer (#246)\n\n* make deepspeed optimizer match parameters of passed optimizer, instead of all model parameters\r\n\r\n* style\r\n\r\nCo-authored-by: Jack Hessel ", "code": "def prepare_model(self, model):\n if self.device_placement:\n model = model.to(self.device)\n if self.distributed_type == DistributedType.MULTI_GPU:\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[self.local_process_index], output_device=self.local_process_index, **kwargs\n )\n elif self.distributed_type == DistributedType.MULTI_CPU:\n kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}\n model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)\n if self.native_amp:\n model.forward = torch.cuda.amp.autocast()(model.forward)\n model.forward = convert_outputs_to_fp32(model.forward)\n return model\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 195, "n_words": 58, "vocab_size": 34, "complexity": 7, "nloc": 15, "token_counts": 157, "n_ast_nodes": 242, "n_identifiers": 26, "random_cut": "def prepare_model(self, model):\n if self.device_placement:\n model = mo" }, { "id": 55646, "commit_id": "c0a02e64ffd641513a757a6676b6ecdabba91158", "repo": "prefect", "path": "tests/orion/models/test_flow_run_alert_policies.py", "file_name": "test_flow_run_alert_policies.py", "fun_name": "failed_policy", "commit_message": "Running on postgres", "code": "async def failed_policy(session, notifier_block):\n policy = await models.flow_run_alert_policies.create_flow_run_alert_policy(\n session=session,\n flow_run_alert_policy=schemas.core.FlowRunAlertPolicy(\n name=\"My Success Policy\",\n state_names=[\"Failed\"],\n tags=[],\n block_document_id=notifier_block.id,\n ),\n )\n await session.commit()\n return policy\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 98, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 12, "token_counts": 60, "n_ast_nodes": 95, "n_identifiers": 17, "random_cut": "async def failed_policy(session, notifier_block):\n policy = await models.flow" }, { "id": 197219, "commit_id": "b27e2b44626d138bd6ea235fbf114644baa5b144", "repo": "sympy", "path": "sympy/functions/combinatorial/numbers.py", "file_name": "numbers.py", "fun_name": "divides", "commit_message": "Deprecate redundant static methods", "code": "def divides(p, n):\n sympy_deprecation_warning(\n ,\n deprecated_since_version=\"1.11\",\n active_deprecations_target='deprecated-carmichael-static-methods',\n )\n return n % p == 0\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 55, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 9, "token_counts": 26, "n_ast_nodes": 44, "n_identifiers": 6, "random_cut": "def divides(p, n):\n sympy_deprecation_warning(\n ,\n " }, { "id": 221182, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bz2.py", "file_name": "bz2.py", "fun_name": "readinto", "commit_message": "add python 3.10.4 for windows", "code": "def readinto(self, b):\n \n self._check_can_read()\n return self._buffer.readinto(b)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 38, "n_identifiers": 5, "random_cut": "def readinto(self, b):\n \n self._check_can_read()\n return self._buffer.r" }, { "id": 269177, "commit_id": "f6a1bda81886a721413eb21a12fcbd69b3f14dfa", "repo": "keras", "path": "keras/utils/dataset_utils.py", "file_name": "dataset_utils.py", "fun_name": "_convert_dataset_to_list", "commit_message": "update dataset_utils.py", "code": "def _convert_dataset_to_list(dataset,data_size_warning_flag = True):\n \n # TODO prakashsellathurai: add failing test cases for list of tuples,tuples of nd array\n # TODO prakashsellathurai: add support for Batched and unbatched tf datasets\n if isinstance(dataset,tuple):\n if len(dataset) == 0:\n raise ValueError('`dataset` must be a non-empty list/tuple of'\n ' numpy.ndarrays or tf.data.Dataset objects.')\n dataset_iterator = list(zip(*dataset))\n elif isinstance(dataset,list):\n if len(dataset) == 0:\n raise ValueError('`dataset` must be a non-empty list/tuple of'\n ' numpy.ndarrays or tf.data.Dataset objects.')\n if isinstance(dataset[0],np.ndarray):\n dataset_iterator = list(zip(*dataset))\n else:\n dataset_iterator = list(dataset)\n\n elif isinstance(dataset,np.ndarray):\n dataset_iterator = list(dataset)\n elif isinstance(dataset,tf.data.Dataset):\n dataset_iterator = list(dataset)\n else:\n raise TypeError('`dataset` must be either a tf.data.Dataset object'\n f' or a list/tuple of arrays. Received : {type(dataset)}'\n )\n \n dataset_as_list = []\n try:\n dataset_iterator = iter(dataset_iterator)\n first_datum = next(dataset_iterator)\n dataset_as_list.append(first_datum)\n except ValueError:\n raise ValueError('Received an empty Dataset i.e dataset with no elements. '\n '`dataset` must be a non-empty list/tuple of'\n ' numpy.ndarrays or tf.data.Dataset objects.')\n \n \n \n start_time = time.time()\n for i,datum in enumerate(dataset_iterator):\n if data_size_warning_flag:\n if i % 10 == 0:\n cur_time = time.time()\n # warns user if the dataset is too large to iterate within 10s\n if int(cur_time - start_time) > 10 and data_size_warning_flag:\n warnings.warn('Takes too long time to process the `dataset`,'\n 'this function is only for small datasets '\n '(e.g. 
< 10,000 samples).'\n )\n data_size_warning_flag = False\n \n dataset_as_list.append(datum)\n \n return dataset_as_list\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 531, "n_words": 210, "vocab_size": 119, "complexity": 14, "nloc": 44, "token_counts": 237, "n_ast_nodes": 420, "n_identifiers": 31, "random_cut": "def _convert_dataset_to_list(dataset,data_size_warning_flag = True):\n \n # TODO prakashsellathurai: add failing test cases for list of tuples,tuples of nd array\n # TODO prakashsellathurai: add support for Batched and unbatched tf datasets\n if isinstance(dataset,tuple):\n if len(dataset) == 0:\n raise ValueError('`dataset` must be a non-empty list/tuple of'\n ' numpy.ndarrays or tf.data.Dataset objects.')\n dataset_iterator = list(zip(*dataset))\n elif isinstance(dataset,list):\n if len(dataset) == 0:\n raise ValueError('`dataset` must be a non-empty list/tuple of'\n ' numpy.ndarrays or tf.data.Dataset objects.')\n if isinstance(dataset[0],np.ndarray):\n dataset_iterator = list(zip(*dataset))\n else:\n dataset_iterator = list(dataset)\n\n elif isinstance(dataset,np.ndarray):\n dataset_iterator = list(dataset)\n elif isinstance(dataset,tf.data.Dataset):\n dataset_iterator = list(dataset)\n else:\n raise TypeError('`dataset` must be either a tf.data.Dataset object'\n f' or a list/tuple of arrays. Received : {type(dataset)}'\n )\n \n dataset_as_list = []\n try:\n dataset_iterator = iter(dataset_iterator)\n first_datum = next(dataset_iterator)\n dataset_as_list.append(first_datum)\n except ValueError:\n raise ValueError('Received an empty Dataset i.e dataset with no elements. '\n '`dataset` must be a" }, { "id": 112699, "commit_id": "1896212902bd8d1fa11c5df6c8e1fe38ae4b5392", "repo": "nni", "path": "nni/retiarii/nn/pytorch/api.py", "file_name": "api.py", "fun_name": "__repr__", "commit_message": "Add license header and typehints for NAS (#4774)", "code": "def __repr__(self) -> str:\n reprs = []\n for arg in self.arguments:\n if isinstance(arg, ValueChoiceX) and not isinstance(arg, ValueChoice):\n reprs.append('(' + repr(arg) + ')') # add parenthesis for operator priority\n else:\n reprs.append(repr(arg))\n return self.repr_template.format(*reprs)\n\n # the following are a series of methods to create \"ValueChoiceX\"\n # which is a transformed version of value choice\n # https://docs.python.org/3/reference/datamodel.html#special-method-names\n\n # Special operators that can be useful in place of built-in conditional operators.", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 153, "n_words": 68, "vocab_size": 57, "complexity": 4, "nloc": 8, "token_counts": 68, "n_ast_nodes": 118, "n_identifiers": 13, "random_cut": "def __repr__(self) -> str:\n reprs = []\n for arg in self.arguments:\n if isinstance(arg, ValueChoiceX) and not isinstance(arg, ValueChoice):\n reprs.append('(' + repr(arg) + ')') # add parenthesis for operator priority\n else:\n reprs.append(repr(arg))\n return self.repr_template.format(*reprs)\n\n # the following are a series of methods to create \"ValueChoiceX\"\n # which is a transformed version of value choice\n # https://docs.python.org/3/reference/datamodel.html#special-method-names\n\n # Spe" }, { "id": 106220, "commit_id": "96f87aaa3b34d80bc72097a7475d8093849091fc", "repo": "youtube-dl", "path": "youtube_dl/jsinterp.py", "file_name": "jsinterp.py", "fun_name": "__iter__", 
"commit_message": "Back-port JS interpreter upgrade from yt-dlp PR #1437", "code": "def __iter__(self):\n for scope in self.stack:\n for scope_item in iter(scope):\n yield scope_item\n", "url": "https://github.com/ytdl-org/youtube-dl.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 12, "vocab_size": 9, "complexity": 3, "nloc": 4, "token_counts": 22, "n_ast_nodes": 35, "n_identifiers": 6, "random_cut": "def __iter__(self):\n for scope in self.stack:\n for scope_item in iter(scope):\n yield scope_item\n" }, { "id": 162324, "commit_id": "135dfa2c7ebc9284db940713c0dc6cbc19ca5fa4", "repo": "yt-dlp", "path": "yt_dlp/extractor/tiktok.py", "file_name": "tiktok.py", "fun_name": "_real_extract", "commit_message": "[extractor,cleanup] Use `_search_nextjs_data`", "code": "def _real_extract(self, url):\n video_id = self._match_id(url)\n\n try:\n return self._extract_aweme_app(video_id)\n except ExtractorError as e:\n self.report_warning(f'{e}; Retrying with webpage')\n\n # If we only call once, we get a 403 when downlaoding the video.\n self._download_webpage(url, video_id)\n webpage = self._download_webpage(url, video_id, note='Downloading video webpage')\n next_data = self._search_nextjs_data(webpage, video_id, default='{}')\n\n if next_data:\n status = traverse_obj(next_data, ('props', 'pageProps', 'statusCode'), expected_type=int) or 0\n video_data = traverse_obj(next_data, ('props', 'pageProps', 'itemInfo', 'itemStruct'), expected_type=dict)\n else:\n sigi_json = self._search_regex(\n r'>\\s*window\\[[\\'\"]SIGI_STATE[\\'\"]\\]\\s*=\\s*(?P{.+});',\n webpage, 'sigi data', group='sigi_state')\n sigi_data = self._parse_json(sigi_json, video_id)\n status = traverse_obj(sigi_data, ('VideoPage', 'statusCode'), expected_type=int) or 0\n video_data = traverse_obj(sigi_data, ('ItemModule', video_id), expected_type=dict)\n\n if status == 0:\n return self._parse_aweme_video_web(video_data, url)\n elif status == 10216:\n raise ExtractorError('This video is private', expected=True)\n raise ExtractorError('Video not available', video_id=video_id)\n\n", "url": "https://github.com/yt-dlp/yt-dlp.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 336, "n_words": 113, "vocab_size": 82, "complexity": 7, "nloc": 24, "token_counts": 215, "n_ast_nodes": 352, "n_identifiers": 28, "random_cut": "def _real_extract(self, url):\n video_id = self._match_id(url)\n\n try:\n return self._extract_aweme_app(video_id)\n except ExtractorError as e:\n self.report_warning(f'{e}; Retrying with webpage')\n\n # If we only call once, we get a 403 when downlaoding the video.\n self._download_webpage(url, video_id)\n webpage = self._download_webpage(url, video_id, note='Downloading video webpage')\n next_data = self._search_nextjs_data(webpage, video_id, default='{}')\n\n if next_data:\n status = traverse_obj(next_data, ('props', 'pageProps', 'statusCode'), expected_type=int) or 0\n video_data = traverse_obj(next_data, ('props', 'pageProps', 'itemInfo', 'itemStruct'), expected_type=dict)\n else:\n sigi_json = self._search_regex(\n r'>\\s*window\\[[\\'\"]SIGI_STATE[\\'\"]\\]\\s*=\\s*(?P{.+});',\n webpage, 'sigi data', group='sigi_state')\n sigi_data = self._parse_json(sigi_json, video_id)\n status = traverse_obj(sigi_data, ('VideoPage', 'statusCode'), expected_type=int) or 0\n video_data = traverse_obj(sigi_data, ('ItemModule', video_id), expected_type=dict)\n\n if status == 0:\n " }, { "id": 147136, "commit_id": 
"909cdea3cdbebb11ea2e62355b99f8bc3008c3ac", "repo": "ray", "path": "python/ray/_private/function_manager.py", "file_name": "function_manager.py", "fun_name": "_wait_for_function", "commit_message": "[Python Worker] add feature flag to support forking from workers (#23260)\n\nMake sure Python dependencies can be imported on demand, without the background importer thread. Use cases are:\r\n\r\nIf the pubsub notification for a new export is lost, importing can still be done.\r\nAllow not running the background importer thread, without affecting Ray's functionalities.\r\nAdd a feature flag to support forking from Python workers, by\r\n\r\nEnable fork support in gRPC.\r\nDisable importer thread and only leave the main thread in the Python worker. The importer thread will not run after forking anyway.", "code": "def _wait_for_function(self, function_descriptor, job_id, timeout=10):\n \n start_time = time.time()\n # Only send the warning once.\n warning_sent = False\n while True:\n with self.lock:\n if self._worker.actor_id.is_nil() and (\n function_descriptor.function_id in self._function_execution_info\n ):\n break\n elif not self._worker.actor_id.is_nil() and (\n self._worker.actor_id in self._worker.actors\n ):\n break\n if time.time() - start_time > timeout:\n warning_message = (\n \"This worker was asked to execute a function \"\n f\"that has not been registered ({function_descriptor}, \"\n f\"node={self._worker.node_ip_address}, \"\n f\"worker_id={self._worker.worker_id.hex()}, \"\n f\"pid={os.getpid()}). You may have to restart Ray.\"\n )\n if not warning_sent:\n logger.error(warning_message)\n ray._private.utils.push_error_to_driver(\n self._worker,\n ray_constants.WAIT_FOR_FUNCTION_PUSH_ERROR,\n warning_message,\n job_id=job_id,\n )\n warning_sent = True\n # Try importing in case the worker did not get notified, or the\n # importer thread did not run.\n self._worker.import_thread._do_importing()\n time.sleep(0.001)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 652, "n_words": 111, "vocab_size": 80, "complexity": 8, "nloc": 32, "token_counts": 158, "n_ast_nodes": 297, "n_identifiers": 32, "random_cut": "def _wait_for_function(self, function_descriptor, job_id, timeout=10):\n \n start_time = time.time()\n # Only send the warning once.\n warning_sent = False\n while True:\n with self.lock:\n if self._worker.actor_id.is_nil() and (\n function_descriptor.function_id in self._function_execution_info\n ):\n break\n elif not self._worker.actor_id.is_nil() and (\n self._worker.actor_id in self._worker.actors\n ):\n break\n if time.time() - start_time > timeout:\n warning_message = (\n \"This worker was asked to execute a function \"\n f\"that has not been registered ({function_descriptor}, \"\n f\"node={self._worker.node_ip_address}, \"\n f\"worker_id={self._worker.worker_id.hex()}, \"\n f\"pid={os.getpid()}). You may have to restart Ray.\"\n )\n if not warning" }, { "id": 90825, "commit_id": "b75841f096c4c8ebda0dffa159f16dbddcf2fc69", "repo": "sentry", "path": "tests/sentry/eventstore/test_models.py", "file_name": "test_models.py", "fun_name": "test_event_node_id", "commit_message": "ref: fix sentry.models test (#35382)\n\n* ref: fix sentry.models test\r\n\r\n* ref: split and rename sentry.models.tests so it actually runs", "code": "def test_event_node_id(self):\n # Create an event without specifying node_id. 
A node_id should be generated\n e1 = Event(project_id=1, event_id=\"abc\", data={\"foo\": \"bar\"})\n assert e1.data.id is not None, \"We should have generated a node_id for this event\"\n e1_node_id = e1.data.id\n e1.data.save()\n e1_body = nodestore.get(e1_node_id)\n assert e1_body == {\"foo\": \"bar\"}, \"The event body should be in nodestore\"\n\n e1 = Event(project_id=1, event_id=\"abc\")\n\n assert e1.data.data == {\"foo\": \"bar\"}, \"The event body should be loaded from nodestore\"\n assert e1.data.id == e1_node_id, \"The event's node_id should be the same after load\"\n\n # Event with no data should not be saved to nodestore\n e2 = Event(project_id=1, event_id=\"mno\", data=None)\n e2_node_id = e2.data.id\n assert e2.data.data == {} # NodeData returns {} by default\n eventstore.bind_nodes([e2], \"data\")\n assert e2.data.data == {}\n e2_body = nodestore.get(e2_node_id)\n assert e2_body is None\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 251, "n_words": 125, "vocab_size": 75, "complexity": 1, "nloc": 17, "token_counts": 167, "n_ast_nodes": 282, "n_identifiers": 18, "random_cut": "def test_event_node_id(self):\n # Create an event without specifying node_id. A node_id should be generated\n e1 = Event(project_id=1, event_id=\"abc\", data={\"foo\": \"bar\"})\n assert e1.data.id is not None, \"We should have generated a node_id for this event\"\n e1_node_id = e1.data.id\n e1.data.save()\n e1_body = nodestore.get(e1_node_id)\n assert e1_body == {\"foo\": \"bar\"}, \"The event body should be in nodestore\"\n\n e1 = Event(project_id=1, event_id=\"abc\")\n\n assert e1.data.data == {\"foo\": \"bar\"}, \"The event body should be loaded from nodestore\"\n assert e1.data.id == e1_node_id, \"The event's node_id should be the same after load\"\n\n # Event with no data should not be saved to nodestore\n e2 = Event(project_id=1, event_id" }, { "id": 62530, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/html5lib/filters/optionaltags.py", "file_name": "optionaltags.py", "fun_name": "__iter__", "commit_message": "upd; format", "code": "def __iter__(self):\n for previous, token, next in self.slider():\n type = token[\"type\"]\n if type == \"StartTag\":\n if (token[\"data\"] or\n not self.is_optional_start(token[\"name\"], previous, next)):\n yield token\n elif type == \"EndTag\":\n if not self.is_optional_end(token[\"name\"], next):\n yield token\n else:\n yield token\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 193, "n_words": 37, "vocab_size": 26, "complexity": 7, "nloc": 12, "token_counts": 79, "n_ast_nodes": 135, "n_identifiers": 9, "random_cut": "def __iter__(self):\n for previous, token, next in self.slider():\n type = token[\"type\"]\n if type == \"StartTag\":\n if (token[\"data\"] or\n not self.is_optional_start(token[\"name\"], previous, next)):\n yield token\n elif type == \"EndTag\":\n if not s" }, { "id": 43253, "commit_id": "f3aacebe502c4ea5dc2b7d29373539296fa037eb", "repo": "airflow", "path": "airflow/providers/sftp/hooks/sftp.py", "file_name": "sftp.py", "fun_name": "get_conn", "commit_message": "Convert sftp hook to use paramiko instead of pysftp (#24512)", "code": "def get_conn(self) -> paramiko.SFTPClient: # type: ignore[override]\n \n if self.conn is None:\n # TODO: remove 
support for ssh_hook when it is removed from SFTPOperator\n if self.ssh_hook is not None:\n self.conn = self.ssh_hook.get_conn().open_sftp()\n else:\n self.conn = super().get_conn().open_sftp()\n return self.conn\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 122, "n_words": 37, "vocab_size": 28, "complexity": 3, "nloc": 12, "token_counts": 61, "n_ast_nodes": 105, "n_identifiers": 8, "random_cut": "def get_conn(self) -> paramiko.SFTPClient: # type: ignore[override]\n \n if self.conn is None:\n # TODO: remove support for ssh_hook when it is removed from SFTPO" }, { "id": 33685, "commit_id": "59407bbeb31fff8340938768051c9daabd38d7a7", "repo": "transformers", "path": "tests/models/deformable_detr/test_modeling_deformable_detr.py", "file_name": "test_modeling_deformable_detr.py", "fun_name": "prepare_config_and_inputs", "commit_message": "Add Deformable DETR (#17281)\n\n* First draft\r\n\r\n* More improvements\r\n\r\n* Improve model, add custom CUDA code\r\n\r\n* Import torch before\r\n\r\n* Add script that imports custom layer\r\n\r\n* Add everything in new ops directory\r\n\r\n* Import custom layer in modeling file\r\n\r\n* Fix ARCHIVE_MAP typo\r\n\r\n* Creating the custom kernel on the fly.\r\n\r\n* Import custom layer in modeling file\r\n\r\n* More improvements\r\n\r\n* Fix CUDA loading\r\n\r\n* More improvements\r\n\r\n* Improve conversion script\r\n\r\n* Improve conversion script\r\n\r\n* Make it work until encoder_outputs\r\n\r\n* Make forward pass work\r\n\r\n* More improvements\r\n\r\n* Make logits match original implementation\r\n\r\n* Make implementation also support single_scale model\r\n\r\n* Add support for single_scale and dilation checkpoint\r\n\r\n* Add support for with_box_refine model\r\n\r\n* Support also two stage model\r\n\r\n* Improve tests\r\n\r\n* Fix more tests\r\n\r\n* Make more tests pass\r\n\r\n* Upload all models to the hub\r\n\r\n* Clean up some code\r\n\r\n* Improve decoder outputs\r\n\r\n* Rename intermediate hidden states and reference points\r\n\r\n* Improve model outputs\r\n\r\n* Move tests to dedicated folder\r\n\r\n* Improve model outputs\r\n\r\n* Fix retain_grad test\r\n\r\n* Improve docs\r\n\r\n* Clean up and make test_initialization pass\r\n\r\n* Improve variable names\r\n\r\n* Add copied from statements\r\n\r\n* Improve docs\r\n\r\n* Fix style\r\n\r\n* Improve docs\r\n\r\n* Improve docs, move tests to model folder\r\n\r\n* Fix rebase\r\n\r\n* Remove DetrForSegmentation from auto mapping\r\n\r\n* Apply suggestions from code review\r\n\r\n* Improve variable names and docstrings\r\n\r\n* Apply some more suggestions from code review\r\n\r\n* Apply suggestion from code review\r\n\r\n* better docs and variables names\r\n\r\n* hint to num_queries and two_stage confusion\r\n\r\n* remove asserts and code refactor\r\n\r\n* add exception if two_stage is True and with_box_refine is False\r\n\r\n* use f-strings\r\n\r\n* Improve docs and variable names\r\n\r\n* Fix code quality\r\n\r\n* Fix rebase\r\n\r\n* Add require_torch_gpu decorator\r\n\r\n* Add pip install ninja to CI jobs\r\n\r\n* Apply suggestion of @sgugger\r\n\r\n* Remove DeformableDetrForObjectDetection from auto mapping\r\n\r\n* Remove DeformableDetrModel from auto mapping\r\n\r\n* Add model to toctree\r\n\r\n* Add model back to mappings, skip model in pipeline tests\r\n\r\n* Apply @sgugger's suggestion\r\n\r\n* Fix imports in the init\r\n\r\n* Fix copies\r\n\r\n* Add CPU implementation\r\n\r\n* Comment out 
GPU function\r\n\r\n* Undo previous change\r\n\r\n* Apply more suggestions\r\n\r\n* Remove require_torch_gpu annotator\r\n\r\n* Fix quality\r\n\r\n* Add logger.info\r\n\r\n* Fix logger\r\n\r\n* Fix variable names\r\n\r\n* Fix initializaztion\r\n\r\n* Add missing initialization\r\n\r\n* Update checkpoint name\r\n\r\n* Add model to doc tests\r\n\r\n* Add CPU/GPU equivalence test\r\n\r\n* Add Deformable DETR to pipeline tests\r\n\r\n* Skip model for object detection pipeline\r\n\r\nCo-authored-by: Nicolas Patry \r\nCo-authored-by: Nouamane Tazi \r\nCo-authored-by: Sylvain Gugger ", "code": "def prepare_config_and_inputs(self):\n pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])\n\n pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device)\n\n labels = None\n if self.use_labels:\n # labels is a list of Dict (each Dict being the labels for a given example in the batch)\n labels = []\n for i in range(self.batch_size):\n target = {}\n target[\"class_labels\"] = torch.randint(\n high=self.num_labels, size=(self.n_targets,), device=torch_device\n )\n target[\"boxes\"] = torch.rand(self.n_targets, 4, device=torch_device)\n target[\"masks\"] = torch.rand(self.n_targets, self.image_size, self.image_size, device=torch_device)\n labels.append(target)\n\n config = self.get_config()\n return config, pixel_values, pixel_mask, labels\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 258, "n_words": 75, "vocab_size": 52, "complexity": 3, "nloc": 16, "token_counts": 170, "n_ast_nodes": 257, "n_identifiers": 26, "random_cut": "def prepare_config_and_inputs(self):\n pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])\n\n pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device)\n\n labels = None\n if self.use_labels:\n # labels is a list of Dict (each Dict being the labels for a given example in the batch)\n labels = []\n for i in range(self.batch_size):\n target = {}\n target[\"class_labels\"] = torch.randint(\n high=self.num_labels, size=(self.n_targets,), device=torch_device\n )\n target[\"boxes\"] = torc" }, { "id": 167518, "commit_id": "ed55bdf198590dd572f2e546c7b2afe7ae98ba74", "repo": "pandas", "path": "pandas/tests/reshape/test_from_dummies.py", "file_name": "test_from_dummies.py", "fun_name": "test_with_prefix_basic", "commit_message": "Initial draft: from_dummies (#41902)", "code": "def test_with_prefix_basic(dummies_basic):\n expected = DataFrame({\"col1\": [\"a\", \"b\", \"a\"], \"col2\": [\"b\", \"a\", \"c\"]})\n result = from_dummies(dummies_basic, sep=\"_\")\n tm.assert_frame_equal(result, expected)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 26, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 49, "n_ast_nodes": 89, "n_identifiers": 9, "random_cut": "def test_with_prefix_basic(dummies_basic):\n expected = DataFrame({\"col1\": [\"a\", \"b\", \"a\"], \"col2\": [\"b\", \"a\", \"c\"]})\n result = fr" }, { "id": 306924, "commit_id": "9fc9d50e077d17cd35822701a8c7b85efa80e49d", "repo": "core", "path": "homeassistant/components/rainmachine/binary_sensor.py", "file_name": "binary_sensor.py", "fun_name": "update_from_latest_data", "commit_message": "Fix bug with 1st gen RainMachine 
controllers and unknown API calls (#78070)\n\nCo-authored-by: epenet <6771947+epenet@users.noreply.github.com>", "code": "def update_from_latest_data(self) -> None:\n \n if self.entity_description.key == TYPE_FLOW_SENSOR:\n self._attr_is_on = self.coordinator.data.get(\"system\", {}).get(\n \"useFlowSensor\"\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 65, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 6, "token_counts": 39, "n_ast_nodes": 68, "n_identifiers": 9, "random_cut": "def update_from_latest_data(self) -> None:\n \n if self.ent" }, { "id": 150590, "commit_id": "8eeaab27467fa2e0bdc7314bdb888998bbb20af8", "repo": "freqtrade", "path": "freqtrade/freqai/prediction_models/RL/RLPrediction_env.py", "file_name": "RLPrediction_env.py", "fun_name": "report", "commit_message": "add reward function", "code": "def report(self):\n\n # get total trade\n long_trade = 0\n short_trade = 0\n neutral_trade = 0\n for trade in self.trade_history:\n if trade['type'] == 'long':\n long_trade += 1\n\n elif trade['type'] == 'short':\n short_trade += 1\n else:\n neutral_trade += 1\n\n negative_trade = 0\n positive_trade = 0\n for tr in self.close_trade_profit:\n if tr < 0.:\n negative_trade += 1\n\n if tr > 0.:\n positive_trade += 1\n\n total_trade_lr = negative_trade+positive_trade\n\n\n total_trade = long_trade + short_trade\n sharp_ratio = self.sharpe_ratio()\n sharp_log = self.get_sharpe_ratio()\n\n from tabulate import tabulate\n\n headers = [\"Performance\", \"\"]\n performanceTable = [[\"Total Trade\", \"{0:.2f}\".format(total_trade)],\n [\"Total reward\", \"{0:.3f}\".format(self.total_reward)],\n [\"Start profit(unit)\", \"{0:.2f}\".format(1.)],\n [\"End profit(unit)\", \"{0:.3f}\".format(self._total_profit)],\n [\"Sharp ratio\", \"{0:.3f}\".format(sharp_ratio)],\n [\"Sharp log\", \"{0:.3f}\".format(sharp_log)],\n # [\"Sortino ratio\", \"{0:.2f}\".format(0) + '%'],\n [\"winrate\", \"{0:.2f}\".format(positive_trade*100/total_trade_lr) + '%']\n ]\n tabulation = tabulate(performanceTable, headers, tablefmt=\"fancy_grid\", stralign=\"center\")\n print(tabulation)\n\n result = {\n \"Start\": \"{0:.2f}\".format(1.),\n \"End\": \"{0:.2f}\".format(self._total_profit),\n \"Sharp\": \"{0:.3f}\".format(sharp_ratio),\n \"Winrate\": \"{0:.2f}\".format(positive_trade*100/total_trade_lr)\n }\n return result\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 640, "n_words": 135, "vocab_size": 85, "complexity": 7, "nloc": 41, "token_counts": 273, "n_ast_nodes": 469, "n_identifiers": 28, "random_cut": "def report(self):\n\n # get total trade\n long_trade = 0\n short_trade = 0\n neutral_trade = 0\n for trade in self.trade_history:\n if trade['type'] == 'long':\n long_trade += 1\n\n elif trade['type'] == 'short':\n short_trade += 1\n else:\n neutral_trade += 1\n\n negative_trade = 0\n positive_trade = 0\n for tr in self.close_trade_profit:\n if tr < 0.:\n negative_trade += 1\n\n if tr > 0.:\n positive_trade += 1\n\n total_trade_lr = negative_trade+positive_trade\n\n\n total_trade = long_trade + short_trade\n sharp_ratio = self.sharpe_ratio()\n sharp_log = self.get_sharpe_ratio()\n\n from tabulate import tabulate\n\n headers = [\"Performance\", \"\"]\n performanceTable = [[\"Total Trade\", \"{0:.2f}\".format(total_trade)],\n [\"Total reward\", \"{0:.3f}\".format(self.total_reward)],\n [\"Start 
profit(unit)\", \"{0:.2f}\".format(1.)],\n [\"End profit(unit)\", \"{0:.3f}\".format(self._to" }, { "id": 38888, "commit_id": "31ee80d55673f32c0f5d50936f371e661b74b21a", "repo": "transformers", "path": "tests/models/layoutlmv3/test_tokenization_layoutlmv3.py", "file_name": "test_tokenization_layoutlmv3.py", "fun_name": "test_number_of_added_tokens", "commit_message": "Add LayoutLMv3 (#17060)\n\n* Make forward pass work\r\n\r\n* More improvements\r\n\r\n* Remove unused imports\r\n\r\n* Remove timm dependency\r\n\r\n* Improve loss calculation of token classifier\r\n\r\n* Fix most tests\r\n\r\n* Add docs\r\n\r\n* Add model integration test\r\n\r\n* Make all tests pass\r\n\r\n* Add LayoutLMv3FeatureExtractor\r\n\r\n* Improve integration test + make fixup\r\n\r\n* Add example script\r\n\r\n* Fix style\r\n\r\n* Add LayoutLMv3Processor\r\n\r\n* Fix style\r\n\r\n* Add option to add visual labels\r\n\r\n* Make more tokenizer tests pass\r\n\r\n* Fix more tests\r\n\r\n* Make more tests pass\r\n\r\n* Fix bug and improve docs\r\n\r\n* Fix import of processors\r\n\r\n* Improve docstrings\r\n\r\n* Fix toctree and improve docs\r\n\r\n* Fix auto tokenizer\r\n\r\n* Move tests to model folder\r\n\r\n* Move tests to model folder\r\n\r\n* change default behavior add_prefix_space\r\n\r\n* add prefix space for fast\r\n\r\n* add_prefix_spcae set to True for Fast\r\n\r\n* no space before `unique_no_split` token\r\n\r\n* add test to hightligh special treatment of added tokens\r\n\r\n* fix `test_batch_encode_dynamic_overflowing` by building a long enough example\r\n\r\n* fix `test_full_tokenizer` with add_prefix_token\r\n\r\n* Fix tokenizer integration test\r\n\r\n* Make the code more readable\r\n\r\n* Add tests for LayoutLMv3Processor\r\n\r\n* Fix style\r\n\r\n* Add model to README and update init\r\n\r\n* Apply suggestions from code review\r\n\r\n* Replace asserts by value errors\r\n\r\n* Add suggestion by @ducviet00\r\n\r\n* Add model to doc tests\r\n\r\n* Simplify script\r\n\r\n* Improve README\r\n\r\n* a step ahead to fix\r\n\r\n* Update pair_input_test\r\n\r\n* Make all tokenizer tests pass - phew\r\n\r\n* Make style\r\n\r\n* Add LayoutLMv3 to CI job\r\n\r\n* Fix auto mapping\r\n\r\n* Fix CI job name\r\n\r\n* Make all processor tests pass\r\n\r\n* Make tests of LayoutLMv2 and LayoutXLM consistent\r\n\r\n* Add copied from statements to fast tokenizer\r\n\r\n* Add copied from statements to slow tokenizer\r\n\r\n* Remove add_visual_labels attribute\r\n\r\n* Fix tests\r\n\r\n* Add link to notebooks\r\n\r\n* Improve docs of LayoutLMv3Processor\r\n\r\n* Fix reference to section\r\n\r\nCo-authored-by: SaulLu \r\nCo-authored-by: Niels Rogge ", "code": "def test_number_of_added_tokens(self):\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n\n # test 1: single sequence\n words, boxes = self.get_words_and_boxes()\n\n sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)\n attached_sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)\n\n # Method is implemented (e.g. 
not GPT-2)\n if len(attached_sequences) != 2:\n self.assertEqual(\n tokenizer.num_special_tokens_to_add(pair=False), len(attached_sequences) - len(sequences)\n )\n\n # test 2: two sequences\n question, words, boxes = self.get_question_words_and_boxes()\n\n sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=False)\n attached_sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=True)\n\n # Method is implemented (e.g. not GPT-2)\n if len(attached_sequences) != 2:\n self.assertEqual(\n tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)\n )\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 412, "n_words": 86, "vocab_size": 45, "complexity": 4, "nloc": 18, "token_counts": 179, "n_ast_nodes": 289, "n_identifiers": 22, "random_cut": "def test_number_of_added_tokens(self):\n tokenizers = self.get_tokenizers(do_lower_case=False)\n for tokenizer in tokenizers:\n with self.subTest(f\"{tokenizer.__class__.__name__}\"):\n\n # test 1: single sequence\n words, boxes = self.get_words_and_boxes()\n\n sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)\n attached_sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)\n\n # Method is implemented (e.g. not GPT-2)\n if len(attached_sequences) != 2:\n self.assertEqual(\n tokenizer.num_special_tokens_to_add(pair=False), len(attached_sequences) - len(sequences)\n )\n\n # test 2: two sequences\n question, words, boxes = self.get_question_words_and_boxes()\n\n sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=False)\n attached_seque" }, { "id": 63279, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "_defaultExceptionDebugAction", "commit_message": "upd; format", "code": "def _defaultExceptionDebugAction(instring, loc, expr, exc):\n print(\"Exception raised:\" + _ustr(exc))\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 11, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 32, "n_identifiers": 7, "random_cut": "def _defaultExceptionDebugAction(instring, loc, expr, exc):\n print(\"Exception raised:\" " }, { "id": 211007, "commit_id": "35b1c4a4e52bd0416725984a1c360d20456f2c29", "repo": "PaddleDetection", "path": "ppdet/optimizer/optimizer.py", "file_name": "optimizer.py", "fun_name": "__call__", "commit_message": "fix params groups.trainable (#6354)\n\n* add FGD distill code\r\n\r\n* add configs\r\n\r\n* add doc\r\n\r\n* fix pretrain\r\n\r\n* pre-commit\r\n\r\n* fix ci\r\n\r\n* fix readme\r\n\r\n* fix readme\r\n\r\n* fix ci\r\n\r\n* fix param groups\r\n\r\n* fix\r\n\r\n* fix doc\r\n\r\n* fix doc,test=document_fix\r\n\r\n* fix params groups", "code": "def __call__(self, learning_rate, model=None):\n if self.clip_grad_by_norm is not None:\n grad_clip = nn.ClipGradByGlobalNorm(\n clip_norm=self.clip_grad_by_norm)\n else:\n grad_clip = None\n if self.regularizer and self.regularizer != 'None':\n reg_type = self.regularizer['type'] + 'Decay'\n reg_factor = self.regularizer['factor']\n regularization = getattr(regularizer, reg_type)(reg_factor)\n else:\n regularization = None\n\n optim_args = 
self.optimizer.copy()\n optim_type = optim_args['type']\n del optim_args['type']\n\n if optim_type == 'AdamWDL':\n return build_adamwdl(model, lr=learning_rate, **optim_args)\n\n if optim_type != 'AdamW':\n optim_args['weight_decay'] = regularization\n\n op = getattr(optimizer, optim_type)\n\n if 'param_groups' in optim_args:\n assert isinstance(optim_args['param_groups'], list), ''\n\n param_groups = optim_args.pop('param_groups')\n\n params, visited = [], []\n for group in param_groups:\n assert isinstance(group,\n dict) and 'params' in group and isinstance(\n group['params'], list), ''\n _params = {\n n: p\n for n, p in model.named_parameters()\n if any([k in n\n for k in group['params']]) and p.trainable is True\n }\n _group = group.copy()\n _group.update({'params': list(_params.values())})\n\n params.append(_group)\n visited.extend(list(_params.keys()))\n\n ext_params = [\n p for n, p in model.named_parameters()\n if n not in visited and p.trainable is True\n ]\n\n if len(ext_params) < len(model.parameters()):\n params.append({'params': ext_params})\n\n elif len(ext_params) > len(model.parameters()):\n raise RuntimeError\n\n else:\n _params = model.parameters()\n params = [param for param in _params if param.trainable is True]\n\n return op(learning_rate=learning_rate,\n parameters=params,\n grad_clip=grad_clip,\n **optim_args)\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 852, "n_words": 179, "vocab_size": 105, "complexity": 21, "nloc": 53, "token_counts": 391, "n_ast_nodes": 641, "n_identifiers": 47, "random_cut": "def __call__(self, learning_rate, model=None):\n if self.clip_grad_by_norm is not None:\n grad_clip = nn.ClipGradByGlobalNorm(\n clip_norm=self.clip_grad_by_norm)\n else:\n grad_clip = None\n if self.regularizer and self.regularizer != 'None':\n reg_type = self.regularizer['type'] + 'Decay'\n reg_factor = self.regularizer['factor']\n regularization = getattr(regularizer, reg_type)(reg_factor)\n else:\n regularization = None\n\n optim_args = self.optimizer.copy()\n optim_type = optim_args['type']\n del optim_args['type']\n\n if optim_type == 'AdamWDL':\n return build_adamwdl(model, lr=learning_rate, **optim_args)\n\n if optim_type != 'AdamW':\n " }, { "id": 98790, "commit_id": "a88dd006ae647debe4a9d17ad3908d6fdcc576ce", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_dashboard_widget_details.py", "file_name": "test_organization_dashboard_widget_details.py", "fun_name": "test_issue_search_condition", "commit_message": "fix(widget-builder): Do widget validation with columns and aggregates as fields (#33986)\n\nThere was code to construct columns and aggregates from fields to account for\r\nstale frontend during the transition to split up fields, but enough time has passed\r\nthat we can use the columns and aggregates as the source of truth for validation", "code": "def test_issue_search_condition(self):\n self.user = self.create_user(is_superuser=False)\n self.create_member(\n user=self.user, organization=self.organization, role=\"member\", teams=[self.team]\n )\n self.login_as(self.user)\n\n event = self.store_event(\n data={\n \"event_id\": \"a\" * 32,\n \"transaction\": \"/example\",\n \"message\": \"how to make fast\",\n \"timestamp\": iso_format(before_now(minutes=2)),\n \"fingerprint\": [\"group_1\"],\n },\n project_id=self.project.id,\n )\n\n data = {\n \"title\": \"EPM Big Number\",\n \"displayType\": \"big_number\",\n \"queries\": 
[\n {\n \"name\": \"\",\n \"fields\": [\"epm()\"],\n \"columns\": [],\n \"aggregates\": [\"epm()\"],\n \"conditions\": f\"issue:{event.group.qualified_short_id}\",\n \"orderby\": \"\",\n }\n ],\n }\n response = self.do_request(\n \"post\",\n self.url(),\n data=data,\n )\n assert response.status_code == 200, response.data\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 489, "n_words": 73, "vocab_size": 64, "complexity": 1, "nloc": 36, "token_counts": 183, "n_ast_nodes": 326, "n_identifiers": 26, "random_cut": "def test_issue_search_condition(self):\n self.user = self.create_user(is_superuser=False)\n self.create_member(\n user=self.user, organization=self.organization, role=\"member\", teams=[self.team]\n )\n self.login_as(self.user)\n\n event = self.store_event(\n data={\n \"event_id\": \"a\" * 32,\n \"transaction\": \"/example\",\n \"message\": \"how to make fast\",\n \"timestamp\": iso_format(before_now(minutes" }, { "id": 338317, "commit_id": "74642aac95a261148d32324688c1d6391775aded", "repo": "accelerate", "path": "src/accelerate/commands/config/config_args.py", "file_name": "config_args.py", "fun_name": "from_yaml_file", "commit_message": "Add support for torch dynamo (#829)\n\n* Add torch dynamo optimizations\r\n\r\n* More work\r\n\r\n* Fix enum values\r\n\r\n* Add to basic config\r\n\r\n* fix more tests\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com>\r\n\r\nCo-authored-by: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com>", "code": "def from_yaml_file(cls, yaml_file=None):\n yaml_file = default_yaml_config_file if yaml_file is None else yaml_file\n with open(yaml_file, \"r\", encoding=\"utf-8\") as f:\n config_dict = yaml.safe_load(f)\n if \"compute_environment\" not in config_dict:\n config_dict[\"compute_environment\"] = ComputeEnvironment.LOCAL_MACHINE\n\n if \"mixed_precision\" not in config_dict:\n config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else \"no\"\n if \"fp16\" in config_dict: # Convert the config to the new format.\n del config_dict[\"fp16\"]\n if \"use_cpu\" not in config_dict:\n config_dict[\"use_cpu\"] = False\n if \"dynamo_backend\" not in config_dict:\n config_dict[\"dynamo_backend\"] = DynamoBackend.NO\n\n return cls(**config_dict)\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 199, "n_words": 77, "vocab_size": 48, "complexity": 9, "nloc": 15, "token_counts": 121, "n_ast_nodes": 217, "n_identifiers": 14, "random_cut": "def from_yaml_file(cls, yaml_file=None):\n yaml_file = default_yaml_config_file if yaml_file is None else yaml_file\n with open(yaml_file, \"r\", encoding=\"utf-8\") as f:\n config_dict = yaml.safe_load(f)\n if \"compute_environment\" not in config_dict:\n config_dict[\"compute_environment\"] = ComputeEnvironment.LOCAL_MACHINE\n\n if \"mixed_precision\" not in config_dict:\n config_dict[\"mixed_precision\"] = \"fp16\" if (\"fp16\" in config_dict and config_dict[\"fp16\"]) else \"no\"\n if \"fp16\" in config_dict: # Convert th" }, { "id": 78707, "commit_id": "dd892a650827770318d21e5984ca5d984a510655", "repo": "wagtail", "path": "wagtail/images/views/chooser.py", "file_name": "chooser.py", "fun_name": "get_chosen_response_data", "commit_message": "Use ChosenResponseMixin for returning 'image chosen' 
responses", "code": "def get_chosen_response_data(self, image):\n \n response_data = super().get_chosen_response_data(image)\n preview_image = image.get_rendition(\"max-165x165\")\n response_data[\"preview\"] = {\n \"url\": preview_image.url,\n \"width\": preview_image.width,\n \"height\": preview_image.height,\n }\n return response_data\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 96, "n_words": 21, "vocab_size": 18, "complexity": 1, "nloc": 9, "token_counts": 53, "n_ast_nodes": 93, "n_identifiers": 10, "random_cut": "def get_chosen_response_data(self, image):\n \n response_data = super().get_chosen_response_data(image)\n preview_image = image.get_rendition(\"max-165x165\")\n response_data[\"preview\"] = {\n \"url\":" }, { "id": 182064, "commit_id": "5651e97a64b850b80f42799e7f7d868f1f11ab7b", "repo": "textual", "path": "tests/renderables/test_underline_bar.py", "file_name": "test_underline_bar.py", "fun_name": "test_highlight_out_of_bounds_end", "commit_message": "Underline bar renderable", "code": "def test_highlight_out_of_bounds_end():\n bar = UnderlineBar(highlight_range=(3, 9), width=6)\n assert render(bar) == (\n f\"{GREY}━━{STOP}{GREY}╸{STOP}{MAGENTA}━━━{STOP}\"\n )\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 28, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 30, "n_ast_nodes": 71, "n_identifiers": 9, "random_cut": "def test_highlight_out_of_bounds_end():\n bar = UnderlineBar(highlight_range=(3, 9), wi" }, { "id": 242971, "commit_id": "f0353c599676d694692174e32dc3acee2912b4a0", "repo": "Pillow", "path": "Tests/test_file_tga.py", "file_name": "test_file_tga.py", "fun_name": "test_cross_scan_line", "commit_message": "When reading past the end of a scan line, reduce bytes left", "code": "def test_cross_scan_line():\n with Image.open(\"Tests/images/cross_scan_line.tga\") as im:\n assert_image_equal_tofile(im, \"Tests/images/cross_scan_line.png\")\n\n with Image.open(\"Tests/images/cross_scan_line_truncated.tga\") as im:\n with pytest.raises(OSError):\n im.load()\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 45, "n_words": 15, "vocab_size": 11, "complexity": 1, "nloc": 6, "token_counts": 43, "n_ast_nodes": 85, "n_identifiers": 9, "random_cut": "def test_cross_scan_line():\n with Image.open(\"Tests/images/cross_scan_line.tga\") as im:\n assert_image_equal_tofile(im, \"Tests/images/cross_scan_line.png\")\n\n with Image.open(\"Tests/im" }, { "id": 335658, "commit_id": "0926dc24180a8931de6081f6de7bc44c1366678c", "repo": "diffusers", "path": "src/diffusers/models/resnet.py", "file_name": "resnet.py", "fun_name": "forward", "commit_message": "save intermediate grad tts", "code": "def forward(self, x, mask):\n output = self.block(x * mask)\n return output * mask\n\n\n# unet_score_estimation.py", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 27, "n_words": 15, "vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 23, "n_ast_nodes": 36, "n_identifiers": 6, "random_cut": "def forward(self, x, mask):\n output = self.block(x * mask)\n return output * m" }, { "id": 53909, "commit_id": "bcee6a35d831f5dfd55d77c0ad3eee3ea7b41e57", "repo": "prefect", 
"path": "tests/orion/models/test_agents.py", "file_name": "test_agents.py", "fun_name": "test_read_agent", "commit_message": "Add models layer for agents", "code": "async def test_read_agent(self, agents, session):\n read_agent = await models.agents.read_agents(session=session)\n assert len(read_agent) == len(agents)\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 26, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 32, "n_ast_nodes": 51, "n_identifiers": 8, "random_cut": "async def test_read_agent(self, agents, session):\n read_agent = await models.agents.read_agents(session=session)\n " }, { "id": 246605, "commit_id": "64c73c6ac88a740ee480a0ad1f9afc8596bccfa4", "repo": "synapse", "path": "tests/rest/client/test_login.py", "file_name": "test_login.py", "fun_name": "test_deactivated_user", "commit_message": "Add type hints to `tests/rest/client` (#12066)", "code": "def test_deactivated_user(self) -> None:\n \n redirect_url = \"https://legit-site.com/\"\n\n # First login (to create the user).\n self._test_redirect(redirect_url)\n\n # Deactivate the account.\n self.get_success(\n self.deactivate_account_handler.deactivate_account(\n self.user_id, False, create_requester(self.user_id)\n )\n )\n\n # Request the CAS ticket.\n cas_ticket_url = (\n \"/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket\"\n % (urllib.parse.quote(redirect_url))\n )\n\n # Get Synapse to call the fake CAS and serve the template.\n channel = self.make_request(\"GET\", cas_ticket_url)\n\n # Because the user is deactivated they are served an error template.\n self.assertEqual(channel.code, 403)\n self.assertIn(b\"SSO account deactivated\", channel.result[\"body\"])\n\n\n@skip_unless(HAS_JWT, \"requires jwt\")", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "@skip_unless(HAS_JWT, \"requires jwt\")", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 238, "n_words": 75, "vocab_size": 60, "complexity": 1, "nloc": 16, "token_counts": 91, "n_ast_nodes": 169, "n_identifiers": 21, "random_cut": "def test_deactivated_user(self) -> None:\n \n redirect_url = \"https://legit-site.com/\"\n\n # First login (to create the user).\n self._test_redirect(redirect_url)\n\n # Deactivate the account.\n self.get_success(\n self.deactivate_account_handler.deactivate_account(\n self.user_id, False, create_requester(self.user_id)\n )\n )\n\n # Request the CAS ticket.\n cas_ticket_url = (\n \"/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket\"\n % (urllib.parse.quote(redirect_url))\n )\n\n # Get Synapse to call the fake CAS and serve the template.\n channel = self.make_request(\"GET\", cas_ticket_url)\n\n # Because the user is deactivated they are served an error template.\n self.assertEqual(channel.code, 403)\n self.assertIn(b\"SSO account " }, { "id": 33795, "commit_id": "31be02f14b1724c677bb2e32a5101c7cb6448556", "repo": "transformers", "path": "src/transformers/models/flaubert/modeling_tf_flaubert.py", "file_name": "modeling_tf_flaubert.py", "fun_name": "get_masks", "commit_message": "TF: tf.debugging assertions without tf.running_eagerly() protection (#19030)", "code": "def get_masks(slen, lengths, causal, padding_mask=None):\n \n bs = shape_list(lengths)[0]\n if padding_mask is not None:\n mask = padding_mask\n else:\n # assert lengths.max().item() <= slen\n alen = tf.range(slen, dtype=lengths.dtype)\n mask = alen < tf.expand_dims(lengths, 
axis=1)\n\n # attention mask is the same as mask, or triangular inferior attention (causal)\n if causal:\n attn_mask = tf.less_equal(\n tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1))\n )\n else:\n attn_mask = mask\n\n # sanity check\n # assert shape_list(mask) == [bs, slen]\n tf.debugging.assert_equal(shape_list(mask), [bs, slen])\n if causal:\n tf.debugging.assert_equal(shape_list(attn_mask), [bs, slen, slen])\n\n return mask, attn_mask\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 190, "n_words": 87, "vocab_size": 58, "complexity": 4, "nloc": 17, "token_counts": 162, "n_ast_nodes": 243, "n_identifiers": 20, "random_cut": "def get_masks(slen, lengths, causal, padding_mask=None):\n \n bs = shape_list(lengths)[0]\n if padding_mask is not None:\n mask = padding_mask\n else:\n # assert lengths.max().item() <= slen\n alen = tf.range(slen, dtype=lengths.dtype)\n mask = alen < tf.expand_dims(lengths, axis=1)\n\n # attention mask is the same as mask, or triangular inferior attention (causal)\n if causal:\n attn_mask = tf.less_equal(\n tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1))\n )\n else:\n at" }, { "id": 252600, "commit_id": "f4dc2f2cfdb40e04022e4deb4aa67578deff5d23", "repo": "mitmproxy", "path": "mitmproxy/tools/console/master.py", "file_name": "master.py", "fun_name": "__setattr__", "commit_message": "Replace blinker with custom implementation (#5528)\n\n* replace blinker with custom implementation\r\n\r\nThe major benefit here is type checking, followed by proper support for async receivers.\r\n\r\n* fix compatibility with Python 3.9\r\n\r\n* fix nits\r\n\r\n* try harder to force gc\r\n\r\n* try harderer\r\n\r\n* coverage++\r\n\r\n* coverage++\r\n\r\n* nits", "code": "def __setattr__(self, name, value):\n super().__setattr__(name, value)\n signals.update_settings.send()\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 20, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 41, "n_identifiers": 8, "random_cut": "def __setattr__(self, name, value):\n super().__setattr__(name, value)\n signals.update_set" }, { "id": 56766, "commit_id": "f5cc99efbbe532e89b123c9afc9179ac36d3e2da", "repo": "prefect", "path": "tests/utilities/test_pydantic.py", "file_name": "test_pydantic.py", "fun_name": "test_both_type_field_and_dispatch_key_cannot_be_set", "commit_message": "Add support for literal \"type\" fields to pydantic utility", "code": "def test_both_type_field_and_dispatch_key_cannot_be_set(self):\n\n with pytest.raises(\n ValueError,\n match=\"Model class 'Base' defines a `__dispatch_key__` and a type field. 
Only one of these may be defined for dispatch\",\n ):\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 60, "n_words": 25, "vocab_size": 24, "complexity": 1, "nloc": 9, "token_counts": 34, "n_ast_nodes": 32, "n_identifiers": 6, "random_cut": "def test_both_type_field_and_dispatch_key_cannot_be_set(self):\n\n with pytest.raises(\n ValueError,\n match=\"Mod" }, { "id": 169414, "commit_id": "8c3c9e3bdc6e6870036428bd192c8fa92b93c295", "repo": "pandas", "path": "pandas/tests/arrays/test_datetimes.py", "file_name": "test_datetimes.py", "fun_name": "test_sub_datetimelike_scalar_mismatch", "commit_message": "BUG: DatetimeArray-datetimelike mixed resos (#48894)", "code": "def test_sub_datetimelike_scalar_mismatch(self):\n dti = pd.date_range(\"2016-01-01\", periods=3)\n dta = dti._data._as_unit(\"us\")\n\n ts = dta[0]._as_unit(\"s\")\n\n result = dta - ts\n expected = (dti - dti[0])._data._as_unit(\"us\")\n assert result.dtype == \"m8[us]\"\n tm.assert_extension_array_equal(result, expected)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 76, "n_words": 28, "vocab_size": 21, "complexity": 1, "nloc": 8, "token_counts": 74, "n_ast_nodes": 125, "n_identifiers": 15, "random_cut": "def test_sub_datetimelike_scalar_mismatch(self):\n dti = pd.date_range(\"2016-01-01\", periods=3)\n dta = dti._data._as_unit(\"us\")\n\n ts = dta[0]._as_unit(\"s\")\n\n result = dta - ts\n " }, { "id": 61136, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/requirements.py", "file_name": "requirements.py", "fun_name": "project_name", "commit_message": "upd; format", "code": "def project_name(self):\n # type: () -> NormalizedName\n # No need to canonicalise - the candidate did this\n return self.candidate.project_name\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 39, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 20, "n_identifiers": 3, "random_cut": "def project_name(self):\n # type: () -> NormalizedName\n # No nee" }, { "id": 92725, "commit_id": "ef5a739249de199b25d2cba7a2ee52820d9f34de", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events_v2.py", "file_name": "test_organization_events_v2.py", "fun_name": "setUp", "commit_message": "tests(discover): Improve stability of eventsv2 tests (#36641)\n\nSame motivation as #36619, this aims to improve the stability of the eventsv2\r\ntests by moving the event timestamps further in the past.", "code": "def setUp(self):\n super().setUp()\n self.ten_mins_ago = iso_format(before_now(minutes=10))\n self.eleven_mins_ago = iso_format(before_now(minutes=11))\n self.transaction_data = load_data(\"transaction\", timestamp=before_now(minutes=10))\n self.features = {}\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 50, "n_words": 16, "vocab_size": 13, "complexity": 1, "nloc": 6, "token_counts": 61, "n_ast_nodes": 102, "n_identifiers": 12, "random_cut": "def setUp(self):\n super()." 
}, { "id": 264821, "commit_id": "304282bd4f20aa80b4826b47777b87972ac11832", "repo": "netbox", "path": "netbox/dcim/tests/test_cablepaths.py", "file_name": "test_cablepaths.py", "fun_name": "test_206_unidirectional_split_paths", "commit_message": "Update tests", "code": "def test_206_unidirectional_split_paths(self):\n \n interface1 = Interface.objects.create(device=self.device, name='Interface 1')\n interface2 = Interface.objects.create(device=self.device, name='Interface 2')\n interface3 = Interface.objects.create(device=self.device, name='Interface 3')\n rearport1 = RearPort.objects.create(device=self.device, name='Rear Port 1', positions=4)\n frontport1_1 = FrontPort.objects.create(\n device=self.device, name='Front Port 1:1', rear_port=rearport1, rear_port_position=1\n )\n frontport1_2 = FrontPort.objects.create(\n device=self.device, name='Front Port 1:2', rear_port=rearport1, rear_port_position=2\n )\n\n # Create cables 1\n cable1 = Cable(terminations=[\n CableTermination(cable_end='A', termination=interface1),\n CableTermination(cable_end='B', termination=rearport1),\n ])\n cable1.save()\n self.assertPathExists(\n (interface1, cable1, rearport1),\n is_complete=False,\n is_split=True\n )\n self.assertEqual(CablePath.objects.count(), 1)\n\n # Create cables 2-3\n cable2 = Cable(terminations=[\n CableTermination(cable_end='A', termination=interface2),\n CableTermination(cable_end='B', termination=frontport1_1),\n ])\n cable2.save()\n cable3 = Cable(terminations=[\n CableTermination(cable_end='A', termination=interface3),\n CableTermination(cable_end='B', termination=frontport1_2),\n ])\n cable3.save()\n self.assertPathExists(\n (interface2, cable2, frontport1_1, rearport1, cable1, interface1),\n is_complete=True,\n is_active=True\n )\n self.assertPathExists(\n (interface3, cable3, frontport1_2, rearport1, cable1, interface1),\n is_complete=True,\n is_active=True\n )\n self.assertEqual(CablePath.objects.count(), 3)\n\n # Delete cable 1\n cable1.delete()\n\n # Check that the partial path was deleted and the two complete paths are now partial\n self.assertPathExists(\n (interface2, cable2, frontport1_1, rearport1),\n is_complete=False\n )\n self.assertPathExists(\n (interface3, cable3, frontport1_2, rearport1),\n is_complete=False\n )\n self.assertEqual(CablePath.objects.count(), 2)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 630, "n_words": 147, "vocab_size": 86, "complexity": 1, "nloc": 53, "token_counts": 397, "n_ast_nodes": 606, "n_identifiers": 35, "random_cut": "def test_206_unidirectional_split_paths(self):\n \n interface1 = Interface.objects.create(device=self.device, name='Interface 1')\n interface2 = Interface.objects.create(device=self.device, name='Interface 2')\n interface3 = Interface.objects.create(device=self.device, name='Interface 3')\n rearport1 = RearPort.objects.create(device=self.device, name='Rear Port 1', positions=4)\n frontport1_1 = FrontPort.objects.create(\n device=self.device, name='Front Port 1:1', rear_port=rearport1, rear_port_position=1\n )\n frontport1_2 = FrontPort.objects.create(\n device=self.device, name='Front Port 1:2', rear_port=rearport1, rear_port_position=2\n )\n\n # Create cables 1\n cable1 = Cable(terminations=[\n CableTermination(cable_end='A', termination=interface1),\n CableTermination(cable_end='B', termination=rearport1),\n ])\n cable1.save()\n self.assertPathExists(\n (interface1, cable1, rearport1),\n is_complete=False,\n 
is_split=True\n )\n self.assertEqual(CablePath.objects.count(), 1)\n\n # Create cables 2-3\n cable2 = Cable(terminations=[\n CableTermi" }, { "id": 109811, "commit_id": "352bb1fb5f30bfdda8c0240b463afef952944efd", "repo": "matplotlib", "path": "lib/matplotlib/pyplot.py", "file_name": "pyplot.py", "fun_name": "get_plot_commands", "commit_message": "Generalize validation that pyplot commands are documented\n\nUntil now, the test made some exclusions (_NON_PLOT_COMMANDS) and\nreqired all functions to be documented in a single autosummary block.\n\nThis change ensures the documentation of the _NON_PLOT_COMMANDS\nand it allows the commands to be spread across arbitrary many\nautosummary sections. This is in preparation of regrouping the pyplot\ncommands similar to the Axes documentation.\n\nThis also pending deprecates `pyplot.get_plot_commands`, which should\nnot be a public function. I'm defensive by using pending, because if\n`get_plot_commands` is used somewhere, that's most likely some\ndownstream lib and we want to give them time to adapt.\n\nCo-authored-by: hannah ", "code": "def get_plot_commands():\n \n NON_PLOT_COMMANDS = {\n 'connect', 'disconnect', 'get_current_fig_manager', 'ginput',\n 'new_figure_manager', 'waitforbuttonpress'}\n return (name for name in _get_pyplot_commands()\n if name not in NON_PLOT_COMMANDS)\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 56, "n_words": 22, "vocab_size": 20, "complexity": 3, "nloc": 6, "token_counts": 35, "n_ast_nodes": 64, "n_identifiers": 4, "random_cut": "def get_plot_commands():\n \n NON_" }, { "id": 171100, "commit_id": "b7ea7c6dfd100c40b0bc45aacf6d92c5c22f2e63", "repo": "pandas", "path": "pandas/tests/frame/test_reductions.py", "file_name": "test_reductions.py", "fun_name": "test_any_all_np_func", "commit_message": "DEPR: Enforce deprecation of numeric_only=None in DataFrame aggregations (#49551)\n\n* WIP\r\n\r\n* DEPR: Enforce deprecation of numeric_only=None in DataFrame aggregations\r\n\r\n* Partial reverts\r\n\r\n* numeric_only in generic/series, fixup\r\n\r\n* cleanup\r\n\r\n* Remove docs warning\r\n\r\n* fixups\r\n\r\n* Fixups", "code": "def test_any_all_np_func(self, func, data, expected):\n # GH 19976\n data = DataFrame(data)\n\n if any(is_categorical_dtype(x) for x in data.dtypes):\n with pytest.raises(\n TypeError, match=\"dtype category does not support reduction\"\n ):\n func(data)\n\n # method version\n with pytest.raises(\n TypeError, match=\"dtype category does not support reduction\"\n ):\n getattr(DataFrame(data), func.__name__)(axis=None)\n else:\n result = func(data)\n assert isinstance(result, np.bool_)\n assert result.item() is expected\n\n # method version\n result = getattr(DataFrame(data), func.__name__)(axis=None)\n assert isinstance(result, np.bool_)\n assert result.item() is expected\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 287, "n_words": 68, "vocab_size": 40, "complexity": 3, "nloc": 18, "token_counts": 136, "n_ast_nodes": 221, "n_identifiers": 22, "random_cut": "def test_any_all_np_func(self, func, data, expected):\n # GH 19976\n data = DataFrame(data)\n\n if any(is_categorical_dtype(x) for x in data.dtypes):\n with pytest.raises(\n " }, { "id": 186287, "commit_id": "d3c91075c658d1d366824c862f05449ad3f5016d", "repo": "textual", "path": "tests/test_segment_tools.py", "file_name": 
"test_segment_tools.py", "fun_name": "test_line_crop_highlight_reverse_bug", "commit_message": "Add test for line crop issue", "code": "def test_line_crop_highlight_reverse_bug():\n \n segments_joined = [Segment('a1あ11bcdaef123a1a')]\n segments_split = [Segment('a1あ11bcdaef'), Segment('1'), Segment('23a1a')]\n\n joined1 = \"\".join(seg.text for seg in line_crop(segments_split, start=9, end=16, total=23))\n joined2 = \"\".join(seg.text for seg in line_crop(segments_joined, start=9, end=16, total=23))\n\n assert joined1 == joined2\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 52, "n_words": 34, "vocab_size": 22, "complexity": 3, "nloc": 6, "token_counts": 93, "n_ast_nodes": 152, "n_identifiers": 13, "random_cut": "def test_line_crop_highlight_reverse_bug():\n \n segments_joined = [Segment('a1あ11bcdaef123a1a')]\n segments_split = [Segment('a1あ11bcdaef'), Segment('1'), Segment('23a1a')]\n\n joined1 = \"\".join(seg.text for seg in line_crop(segments_split, start=9, end=16, total=23))\n joined2 = \"\".join(seg.text for seg in line_crop(segments_joined, start=9, end=16, total=23))\n\n assert joined1 == joined2\n\n" }, { "id": 84320, "commit_id": "83383090f9461b81bf718afc449bc0b2196db0cd", "repo": "zulip", "path": "zerver/tests/test_message_send.py", "file_name": "test_message_send.py", "fun_name": "test_wildcard_mention_restrictions", "commit_message": "realm: Removed WILDCARD_MENTION_POLICY_STREAM_ADMINS option.\n\nThis commit removes WILDCARD_MENTION_POLICY_STREAM_ADMINS\noption of wildcard_mention_policy since we are not moving\nforward with stream administrator concept and instead working\non new permssions model as per #19525.\n\nWe also add a migration to change wildcard_mention_policy of\nexisting realms to WILDCARD_MENTION_POLICY_ADMINS. 
This change\nis fine since we were already treating both the setting values\nas same as stream admin concept was not implemented completely.", "code": "def test_wildcard_mention_restrictions(self) -> None:\n cordelia = self.example_user(\"cordelia\")\n iago = self.example_user(\"iago\")\n polonius = self.example_user(\"polonius\")\n shiva = self.example_user(\"shiva\")\n realm = cordelia.realm\n\n stream_name = \"test_stream\"\n self.subscribe(cordelia, stream_name)\n self.subscribe(iago, stream_name)\n self.subscribe(polonius, stream_name)\n self.subscribe(shiva, stream_name)\n\n do_set_realm_property(\n realm,\n \"wildcard_mention_policy\",\n Realm.WILDCARD_MENTION_POLICY_EVERYONE,\n acting_user=None,\n )\n self.send_and_verify_wildcard_mention_message(\"polonius\")\n\n do_set_realm_property(\n realm,\n \"wildcard_mention_policy\",\n Realm.WILDCARD_MENTION_POLICY_MEMBERS,\n acting_user=None,\n )\n self.send_and_verify_wildcard_mention_message(\"polonius\", test_fails=True)\n # There is no restriction on small streams.\n self.send_and_verify_wildcard_mention_message(\"polonius\", sub_count=10)\n self.send_and_verify_wildcard_mention_message(\"cordelia\")\n\n do_set_realm_property(\n realm,\n \"wildcard_mention_policy\",\n Realm.WILDCARD_MENTION_POLICY_FULL_MEMBERS,\n acting_user=None,\n )\n do_set_realm_property(realm, \"waiting_period_threshold\", 10, acting_user=None)\n iago.date_joined = timezone_now()\n iago.save()\n shiva.date_joined = timezone_now()\n shiva.save()\n cordelia.date_joined = timezone_now()\n cordelia.save()\n self.send_and_verify_wildcard_mention_message(\"cordelia\", test_fails=True)\n self.send_and_verify_wildcard_mention_message(\"cordelia\", sub_count=10)\n # Administrators and moderators can use wildcard mentions even if they are new.\n self.send_and_verify_wildcard_mention_message(\"iago\")\n self.send_and_verify_wildcard_mention_message(\"shiva\")\n\n cordelia.date_joined = timezone_now() - datetime.timedelta(days=11)\n cordelia.save()\n self.send_and_verify_wildcard_mention_message(\"cordelia\")\n\n do_set_realm_property(\n realm,\n \"wildcard_mention_policy\",\n Realm.WILDCARD_MENTION_POLICY_MODERATORS,\n acting_user=None,\n )\n self.send_and_verify_wildcard_mention_message(\"cordelia\", test_fails=True)\n self.send_and_verify_wildcard_mention_message(\"cordelia\", sub_count=10)\n self.send_and_verify_wildcard_mention_message(\"shiva\")\n\n cordelia.date_joined = timezone_now()\n cordelia.save()\n do_set_realm_property(\n realm, \"wildcard_mention_policy\", Realm.WILDCARD_MENTION_POLICY_ADMINS, acting_user=None\n )\n self.send_and_verify_wildcard_mention_message(\"shiva\", test_fails=True)\n # There is no restriction on small streams.\n self.send_and_verify_wildcard_mention_message(\"shiva\", sub_count=10)\n self.send_and_verify_wildcard_mention_message(\"iago\")\n\n do_set_realm_property(\n realm, \"wildcard_mention_policy\", Realm.WILDCARD_MENTION_POLICY_NOBODY, acting_user=None\n )\n self.send_and_verify_wildcard_mention_message(\"iago\", test_fails=True)\n self.send_and_verify_wildcard_mention_message(\"iago\", sub_count=10)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 716, "n_words": 148, "vocab_size": 77, "complexity": 1, "nloc": 78, "token_counts": 431, "n_ast_nodes": 659, "n_identifiers": 28, "random_cut": "def test_wildcard_mention_restrictions(self) -> None:\n cordelia = self.example_user(\"cordelia\")\n iago = self.example_user(\"iago\")\n polonius = 
self.example_user(\"polonius\")\n shiva = self.example_user(\"shiva\")\n realm = cordelia.realm\n\n stream_name = \"test_stream\"\n self.subscribe(cordelia, stream_name)\n self.subscribe(iago, stream_name)\n self.subscribe(polonius, stream_name)\n self.subscribe(shiva, stream_name)\n\n do_set_realm_property(\n realm,\n \"wildcard_mention_policy\",\n Realm.WILDCARD_MENTION_POLICY_EVERYONE,\n acting_user=None,\n )\n self.send_and_verify_wildcard_mention_message(\"polonius\")\n\n do_set_realm_property(\n realm,\n \"wildcard_mention_policy\",\n Realm.WILDCARD_MENTION_POLICY_MEMBERS,\n acting_user=None,\n )\n self.send_and_verify_wildcard_mention_message(\"polonius\", test_fails=True)\n # There is no restrict" }, { "id": 122348, "commit_id": "69525cd96dc3a55258aeabcd6624ddf909595198", "repo": "jax", "path": "jax/experimental/sparse/bcoo.py", "file_name": "bcoo.py", "fun_name": "_bcoo_to_elt", "commit_message": "[sparse] Make BCSR vmappable.\n\nPiperOrigin-RevId: 481257762", "code": "def _bcoo_to_elt(cont, _, val, axis):\n if axis is None:\n return val\n if axis >= val.n_batch:\n raise ValueError(f\"Cannot map in_axis={axis} for BCOO array with n_batch={val.n_batch}. \"\n \"in_axes for batched BCOO operations must correspond to a batch dimension.\")\n return BCOO((cont(val.data, axis), cont(val.indices, axis)),\n shape=val.shape[:axis] + val.shape[axis + 1:])\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 87, "n_words": 46, "vocab_size": 40, "complexity": 3, "nloc": 8, "token_counts": 75, "n_ast_nodes": 124, "n_identifiers": 11, "random_cut": "def _bcoo_to_elt(cont, _, val, axis):\n if axis is None:\n return val\n if axis >= val.n_batch:\n raise ValueError(f\"Cannot map in_axis={axis} for BCOO array with n_batch={" }, { "id": 181650, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/nn_tests.py", "file_name": "nn_tests.py", "fun_name": "test_nn_conf_dict", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_nn_conf_dict():\n \n clf = TPOTClassifier(config_dict=classifier_config_nn)\n assert clf.config_dict == classifier_config_nn\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 18, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 34, "n_identifiers": 5, "random_cut": "def test_nn_conf_dict():\n \n clf = TPOTClassifier(config_dict=classifier_config_nn)\n assert clf.config" }, { "id": 145458, "commit_id": "e85540a1a2fb2b5a121dfe54f45342a7046bc3d7", "repo": "ray", "path": "dashboard/modules/serve/tests/test_schema.py", "file_name": "test_schema.py", "fun_name": "test_valid_serve_application_status_schema", "commit_message": "[serve] Expose deployment statuses in REST API (#22611)", "code": "def test_valid_serve_application_status_schema(self):\n # Ensure a valid ServeApplicationStatusSchema can be generated\n\n serve_application_status_schema = {\n \"deployment_1\": {\"status\": \"HEALTHY\", \"message\": \"\"},\n \"deployment_2\": {\n \"status\": \"UNHEALTHY\",\n \"message\": \"this deployment is deeply unhealthy\",\n },\n }\n\n serve_application_status_to_schema(serve_application_status_schema)\n\n\n# This function is defined globally to be accessible via import path", "url": "https://github.com/ray-project/ray.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 132, "n_words": 43, "vocab_size": 38, "complexity": 1, "nloc": 9, "token_counts": 38, "n_ast_nodes": 78, "n_identifiers": 4, "random_cut": "def test_valid_serve_application_status_schema(self):\n # Ensure a valid ServeApplicationStatusSchema can be generated\n\n serve_application_status_schema = {\n \"deployment_1\": {\"status\": \"HEALTHY\", \"message\": \"\"},\n \"deployment_2\": {\n \"status\": \"UNHEALTHY\",\n " }, { "id": 221672, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/configparser.py", "file_name": "configparser.py", "fun_name": "readfp", "commit_message": "add python 3.10.4 for windows", "code": "def readfp(self, fp, filename=None):\n \n warnings.warn(\n \"This method will be removed in Python 3.12. \"\n \"Use 'parser.read_file()' instead.\",\n DeprecationWarning, stacklevel=2\n )\n self.read_file(fp, source=filename)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 83, "n_words": 22, "vocab_size": 22, "complexity": 1, "nloc": 7, "token_counts": 35, "n_ast_nodes": 58, "n_identifiers": 10, "random_cut": "def readfp(self, fp, filename=None):\n \n warnings.warn(\n \"This method will be removed in Python 3.12. \"\n \"Use 'parser.read_file()' instead.\",\n DeprecationWarning, stacklevel=2\n )\n self.read_file(fp, source=filename)\n" }, { "id": 8959, "commit_id": "4b3c8211b3e3eca5f9fdf6553bbd45c9c7587b0d", "repo": "insightface", "path": "body/human_pose/ambiguity_aware/lib/utils/misc.py", "file_name": "misc.py", "fun_name": "process_dataset_for_video", "commit_message": "update", "code": "def process_dataset_for_video(path, is_mpi=False):\n # add some content for specified dataset(h5)\n f = h5py.File(path, \"a\")\n imagenames = [name.decode() for name in f['imagename'][:]]\n seqnames = ['/'.join(name.split('/')[:-1]) for name in imagenames]\n if is_mpi: \n indices_in_seq_ref = [int(name.split('/')[-1].split('.')[0].split('_')[1]) for name in imagenames]\n # reset indices \n indices_in_seq = []\n i = 0 \n last_seqname = None\n for index, seqname in zip(indices_in_seq_ref, seqnames): \n if last_seqname is not None and seqname != last_seqname: \n i = 0 \n last_seqname = seqname \n indices_in_seq.append(i)\n i += 1\n # indices_in_seq = [i for i, index in enumerate(indices_in_seq)]\n else: \n indices_in_seq = [int(name.split('/')[-1]) for name in imagenames]\n f['index_in_seq'] = indices_in_seq\n f['seqname'] = [name.encode() for name in seqnames]\n seq_lens = {}\n for seqname in seqnames: \n if seqname not in seq_lens: \n seq_lens[seqname] = 0 \n seq_lens[seqname] += 1\n\n f['seqlen'] = [seq_lens[seqname] for seqname in seqnames]\n f.close()\n\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 313, "n_words": 127, "vocab_size": 64, "complexity": 13, "nloc": 26, "token_counts": 241, "n_ast_nodes": 395, "n_identifiers": 24, "random_cut": "def process_dataset_for_video(path, is_mpi=False):\n # add some content for specified dataset(h5)\n f = h5py.File(path, \"a\")\n imagenames = [name.decode() for name in f['imagename'][:]]\n seqnames = ['/'.join(name.split('/')[:-1]) for name in imagenames]\n if is_mpi: \n indices_in_seq_ref = [int(name.split('/')[-1].split('.')[0].split('_')[1]) for name in imagenames]\n # reset indices \n indices_in_seq = []\n i 
= 0 \n last_seqname = None\n for index, seqname in zip(indices_in_seq_ref, seqnames): \n if last_seqname is not None and seqname != last_seqname: \n i = 0 \n last_seqname = seqname \n indices_in_seq.append(i)\n i += 1\n # indices_in_seq = [i for i, index in enumerate(indices_in_seq)]\n else: \n indices_in_seq = [int(name.split('/')[-1]) for name in imagenames]\n f['index_in_seq'] = indi" }, { "id": 54948, "commit_id": "37549d157007f6eef07ed8b1e2e14efb73134840", "repo": "prefect", "path": "tests/orion/api/test_flows.py", "file_name": "test_flows.py", "fun_name": "test_read_flow_by_name_returns_404_if_does_not_exist", "commit_message": "Use status constants instead of hardcoded values\n\nCloses: PrefectHQ/orion#1673", "code": "async def test_read_flow_by_name_returns_404_if_does_not_exist(self, client):\n response = await client.get(f\"/flows/{uuid4()}\")\n assert response.status_code == status.HTTP_404_NOT_FOUND\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 25, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 3, "token_counts": 25, "n_ast_nodes": 49, "n_identifiers": 9, "random_cut": "async def test_read_flow_by_name_returns_404_if_does_not_exist(self, client):\n response = await client.get(f\"/flows/{uuid4()}\")\n assert response.status_code == status.HTTP_404_NOT_FOUND\n" }, { "id": 156932, "commit_id": "b016998fa931f644df4d266a3ed5e7604c20d2a9", "repo": "dask", "path": "dask/array/einsumfuncs.py", "file_name": "einsumfuncs.py", "fun_name": "parse_einsum_input", "commit_message": "Removed unused loop control variables (`B007`) (#9458)\n\nCo-authored-by: James Bourbeau ", "code": "def parse_einsum_input(operands):\n \n\n if len(operands) == 0:\n raise ValueError(\"No input operands\")\n\n if isinstance(operands[0], basestring):\n subscripts = operands[0].replace(\" \", \"\")\n operands = [asarray(o) for o in operands[1:]]\n\n # Ensure all characters are valid\n for s in subscripts:\n if s in \".,->\":\n continue\n if s not in einsum_symbols_set:\n raise ValueError(\"Character %s is not a valid symbol.\" % s)\n\n else:\n tmp_operands = list(operands)\n operand_list = []\n subscript_list = []\n for _ in range(len(operands) // 2):\n operand_list.append(tmp_operands.pop(0))\n subscript_list.append(tmp_operands.pop(0))\n\n output_list = tmp_operands[-1] if len(tmp_operands) else None\n operands = [asarray(v) for v in operand_list]\n subscripts = \"\"\n last = len(subscript_list) - 1\n for num, sub in enumerate(subscript_list):\n for s in sub:\n if s is Ellipsis:\n subscripts += \"...\"\n elif isinstance(s, int):\n subscripts += einsum_symbols[s]\n else:\n raise TypeError(\n \"For this input type lists must contain \"\n \"either int or Ellipsis\"\n )\n if num != last:\n subscripts += \",\"\n\n if output_list is not None:\n subscripts += \"->\"\n for s in output_list:\n if s is Ellipsis:\n subscripts += \"...\"\n elif isinstance(s, int):\n subscripts += einsum_symbols[s]\n else:\n raise TypeError(\n \"For this input type lists must contain \"\n \"either int or Ellipsis\"\n )\n # Check for proper \"->\"\n if (\"-\" in subscripts) or (\">\" in subscripts):\n invalid = (subscripts.count(\"-\") > 1) or (subscripts.count(\">\") > 1)\n if invalid or (subscripts.count(\"->\") != 1):\n raise ValueError(\"Subscripts can only contain one '->'.\")\n\n # Parse ellipses\n if \".\" in subscripts:\n used = subscripts.replace(\".\", \"\").replace(\",\", \"\").replace(\"->\", \"\")\n unused = 
list(einsum_symbols_set - set(used))\n ellipse_inds = \"\".join(unused)\n longest = 0\n\n if \"->\" in subscripts:\n input_tmp, output_sub = subscripts.split(\"->\")\n split_subscripts = input_tmp.split(\",\")\n out_sub = True\n else:\n split_subscripts = subscripts.split(\",\")\n out_sub = False\n\n for num, sub in enumerate(split_subscripts):\n if \".\" in sub:\n if (sub.count(\".\") != 3) or (sub.count(\"...\") != 1):\n raise ValueError(\"Invalid Ellipses.\")\n\n # Take into account numerical values\n if operands[num].shape == ():\n ellipse_count = 0\n else:\n ellipse_count = max(operands[num].ndim, 1)\n ellipse_count -= len(sub) - 3\n\n if ellipse_count > longest:\n longest = ellipse_count\n\n if ellipse_count < 0:\n raise ValueError(\"Ellipses lengths do not match.\")\n elif ellipse_count == 0:\n split_subscripts[num] = sub.replace(\"...\", \"\")\n else:\n rep_inds = ellipse_inds[-ellipse_count:]\n split_subscripts[num] = sub.replace(\"...\", rep_inds)\n\n subscripts = \",\".join(split_subscripts)\n if longest == 0:\n out_ellipse = \"\"\n else:\n out_ellipse = ellipse_inds[-longest:]\n\n if out_sub:\n subscripts += \"->\" + output_sub.replace(\"...\", out_ellipse)\n else:\n # Special care for outputless ellipses\n output_subscript = \"\"\n tmp_subscripts = subscripts.replace(\",\", \"\")\n for s in sorted(set(tmp_subscripts)):\n if s not in einsum_symbols_set:\n raise ValueError(\"Character %s is not a valid symbol.\" % s)\n if tmp_subscripts.count(s) == 1:\n output_subscript += s\n normal_inds = \"\".join(sorted(set(output_subscript) - set(out_ellipse)))\n\n subscripts += \"->\" + out_ellipse + normal_inds\n\n # Build output string if does not exist\n if \"->\" in subscripts:\n input_subscripts, output_subscript = subscripts.split(\"->\")\n else:\n input_subscripts = subscripts\n # Build output subscripts\n tmp_subscripts = subscripts.replace(\",\", \"\")\n output_subscript = \"\"\n for s in sorted(set(tmp_subscripts)):\n if s not in einsum_symbols_set:\n raise ValueError(\"Character %s is not a valid symbol.\" % s)\n if tmp_subscripts.count(s) == 1:\n output_subscript += s\n\n # Make sure output subscripts are in the input\n for char in output_subscript:\n if char not in input_subscripts:\n raise ValueError(\"Output character %s did not appear in the input\" % char)\n\n # Make sure number operands is equivalent to the number of terms\n if len(input_subscripts.split(\",\")) != len(operands):\n raise ValueError(\n \"Number of einsum subscripts must be equal to the number of operands.\"\n )\n\n return (input_subscripts, output_subscript, operands)\n\n\n@derived_from(np)", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "@derived_from(np)", "n_ast_errors": 1, "ast_levels": 19, "n_whitespaces": 1923, "n_words": 534, "vocab_size": 224, "complexity": 46, "nloc": 117, "token_counts": 785, "n_ast_nodes": 1377, "n_identifiers": 57, "random_cut": "def parse_einsum_input(operands):\n \n\n if len(operands) == 0:\n raise ValueError(\"No input operands\")\n\n if isinstance(operands[0], basestring):\n subscripts = operands[0].replace(\" \", \"\")\n operands = [asarray(o) for o in operands[1:]]\n\n # Ensure all characters are valid\n for s in subscripts:\n if s in \".,->\":\n continue\n if s not in einsum_symbols_set:\n raise ValueError(\"Character %s is not a valid symbol.\" % s)\n\n else:\n tmp_operands = list(operands)\n operand_list = []\n subscript_list = []\n for _ in range(len(operands) // 2):\n operand_list.append(tmp_operands.pop(0))\n 
subscript_list.append(tmp_operands.pop(0))\n\n output_list = tmp_operands[-1] if len(tmp_operands) else None\n operands = [asarray(v) for v in operand_list]\n subscripts = \"\"\n last = len(subscript_list) - 1\n for num, sub in enumerate(subscript_list):\n for s in sub:\n if s is Ellipsis:\n subscripts += \"...\"\n elif isinstance(s, int):\n subscripts += einsum_symbols[s]\n else:\n raise TypeError(\n \"For this input type lists must contain \"\n \"either int or Ellipsis\"\n )\n if num != last:\n subscripts += \",\"\n\n if output_list is not None:\n subscripts += \"->\"\n for s in output_list:\n if s is Ellipsis:\n subscripts += \"...\"\n elif isinstance(s, int):\n subscripts += einsum_symbols[s]\n else:\n raise TypeError(\n \"For this input type lists must contain \"\n \"either int or Ellipsis\"\n )\n # Check for proper \"->\"\n if (\"-\" in subscripts) or (\">\" in subscripts):\n invalid = (subscripts.count(\"-\") > 1) or (subscripts.count(\">\") > 1)\n if invalid or (subscripts.count(\"->\") != 1):\n raise ValueError(\"Subscripts can only contain one '->'.\")\n\n # Parse ellipses\n if \".\" in subscripts:\n used = subscripts.replace(\".\", \"\").replace(\",\", \"\").replace(\"->\", \"\")\n unused = list(einsum_symbols_set - set(used))\n ellipse_inds = \"\".join(unused)\n longest = 0\n\n if \"->\" in subscripts:\n input_tmp, output_sub = subscripts.split(\"->\")\n split_subscripts = input_tmp.split(\",\")\n out_sub = True\n else:\n split_subscripts = subscripts.split(\",\")\n out_sub = False\n\n for num, sub in enumerate(split_subscripts):\n if \".\" in sub:\n if (sub.count(\".\") != 3) or (sub.count(\"...\") != 1):\n raise ValueError(\"Invalid Ellipses.\")\n\n # Take into account numerical values\n if operands[num].shape == ():\n ellipse_count = 0\n else:\n ellipse_count = max(operands[num].ndim, 1)\n ellipse_count -= len(sub) - 3\n\n if ellipse_count > longest:\n longest = ellipse_count\n\n if ellipse_count < 0:\n raise ValueError(\"Ellipses lengths do not match.\")\n elif ellipse_count == 0:\n split_subscripts[num] = sub.replace(\"...\", \"\")\n else:\n rep_inds = ellipse_inds[-ellipse_count:]\n split_subscripts[num] = sub.replace(\"...\", rep_inds)\n\n subscripts = \",\".join(split_subscripts)\n if longest == 0:\n out_ellipse = \"\"\n else:\n out_ellipse = ellipse_inds[-longest:]\n\n if out_sub:\n subscripts += \"->\" + output_sub.replace(\"...\", out_ellipse)\n else:\n # Special care for outputless ellipses\n output_subscript = \"\"\n tmp_subscripts = subscripts.replace(\",\", \"\")\n for s in sorted(set(tmp_subscripts)):\n if s not in einsum_symbols_set:\n raise ValueError(\"Character %s is not a valid symbol.\" % s)\n if tmp_subscripts.count(s) == 1:\n " }, { "id": 147078, "commit_id": "aaf47b2493beb985bfbc52dbdf1f52fc48377d74", "repo": "ray", "path": "python/ray/serve/tests/test_application.py", "file_name": "test_application.py", "fun_name": "test_basic_run", "commit_message": "[serve] Implement `serve.run()` and `Application` (#23157)\n\nThese changes expose `Application` as a public API. They also introduce a new public method, `serve.run()`, which allows users to deploy their `Applications` or `DeploymentNodes`. 
Additionally, the Serve CLI's `run` command and Serve's REST API are updated to use `Applications` and `serve.run()`.\r\n\r\nCo-authored-by: Edward Oakes ", "code": "def test_basic_run(self, serve_instance):\n \n\n deployments = [self.f, self.g, self.C, self.D]\n responses = [\"f reached\", \"g reached\", \"C reached\", \"D reached\"]\n\n self.deploy_and_check_responses(deployments, responses)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 49, "n_words": 21, "vocab_size": 18, "complexity": 1, "nloc": 4, "token_counts": 46, "n_ast_nodes": 75, "n_identifiers": 10, "random_cut": "def test_basic_run(self, serve_instance):\n \n\n deployments = [self.f, self.g, self.C, self.D]\n responses = [\"f reached\", \"g reached\", \"C reached\", \"D reached\"]\n\n " }, { "id": 280445, "commit_id": "0d37837d448a3b9202d5c4c9928ef40940578719", "repo": "keras", "path": "keras/saving/experimental/saving_lib_test.py", "file_name": "saving_lib_test.py", "fun_name": "test_load_model_api_endpoint", "commit_message": "Keras Model saving - Use GFile handle for python zipfile when loading and saving model.\n\nPiperOrigin-RevId: 486753122", "code": "def test_load_model_api_endpoint(self):\n temp_filepath = Path(os.path.join(self.get_temp_dir(), \"mymodel.keras\"))\n model = self._get_functional_model()\n ref_input = np.random.random((10, 32))\n ref_output = model.predict(ref_input)\n model.save(temp_filepath, save_format=\"keras_v3\")\n model = keras.models.load_model(temp_filepath)\n self.assertAllClose(model.predict(ref_input), ref_output, atol=1e-6)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 72, "n_words": 24, "vocab_size": 19, "complexity": 1, "nloc": 8, "token_counts": 92, "n_ast_nodes": 145, "n_identifiers": 22, "random_cut": "def test_load_model_api_endpoint(self):\n temp_filepath = Path(os.path.join(self.get_temp_dir(), \"mymodel.keras\"))\n model = self._get_functiona" }, { "id": 294148, "commit_id": "43772b3fa9db00d146292854ee3b52392a29dd37", "repo": "core", "path": "homeassistant/components/minecraft_server/__init__.py", "file_name": "__init__.py", "fun_name": "_async_status_request", "commit_message": "Add World Message/MOTD support for MinecraftServer Integration (#66297)", "code": "async def _async_status_request(self) -> None:\n \n try:\n status_response = await self._hass.async_add_executor_job(\n self._mc_status.status, self._MAX_RETRIES_STATUS\n )\n\n # Got answer to request, update properties.\n self.version = status_response.version.name\n self.protocol_version = status_response.version.protocol\n self.players_online = status_response.players.online\n self.players_max = status_response.players.max\n self.latency_time = status_response.latency\n self.motd = (status_response.description).get(\"text\")\n self.players_list = []\n if status_response.players.sample is not None:\n for player in status_response.players.sample:\n self.players_list.append(player.name)\n self.players_list.sort()\n\n # Inform user once about successful update if necessary.\n if self._last_status_request_failed:\n _LOGGER.info(\n \"Updating the properties of '%s:%s' succeeded again\",\n self.host,\n self.port,\n )\n self._last_status_request_failed = False\n except OSError as error:\n # No answer to request, set all properties to unknown.\n self.version = None\n self.protocol_version = None\n self.players_online = None\n 
self.players_max = None\n self.latency_time = None\n self.players_list = None\n self.motd = None\n\n # Inform user once about failed update if necessary.\n if not self._last_status_request_failed:\n _LOGGER.warning(\n \"Updating the properties of '%s:%s' failed - OSError: %s\",\n self.host,\n self.port,\n error,\n )\n self._last_status_request_failed = True\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 694, "n_words": 141, "vocab_size": 79, "complexity": 6, "nloc": 40, "token_counts": 221, "n_ast_nodes": 358, "n_identifiers": 35, "random_cut": "async def _async_status_request(self) -> None:\n \n try:\n status_response = await self._hass.async_add_executor_job(\n self._mc_status.status, self._MAX_RETRIES_STATUS\n )\n\n # Got answer to request, update properties.\n self.version = status_response.version.name\n self.protocol_version = status_response.version.protocol\n self.players_online = status_response.players.online\n self.players_max = status_response.players.max\n self.latency_time = status_response.latency\n self.motd = (status_response.description).get(\"text\")\n self.players_list = []\n if status_response.players.sample is not None:\n for player in status_response.players.sample:\n self.players_list.append(player.name)\n self.players_list.sort()\n\n # Inform user once about successful update if necessary.\n if self._last_status_request_failed:\n _LOGGER.info(\n \"Updating the properties of '%s:%s' succeeded again\",\n self.host,\n self.port,\n )\n self._last_status_request_failed = False\n except OSError as error:\n # No answer to request, set all properties to unknown.\n self.version = None\n self.protocol_version = None\n self.players_online = None\n self.players_max = None\n self.latency_time = None\n self.players_list = None\n self.motd = None\n\n # Inform user once about failed update if necessary.\n if not self._last_status_request_failed:\n _LOGGER.warning(\n \"Updating the properties of '%s:%s' failed - OSError: %s\",\n self.host,\n self.port,\n error,\n )\n " }, { "id": 207846, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_redirect_on_add_view_continue_button", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_redirect_on_add_view_continue_button(self):\n \n response = self.client.post(\n reverse(\"admin:admin_views_modelwithstringprimarykey_add\"),\n {\n \"string_pk\": \"123/history\",\n \"_continue\": \"1\", # Save and continue editing\n },\n )\n\n self.assertEqual(response.status_code, 302) # temporary redirect\n self.assertIn(\"/123_2Fhistory/\", response.headers[\"location\"]) # PK is quoted\n\n\n@override_settings(ROOT_URLCONF=\"admin_views.urls\")", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "@override_settings(ROOT_URLCONF=\"admin_views.urls\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 130, "n_words": 30, "vocab_size": 28, "complexity": 1, "nloc": 10, "token_counts": 54, "n_ast_nodes": 114, "n_identifiers": 12, "random_cut": "def test_redirect_on_add_view_continue_button(self):\n \n response = self.client.post(\n reverse(\"admin:admin_views_modelwithstringprimarykey_add\"),\n {\n \"string_pk\": \"123/history\",\n \"_continue\": \"1\", # Save and continue editing\n },\n )\n\n self.assertEqual(response.status_code, 302) # temporary redirect\n 
self.assertIn(\"/123_2Fhistory/\", response.headers[\"locatio" }, { "id": 225721, "commit_id": "c3cb70a39473ed8a8601758f0cd3e67c6e1d076c", "repo": "albumentations", "path": "albumentations/augmentations/transforms.py", "file_name": "transforms.py", "fun_name": "apply", "commit_message": "Move common functions to utils.py (#1260)\n\n* Move common functions into util.py\r\n\r\n* Fix mypy errors", "code": "def apply(self, image, **params):\n if not is_rgb_image(image):\n raise TypeError(\"ToSepia transformation expects 3-channel images.\")\n return F.linear_transformation_rgb(image, self.sepia_transformation_matrix)\n", "url": "https://github.com/albumentations-team/albumentations.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 40, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 4, "token_counts": 33, "n_ast_nodes": 53, "n_identifiers": 9, "random_cut": "def apply(self, image, **params):\n if not is_rgb_image(image):\n raise TypeError(\"ToSepia t" }, { "id": 140038, "commit_id": "ec89fe52033e8087d0ca4e5bb9331863d0bb3a52", "repo": "ray", "path": "rllib/agents/dqn/tests/test_apex_dqn.py", "file_name": "test_apex_dqn.py", "fun_name": "test_apex_zero_workers", "commit_message": "[RLlib] APEX-DQN and R2D2 config objects. (#25067)", "code": "def test_apex_zero_workers(self):\n config = (\n apex.ApexConfig()\n .rollouts(num_rollout_workers=0)\n .resources(num_gpus=0)\n .training(\n replay_buffer_config={\n \"learning_starts\": 1000,\n },\n optimizer={\n \"num_replay_buffer_shards\": 1,\n },\n )\n .reporting(\n min_sample_timesteps_per_reporting=100,\n min_time_s_per_reporting=1,\n )\n )\n\n for _ in framework_iterator(config):\n trainer = config.build(env=\"CartPole-v0\")\n results = trainer.train()\n check_train_results(results)\n print(results)\n trainer.stop()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 316, "n_words": 36, "vocab_size": 31, "complexity": 2, "nloc": 24, "token_counts": 100, "n_ast_nodes": 163, "n_identifiers": 25, "random_cut": "def test_apex_zero_workers(self):\n config = (\n apex.ApexConfig()\n .rollouts(num_rollout_workers=0)\n .resources(num_gpus=0)\n .training(\n replay_buffer_config={\n \"learning_starts\": 1000,\n },\n optimizer={\n \"num_replay_buffer_shards\": 1,\n },\n )\n .reporting(\n min_sample_timesteps_per_reporting=100,\n min_time_s_per_reporting=1,\n )\n " }, { "id": 291473, "commit_id": "19abba7f6ba24fe746889e33c5364702a62946bf", "repo": "core", "path": "homeassistant/components/onkyo/media_player.py", "file_name": "media_player.py", "fun_name": "_parse_audio_information", "commit_message": "Use _attr in onkyo media player (#82832)", "code": "def _parse_audio_information(self, audio_information_raw):\n values = _parse_onkyo_payload(audio_information_raw)\n if values is False:\n self._audio_info_supported = False\n return\n\n if values:\n info = {\n \"format\": _tuple_get(values, 1),\n \"input_frequency\": _tuple_get(values, 2),\n \"input_channels\": _tuple_get(values, 3),\n \"listening_mode\": _tuple_get(values, 4),\n \"output_channels\": _tuple_get(values, 5),\n \"output_frequency\": _tuple_get(values, 6),\n }\n self._attr_extra_state_attributes[ATTR_AUDIO_INFORMATION] = info\n else:\n self._attr_extra_state_attributes.pop(ATTR_AUDIO_INFORMATION, None)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 
227, "n_words": 44, "vocab_size": 33, "complexity": 3, "nloc": 17, "token_counts": 105, "n_ast_nodes": 166, "n_identifiers": 11, "random_cut": "def _parse_audio_information(self, audio_information_raw):\n values = _parse_onkyo_payload(audio_information_raw)\n if values is False:\n self._audio_info_supported = False\n return\n\n if values:\n info = {\n \"format\": _tuple_get(values, 1),\n \"input_frequency\": _tuple_get(values, 2),\n \"i" }, { "id": 47808, "commit_id": "91b82763c5c17e8ab021f2d4f2a5681ea90adf6b", "repo": "airflow", "path": "tests/jobs/test_scheduler_job.py", "file_name": "test_scheduler_job.py", "fun_name": "test_process_executor_events", "commit_message": "Fix TI failure handling when task cannot be unmapped. (#23119)\n\nAt first glance this looks like a lot of un-related changed, but it is\r\nall related to handling errors in unmapping:\r\n\r\n- Ensure that SimpleTaskInstance (and thus the Zombie callback) knows\r\n about map_index, and simplify the code for SimpleTaskInstance -- no\r\n need for properties, just attributes works.\r\n\r\n- Be able to create a TaskFail from a TI, not a Task.\r\n\r\n This is so that we can create the TaskFail with the mapped task so we\r\n can delay unmapping the task in TI.handle_failure as long as possible.\r\n\r\n- Change email_alert and get_email_subject_content to take the task so\r\n we can pass the unmapped Task around.", "code": "def test_process_executor_events(self, mock_stats_incr, mock_task_callback, dag_maker):\n dag_id = \"test_process_executor_events\"\n task_id_1 = 'dummy_task'\n\n session = settings.Session()\n with dag_maker(dag_id=dag_id, fileloc='/test_path1/'):\n task1 = EmptyOperator(task_id=task_id_1)\n ti1 = dag_maker.create_dagrun().get_task_instance(task1.task_id)\n\n mock_stats_incr.reset_mock()\n\n executor = MockExecutor(do_update=False)\n task_callback = mock.MagicMock()\n mock_task_callback.return_value = task_callback\n self.scheduler_job = SchedulerJob(executor=executor)\n self.scheduler_job.processor_agent = mock.MagicMock()\n ti1.state = State.QUEUED\n session.merge(ti1)\n session.commit()\n\n executor.event_buffer[ti1.key] = State.FAILED, None\n\n self.scheduler_job._process_executor_events(session=session)\n ti1.refresh_from_db(session=session)\n assert ti1.state == State.FAILED\n self.scheduler_job.executor.callback_sink.send.assert_not_called()\n self.scheduler_job.processor_agent.reset_mock()\n\n # ti in success state\n ti1.state = State.SUCCESS\n session.merge(ti1)\n session.commit()\n executor.event_buffer[ti1.key] = State.SUCCESS, None\n\n self.scheduler_job._process_executor_events(session=session)\n ti1.refresh_from_db(session=session)\n assert ti1.state == State.SUCCESS\n self.scheduler_job.executor.callback_sink.send.assert_not_called()\n mock_stats_incr.assert_has_calls(\n [\n mock.call('scheduler.tasks.killed_externally'),\n mock.call('operator_failures_EmptyOperator'),\n mock.call('ti_failures'),\n ],\n any_order=True,\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 390, "n_words": 85, "vocab_size": 57, "complexity": 1, "nloc": 38, "token_counts": 288, "n_ast_nodes": 471, "n_identifiers": 45, "random_cut": "def test_process_executor_events(self, mock_stats_incr, mock_task_callback, dag_maker):\n dag_id = \"test_process_executor_events\"\n task_id_1 = 'dummy_task'\n\n session = settings.Session()\n with dag_maker(dag_id=dag_id, fileloc='/test_path1/'):\n task1 = 
EmptyOperator(task_id=task_id_1)\n ti1 = dag_maker.create_dagrun().get_task_instance(task1.task_id)\n\n mock_stats_incr.reset_mock()\n\n executor = MockExecutor(do_update=False)\n task_callback = mock.MagicMock()\n mock_task_callback.return_value = task_callback\n self.scheduler_job = SchedulerJob(executor=executor)\n self.scheduler_job.processor_agent = mock.MagicMock()\n ti1.state = State.QUEUED\n session.merge(ti1)\n session.commit()\n\n executor.event_buffer[ti1.key] = State.FAILED, None\n\n self.scheduler_job._process_executor_events(session=session)\n ti1.refresh_from_db(session=session)\n assert ti1.state == State.FAILED\n self.scheduler_job.executor.callback_sink.send.assert_not_called()\n self.scheduler_job.processor_agent.reset_mock()\n\n # ti in success state\n ti1.state = State.SUCCESS\n session.merge(ti1)\n session.commit()\n executor.event_" }, { "id": 94529, "commit_id": "23d8888328564e6a86d1bfe0c36aea6f6f084f6a", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events_mep.py", "file_name": "test_organization_events_mep.py", "fun_name": "test_has_transaction", "commit_message": "feat(mep): For the transaction column treat unparam/null the same (#37678)\n\n* feat(mep): For the transaction column treat unparam/null the same\r\n\r\n- This causes the query builder to transform the transaction tag so that\r\n the values `<< unparameterized >>` and null (or empty value) both\r\n become `<< unparameterized >>` so that null never shows up\r\n- This causes `!has:transaction` to raise a 400, since there won't ever\r\n be a result\r\n - TODO: probably should move this logic to transactions later\r\n- This causes `has:transaction` to be ignored since \"all transactions\"\r\n will have a transaction name now\r\n\r\n* fix: Check type before accessing properties\r\n\r\n* ref: Move txn specific stuff out of the builder", "code": "def test_has_transaction(self):\n self.store_transaction_metric(\n 1,\n tags={},\n timestamp=self.min_ago,\n )\n\n self.store_transaction_metric(\n 100,\n tags={\"transaction\": \"foo_transaction\"},\n timestamp=self.min_ago,\n )\n\n query = {\n \"project\": [self.project.id],\n \"orderby\": \"p50(transaction.duration)\",\n \"field\": [\n \"transaction\",\n \"p50(transaction.duration)\",\n ],\n \"query\": \"has:transaction\",\n \"statsPeriod\": \"24h\",\n \"dataset\": \"metricsEnhanced\",\n \"per_page\": 50,\n }\n\n response = self.do_request(query)\n assert response.status_code == 200, response.content\n assert len(response.data[\"data\"]) == 2\n data = response.data[\"data\"]\n meta = response.data[\"meta\"]\n\n assert data[0][\"transaction\"] == \"<< unparameterized >>\"\n assert data[0][\"p50(transaction.duration)\"] == 1\n assert data[1][\"transaction\"] == \"foo_transaction\"\n assert data[1][\"p50(transaction.duration)\"] == 100\n assert meta[\"isMetricsData\"]\n\n query = {\n \"project\": [self.project.id],\n \"orderby\": \"p50(transaction.duration)\",\n \"field\": [\n \"transaction\",\n \"p50(transaction.duration)\",\n ],\n \"query\": \"!has:transaction\",\n \"statsPeriod\": \"24h\",\n \"dataset\": \"metricsEnhanced\",\n \"per_page\": 50,\n }\n\n response = self.do_request(query)\n assert response.status_code == 400, response.content\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 542, "n_words": 101, "vocab_size": 56, "complexity": 1, "nloc": 47, "token_counts": 239, "n_ast_nodes": 414, "n_identifiers": 16, "random_cut": "def 
test_has_transaction(self):\n self.store_transaction_metric(\n 1,\n tags={},\n timestamp=self.min_ago,\n )\n\n self.store_transaction_metric(\n 100,\n tags={\"transaction\": \"foo_transaction\"},\n timestamp=self.min_ago,\n )\n\n query = {\n \"project\": [self.project.id],\n \"orderby\": \"p50(transaction.duration)\",\n \"field\": [\n \"transaction\",\n \"p50(transaction.duration)\",\n ],\n \"query\": \"has:transaction\",\n \"statsPeriod\": \"24h\",\n \"dataset\": \"metricsEnhanced\",\n \"per_page\": 50,\n }\n\n response = self.do_request(query)\n assert response.status_code == 200, response.content\n assert len(response.data[\"data\"]) == 2\n data = response.data[\"data\"]\n meta = response.data[\"meta\"]\n\n assert data[0][\"transaction\"] == \"<< unparameterized >>\"\n assert data[0][\"p50(transaction.duration)\"] == 1\n assert data[1][\"transaction\"] == \"foo_transaction\"\n assert data[1][\"p50(transaction.duration)\"] == 100\n assert meta[\"isMetricsData\"]\n\n query = {\n \"project\": [self.project.id],\n \"orderby\": \"p50(transaction.duration)\",\n \"field\": [\n \"transaction\",\n \"p50(transaction.duration)\",\n ],\n \"query\": \"!has:transaction\",\n \"statsPeriod\": \"24h\",\n \"dataset\": \"metricsEnhanced\",\n \"per_page\": 50,\n }\n\n response = self.do_request(query)\n assert response.status_code == 4" }, { "id": 266478, "commit_id": "e9ffcf3c85f2fa40a20ee03bd9c1ce7296574cd1", "repo": "ansible", "path": "test/lib/ansible_test/_internal/completion.py", "file_name": "completion.py", "fun_name": "windows_completion", "commit_message": "ansible-test - Defer loading of completion entries. (#76852)\n\n* ansible-test - Defer loading of completion entries.\r\n\r\nThis avoids a traceback when running ansible-test outside of a supported directory.", "code": "def windows_completion(): # type: () -> t.Dict[str, WindowsRemoteCompletionConfig]\n \n return load_completion('windows', WindowsRemoteCompletionConfig)\n\n\n@cache", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "@cache", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 18, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 29, "n_identifiers": 4, "random_cut": "def windows_completion(): # type: () -> t.Dict[str, WindowsRemoteCompletionConfig]\n \n return load_completion('windows'" }, { "id": 208404, "commit_id": "ae73d5d777feefb4044bc37bbe618cad242202f8", "repo": "celery", "path": "celery/concurrency/asynpool.py", "file_name": "asynpool.py", "fun_name": "unpack_from", "commit_message": "[pre-commit.ci] pre-commit autoupdate (#7927)\n\n* [pre-commit.ci] pre-commit autoupdate\r\n\r\nupdates:\r\n- [github.com/asottile/pyupgrade: v3.2.2 → v3.3.1](https://github.com/asottile/pyupgrade/compare/v3.2.2...v3.3.1)\r\n- [github.com/PyCQA/flake8: 5.0.4 → 6.0.0](https://github.com/PyCQA/flake8/compare/5.0.4...6.0.0)\r\n- [github.com/pre-commit/pre-commit-hooks: v4.3.0 → v4.4.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.3.0...v4.4.0)\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def unpack_from(fmt, iobuf, unpack=unpack):\n return unpack(fmt, iobuf.getvalue()) # <-- BytesIO\n\n__all__ = ('AsynPool',)\n\nlogger = get_logger(__name__)\nerror, debug = logger.error, logger.debug\n\nUNAVAIL = frozenset({errno.EAGAIN, errno.EINTR})\n\n#: Constant sent by child process 
when started (ready to accept work)\nWORKER_UP = 15\n\n#: A process must've started before this timeout (in secs.) expires.\nPROC_ALIVE_TIMEOUT = 4.0\n\nSCHED_STRATEGY_FCFS = 1\nSCHED_STRATEGY_FAIR = 4\n\nSCHED_STRATEGIES = {\n None: SCHED_STRATEGY_FAIR,\n 'default': SCHED_STRATEGY_FAIR,\n 'fast': SCHED_STRATEGY_FCFS,\n 'fcfs': SCHED_STRATEGY_FCFS,\n 'fair': SCHED_STRATEGY_FAIR,\n}\nSCHED_STRATEGY_TO_NAME = {v: k for k, v in SCHED_STRATEGIES.items()}\n\nAck = namedtuple('Ack', ('id', 'fd', 'payload'))\n\n", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 97, "n_words": 89, "vocab_size": 73, "complexity": 1, "nloc": 2, "token_counts": 22, "n_ast_nodes": 208, "n_identifiers": 27, "random_cut": "def unpack_from(fmt, iobuf, unpack=unpack):\n return unpack(fmt, iobuf.getvalue()) # <-- BytesIO\n\n__all__ = ('AsynPool',)\n\nlogger = get_logger(__name__)\nerror, debug = logger.error, logger.debug\n\nUNAVAIL = frozenset({errno.EAGAIN, errno.EINTR})\n\n#: Constant sent by child process when started (ready to accept work)\nWORKER_UP = 15\n\n#: A process must've started before this timeout (in secs.) expires.\nPROC_ALIVE_TIMEOUT = 4.0\n\nSCHED_STRATEGY_FCFS = 1\nSCHED_STRATEGY_FAIR = 4\n\nSCHED_STRATEGIES = {\n None: SCHED_STRATEGY_FAIR,\n 'default': SCHED_STRATEGY_FAIR,\n 'fast': SCHED_STRATEGY_FCFS,\n 'fcfs': SCHED_STRATEGY_FCFS,\n 'fair" }, { "id": 267129, "commit_id": "b439e41a915ccec0ccbabecc966919ea406db74e", "repo": "ansible", "path": "lib/ansible/cli/doc.py", "file_name": "doc.py", "fun_name": "add_collection_plugins", "commit_message": "expand ansible-doc coverage (#74963)\n\n* Expand ansible-doc to tests/filters and fix existing issues\r\n\r\n enable filter/test docs if in single file or companion yaml\r\n add docs for several filters/tests plugins\r\n allow .yml companion for docs for other plugins, must be colocated\r\n verify plugins are valid (not modules, cannot)\r\n fix 'per collection' filtering\r\n limit old style deprecation (_ prefix) to builtin/legacy\r\n start move to pathlib for saner path handling\r\n moved some funcitons, kept backwards compat shims with deprecation notice\r\n\r\n Co-authored-by: Abhijeet Kasurde \r\n Co-authored-by: Felix Fontein \r\n Co-authored-by: Sandra McCann ", "code": "def add_collection_plugins(plugin_list, plugin_type, coll_filter=None):\n display.deprecated(\"add_collection_plugins method, use ansible.plugins.list functions instead.\", version='2.17')\n plugin_list.update(list_plugins(plugin_type, coll_filter))\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 18, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 32, "n_ast_nodes": 52, "n_identifiers": 9, "random_cut": "def add_collection_plugins(plugin_list, plugin_type, coll_filter=None):\n display.deprecated(\"add_collection_plugins method, use ansible.plugins.list functions ins" }, { "id": 84512, "commit_id": "150f77aea2002aa4f9c174a8e73c9a3b83b71e34", "repo": "zulip", "path": "zerver/tests/test_realm.py", "file_name": "test_realm.py", "fun_name": "test_changing_waiting_period_updates_system_groups", "commit_message": "bulk_create: Add users to system user groups in bulk_create_users.\n\nThis commit modifies bulk_create_users to add the users to the\nrespective system groups. 
And due to this change, now bots in\ndevelopment environment are also added to system groups.\n\nTests are changed accordingly as more UserGroupMembeship objects\nare created.", "code": "def test_changing_waiting_period_updates_system_groups(self) -> None:\n realm = get_realm(\"zulip\")\n members_system_group = UserGroup.objects.get(\n realm=realm, name=\"@role:members\", is_system_group=True\n )\n full_members_system_group = UserGroup.objects.get(\n realm=realm, name=\"@role:fullmembers\", is_system_group=True\n )\n\n self.assert_length(UserGroupMembership.objects.filter(user_group=members_system_group), 10)\n self.assert_length(\n UserGroupMembership.objects.filter(user_group=full_members_system_group), 10\n )\n self.assertEqual(realm.waiting_period_threshold, 0)\n\n hamlet = self.example_user(\"hamlet\")\n othello = self.example_user(\"othello\")\n prospero = self.example_user(\"prospero\")\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=hamlet\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=othello\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=prospero\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=full_members_system_group, user_profile=hamlet\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=full_members_system_group, user_profile=othello\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=full_members_system_group, user_profile=prospero\n ).exists()\n )\n\n hamlet.date_joined = timezone_now() - timedelta(days=50)\n hamlet.save()\n othello.date_joined = timezone_now() - timedelta(days=75)\n othello.save()\n prospero.date_joined = timezone_now() - timedelta(days=150)\n prospero.save()\n do_set_realm_property(realm, \"waiting_period_threshold\", 100, acting_user=None)\n\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=hamlet\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=othello\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=prospero\n ).exists()\n )\n self.assertFalse(\n UserGroupMembership.objects.filter(\n user_group=full_members_system_group, user_profile=hamlet\n ).exists()\n )\n self.assertFalse(\n UserGroupMembership.objects.filter(\n user_group=full_members_system_group, user_profile=othello\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=full_members_system_group, user_profile=prospero\n ).exists()\n )\n\n do_set_realm_property(realm, \"waiting_period_threshold\", 70, acting_user=None)\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=hamlet\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=othello\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=prospero\n ).exists()\n )\n self.assertFalse(\n UserGroupMembership.objects.filter(\n user_group=full_members_system_group, user_profile=hamlet\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=full_members_system_group, user_profile=othello\n 
).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=full_members_system_group, user_profile=prospero\n ).exists()\n )\n\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 1262, "n_words": 172, "vocab_size": 53, "complexity": 1, "nloc": 114, "token_counts": 616, "n_ast_nodes": 978, "n_identifiers": 32, "random_cut": "def test_changing_waiting_period_updates_system_groups(self) -> None:\n realm = get_realm(\"zulip\")\n members_system_group = UserGroup.objects.get(\n realm=realm, name=\"@role:members\", is_system_group=True\n )\n full_members_system_group = UserGroup.objects.get(\n realm=realm, name=\"@role:fullmembers\", is_system_group=True\n )\n\n self.assert_length(UserGroupMembership.objects.filter(user_group=members_system_group), 10)\n self.assert_length(\n UserGroupMembership.objects.filter(user_group=full_members_system_group), 10\n )\n self.assertEqual(realm.waiting_period_threshold, 0)\n\n hamlet = self.example_user(\"hamlet\")\n othello = self.example_user(\"othello\")\n prospero = self.example_user(\"prospero\")\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=hamlet\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=othello\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=prospero\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=full_members_system_group, user_profile=hamlet\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=full_members_system_group, user_profile=othello\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=full_members_system_group, user_profile=prospero\n ).exists()\n )\n\n hamlet.date_joined = timezone_now() - timedelta(days=50)\n hamlet.save()\n othello.date_joined = timezone_now() - timedelta(days=75)\n othello.save()\n prospero.date_joined = timezone_now() - timedelta(days=150)\n prospero.save()\n do_set_realm_property(realm, \"waiting_period_threshold\", 100, acting_user=None)\n\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=hamlet\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=othello\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=prospero\n ).exists()\n )\n self.assertFalse(\n UserGroupMembership.objects.filter(\n user_group=full_members_system_group, user_profile=hamlet\n ).exists()\n )\n self.assertFalse(\n UserGroupMembership.objects.filter(\n user_group=full_members_system_group, user_profile=othello\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=full_members_system_group, user_profile=prospero\n ).exists()\n )\n\n do_set_realm_property(realm, \"waiting_period_threshold\", 70, acting_user=None)\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=hamlet\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_profile=othello\n ).exists()\n )\n self.assertTrue(\n UserGroupMembership.objects.filter(\n user_group=members_system_group, user_pr" }, { "id": 245750, "commit_id": 
"d18ec258093098f92f9ef30266f9ddd2acebf864", "repo": "mmdetection", "path": "tools/analysis_tools/analyze_results.py", "file_name": "analyze_results.py", "fun_name": "detection_evaluate", "commit_message": "[Refactor]: update analyze_results.py (#8430)\n\n* update analyze_results.py\r\n\r\n* working in progress\r\n\r\n* fix panoptic config bug\r\n\r\n* update\r\n\r\n* update\r\n\r\n* Support panoptic_seg visualization\r\n\r\n* fix base config\r\n\r\n* recover config\r\n\r\n* update misc.py\r\n\r\n* fix bug\r\n\r\n* update\r\n\r\n* update\r\n\r\n* update\r\n\r\n* support new dataflow\r\n\r\n* update\r\n\r\n* update\r\n\r\n* update doc str\r\n\r\n* update", "code": "def detection_evaluate(self, dataset, results, topk=20, eval_fn=None):\n \n\n if eval_fn is None:\n eval_fn = bbox_map_eval\n else:\n assert callable(eval_fn)\n\n prog_bar = ProgressBar(len(results))\n _mAPs = {}\n data_info = {}\n for i, (result, ) in enumerate(zip(results)):\n\n # self.dataset[i] should not call directly\n # because there is a risk of mismatch\n data_info = dataset.prepare_data(i)\n data_info['bboxes'] = data_info['gt_bboxes'].tensor\n data_info['labels'] = data_info['gt_bboxes_labels']\n\n pred = result['pred_instances']\n pred_bboxes = pred['bboxes'].cpu().numpy()\n pred_scores = pred['scores'].cpu().numpy()\n pred_labels = pred['labels'].cpu().numpy()\n\n dets = []\n for label in range(len(dataset.metainfo['CLASSES'])):\n index = np.where(pred_labels == label)[0]\n pred_bbox_scores = np.hstack(\n [pred_bboxes[index], pred_scores[index].reshape((-1, 1))])\n dets.append(pred_bbox_scores)\n mAP = eval_fn(dets, data_info)\n\n _mAPs[i] = mAP\n prog_bar.update()\n # descending select topk image\n _mAPs = list(sorted(_mAPs.items(), key=lambda kv: kv[1]))\n good_mAPs = _mAPs[-topk:]\n bad_mAPs = _mAPs[:topk]\n\n return good_mAPs, bad_mAPs\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 437, "n_words": 113, "vocab_size": 84, "complexity": 4, "nloc": 29, "token_counts": 270, "n_ast_nodes": 444, "n_identifiers": 45, "random_cut": "def detection_evaluate(self, dataset, results, topk=20, eval_fn=None):\n \n\n if eval_fn is None:\n eval_fn = bbox_map_eval\n else:\n assert callable(eval_fn)\n\n prog_bar = ProgressBar(len(results))\n _mAPs = {}\n data_info = {}\n for i, (result, ) in enumerate(zip(results)):\n\n # self.dataset[i] should not call directly\n # because there is a risk of mismatch\n data_info = dataset.prepare_data(i)\n data_info['bboxes'] = data_info['gt_bboxes'].tensor\n data_info['labels'] = data_info['gt_bboxes_labels']\n\n pred = result['pred_instances']\n pred_bboxes = pred['bboxes'].cpu().numpy" }, { "id": 263609, "commit_id": "64ccb7aea824fbec57f7ed1bbe483ec486183c13", "repo": "pyinstaller", "path": "bootloader/waflib/Tools/qt5.py", "file_name": "qt5.py", "fun_name": "process_mocs", "commit_message": "Bootloader: Building: Unpack waf's lib archive.\n\nDoing so makes it easier to modify. 
This is a temporary measure until the next\nwaf version is released (although I'm tempted to keep it since it's much more\nIDE completion friendly).", "code": "def process_mocs(self):\n lst = self.to_nodes(getattr(self, 'moc', []))\n self.source = self.to_list(getattr(self, 'source', []))\n for x in lst:\n prefix = x.name[:x.name.rfind('.')]\n moc_target = 'moc_%s.%d.cpp' % (prefix, self.idx)\n moc_node = x.parent.find_or_declare(moc_target)\n self.source.append(moc_node)\n self.create_task('moc', x, moc_node)\n\n\n@feature('qt5')\n@after_method('apply_link')", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "@feature('qt5')\n@after_method('apply_link')", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 75, "n_words": 34, "vocab_size": 29, "complexity": 2, "nloc": 9, "token_counts": 99, "n_ast_nodes": 182, "n_identifiers": 20, "random_cut": "def process_mocs(self):\n lst = self.to_nodes(getattr(self, 'moc', []))\n self.source = self.to_list(getattr(self, 'source', []))\n for x in lst:\n prefix = x.name[:x.name.rfind('.')]\n moc_target = 'moc_%s.%d.cpp" }, { "id": 221112, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bdb.py", "file_name": "bdb.py", "fun_name": "user_call", "commit_message": "add python 3.10.4 for windows", "code": "def user_call(self, frame, args):\n name = frame.f_code.co_name\n if not name: name = '???'\n print('+++ call', name, args)", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 37, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 4, "token_counts": 31, "n_ast_nodes": 51, "n_identifiers": 8, "random_cut": "def user_call(self, frame, args):\n name = frame.f_code.co_name\n if not nam" }, { "id": 217585, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/glob.py", "file_name": "glob.py", "fun_name": "glob1", "commit_message": "add python 3.10.4 for windows", "code": "def glob1(dirname, pattern):\n return _glob1(dirname, pattern, None, False)\n\n# This helper function recursively yields relative pathnames inside a literal\n# directory.\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 21, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 27, "n_identifiers": 4, "random_cut": "def glob1(dirname, pattern):\n return _glob1(dirname, pattern, None, False)\n\n# This helper function recursively yields rel" }, { "id": 126563, "commit_id": "286343601aa60c8a4222e954388a9055fbe59e90", "repo": "ray", "path": "python/ray/serve/controller.py", "file_name": "controller.py", "fun_name": "get_app_config", "commit_message": "[Serve] Enable lightweight config update (#27000)", "code": "def get_app_config(self) -> Dict:\n checkpoint = self.kv_store.get(CONFIG_CHECKPOINT_KEY)\n if checkpoint is None:\n return ServeApplicationSchema.get_empty_schema_dict()\n else:\n _, config, _ = pickle.loads(checkpoint)\n return config\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 74, "n_words": 21, "vocab_size": 18, "complexity": 2, "nloc": 7, "token_counts": 44, "n_ast_nodes": 71, "n_identifiers": 13, "random_cut": "def get_app_config(self) -> Dict:\n checkpoint = self.kv_store.get(CONFIG_CHECKPOINT_KEY)\n if 
checkpoint is None:\n " }, { "id": 162475, "commit_id": "f7d48541312f1dafbac4fae639cf3a06df776abc", "repo": "yt-dlp", "path": "yt_dlp/extractor/pladform.py", "file_name": "pladform.py", "fun_name": "_real_extract", "commit_message": "[Pladform] Fix redirection to external player (#2550)\n\nAuthored by: KiberInfinity", "code": "def _real_extract(self, url):\n video_id = self._match_id(url)\n\n qs = parse_qs(url)\n pl = qs.get('pl', ['1'])[0]\n\n video = self._download_xml(\n 'http://out.pladform.ru/getVideo', video_id, query={\n 'pl': pl,\n 'videoid': video_id,\n }, fatal=False)\n", "url": "https://github.com/yt-dlp/yt-dlp.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 104, "n_words": 25, "vocab_size": 21, "complexity": 13, "nloc": 60, "token_counts": 398, "n_ast_nodes": 103, "n_identifiers": 13, "random_cut": "def _real_extract(self, url):\n video_id = self._match_id(url)\n\n qs = parse_qs(url)\n pl = qs.get('pl', ['1'])[0]\n\n video = self._download_xml(\n 'http://out.pladform.ru/getVideo', video_id, query={\n 'pl': pl,\n " }, { "id": 22113, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/sessions.py", "file_name": "sessions.py", "fun_name": "prepare_request", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def prepare_request(self, request):\n \n cookies = request.cookies or {}\n\n # Bootstrap CookieJar.\n if not isinstance(cookies, cookielib.CookieJar):\n cookies = cookiejar_from_dict(cookies)\n\n # Merge with session cookies\n merged_cookies = merge_cookies(\n merge_cookies(RequestsCookieJar(), self.cookies), cookies\n )\n\n # Set environment's basic authentication if not explicitly set.\n auth = request.auth\n if self.trust_env and not auth and not self.auth:\n auth = get_netrc_auth(request.url)\n\n p = PreparedRequest()\n p.prepare(\n method=request.method.upper(),\n url=request.url,\n files=request.files,\n data=request.data,\n json=request.json,\n headers=merge_setting(\n request.headers, self.headers, dict_class=CaseInsensitiveDict\n ),\n params=merge_setting(request.params, self.params),\n auth=merge_setting(auth, self.auth),\n cookies=merged_cookies,\n hooks=merge_hooks(request.hooks, self.hooks),\n )\n return p\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 344, "n_words": 77, "vocab_size": 57, "complexity": 6, "nloc": 26, "token_counts": 180, "n_ast_nodes": 273, "n_identifiers": 30, "random_cut": "def prepare_request(self, request):\n \n cookies = request.cookies or {}\n\n # Bootstrap CookieJar.\n if not isinstance(cookies, cookielib.CookieJar):\n cookies = cookiejar_from_dict(cookies)\n\n # Merge with session cookies\n merge" }, { "id": 169601, "commit_id": "fd8e3e773887c0ba9721406b3034494fff2c2567", "repo": "pandas", "path": "pandas/tests/reshape/merge/test_join.py", "file_name": "test_join.py", "fun_name": "test_handle_overlap_arbitrary_key", "commit_message": "CLN/TST: Use fixture instead of setup_method (#49004)", "code": "def test_handle_overlap_arbitrary_key(self, df, df2):\n joined = merge(\n df,\n df2,\n left_on=\"key2\",\n right_on=\"key1\",\n suffixes=(\".foo\", \".bar\"),\n )\n assert \"key1.foo\" in joined\n assert \"key2.bar\" in joined\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 
104, "n_words": 22, "vocab_size": 17, "complexity": 1, "nloc": 10, "token_counts": 42, "n_ast_nodes": 70, "n_identifiers": 9, "random_cut": "def test_handle_overlap_arbitrary_key(self, df, df2):\n joined = merge(\n " }, { "id": 311432, "commit_id": "cc94af2872945667d80f8f76512260ae6205d739", "repo": "core", "path": "homeassistant/components/homekit_controller/config_flow.py", "file_name": "config_flow.py", "fun_name": "_entry_from_accessory", "commit_message": "Remove deprecated helper functions from homekit_controller pairing flow (#65270)", "code": "async def _entry_from_accessory(self, pairing):\n \n # The bulk of the pairing record is stored on the config entry.\n # A specific exception is the 'accessories' key. This is more\n # volatile. We do cache it, but not against the config entry.\n # So copy the pairing data and mutate the copy.\n pairing_data = pairing.pairing_data.copy()\n\n # Use the accessories data from the pairing operation if it is\n # available. Otherwise request a fresh copy from the API.\n # This removes the 'accessories' key from pairing_data at\n # the same time.\n if not (accessories := pairing_data.pop(\"accessories\", None)):\n accessories = await pairing.list_accessories_and_characteristics()\n\n parsed = Accessories.from_list(accessories)\n accessory_info = parsed.aid(1).services.first(\n service_type=ServicesTypes.ACCESSORY_INFORMATION\n )\n name = accessory_info.value(CharacteristicsTypes.NAME, \"\")\n\n return self.async_create_entry(title=name, data=pairing_data)\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 247, "n_words": 113, "vocab_size": 75, "complexity": 2, "nloc": 10, "token_counts": 92, "n_ast_nodes": 160, "n_identifiers": 25, "random_cut": "async def _entry_from_accessory(self, pairing):\n \n # The bulk of the pairing record is stored on the config entry.\n # A specific exception is the 'accessories' key. This is more\n # volatile. We do cache it, but not against the config entry.\n # So copy the pairing data and mutate the copy.\n pairing_data = pairing.pairing_data.copy()\n\n # Use the accessories data from the pairing operation if it is\n # available. Otherwise request a fresh copy from the API.\n # This removes the 'accessories' key from pairing_data at\n # the same time.\n if not (accessories := pairing_data.pop(\"accessories\", None)):\n accessories = await pairing.list_accessories_and_characteristics()\n\n parsed = Accessories.from_list(accessories)\n accessory_info = parsed.aid(1).services.first(\n service_type=ServicesTypes.ACCESSORY_INFORMATION\n )\n name = accessory_info.value(CharacteristicsTypes.NAME, \"\")\n\n return self.async_create_entry(title=name, data=pairing_data)\n\n" }, { "id": 46681, "commit_id": "60d90896486cc3d9f1fc0029ca9833c7d561caa4", "repo": "airflow", "path": "airflow/cli/commands/db_command.py", "file_name": "db_command.py", "fun_name": "downgrade", "commit_message": "Consistent DB upgrade/downgrade arguments (#22537)\n\nThis is a follow up to #22102, and be forewarned, this might be a bikeshed. If this gets contentious at all, I'll just close it and move on.\r\n\r\nI think it's a little bit easier for users to have consistent flags/arguments for the `airflow db upgrade` and `airflow db downgrade` commands. 
This PR just tweaks the argument processing to expect `--to-revision` and `--to-version` instead of `--revision` and `--version`, respectively.\r\n\r\nThat change makes the arguments to those commands more consistent with the `--from-revision` and `--from-version` arguments. Doing so also avoids overloading the `--version` flag, which is usually a flag that prints out the version information of the command itself (eg: Airflow's version, which is available via `airflow version`).\r\n\r\nAn argument against this change is that the `--to-...` arguments can be understood to be implied, like this:\r\n\r\n```bash\r\nairflow db upgrade --from-version 10.15.8 # Upgrade from 10.15.8 to the current Airflow version\r\n```\r\n\r\nand this means that you do not necessarily need to always specify the `--to-...` arguments. By having both `--to-` and `--from-` arguments, users might think that they always need to specify both a `--to-` and `--from-` argument.\r\n\r\nI also fixed an unrelated grammar typo, which corrects the grammar used to log the operation.", "code": "def downgrade(args):\n \n if args.to_revision and args.to_version:\n raise SystemExit(\"Cannot supply both `--to-revision` and `--to-version`.\")\n if args.from_version and args.from_revision:\n raise SystemExit(\"`--from-revision` may not be combined with `--from-version`\")\n if (args.from_revision or args.from_version) and not args.show_sql_only:\n raise SystemExit(\n \"Args `--from-revision` and `--from-version` may only be used with `--show-sql-only`\"\n )\n if not (args.to_version or args.to_revision):\n raise SystemExit(\"Must provide either --to-revision or --to-version.\")\n from_revision = None\n if args.from_revision:\n from_revision = args.from_revision\n elif args.from_version:\n from_revision = REVISION_HEADS_MAP.get(args.from_version)\n if not from_revision:\n raise SystemExit(f\"Unknown version {args.from_version!r} supplied as `--from-version`.\")\n if args.to_version:\n to_revision = REVISION_HEADS_MAP.get(args.to_version)\n if not to_revision:\n raise SystemExit(f\"Downgrading to version {args.to_version} is not supported.\")\n elif args.to_revision:\n to_revision = args.to_revision\n if not args.show_sql_only:\n print(\"Performing downgrade with database \" + repr(settings.engine.url))\n else:\n print(\"Generating sql for downgrade -- downgrade commands will *not* be submitted.\")\n\n if args.show_sql_only or (\n args.yes\n or input(\n \"\\nWarning: About to reverse schema migrations for the airflow metastore. \"\n \"Please ensure you have backed up your database before any upgrade or \"\n \"downgrade operation. Proceed? 
(y/n)\\n\"\n ).upper()\n == \"Y\"\n ):\n db.downgrade(to_revision=to_revision, from_revision=from_revision, show_sql_only=args.show_sql_only)\n if not args.show_sql_only:\n print(\"Downgrade complete\")\n else:\n raise SystemExit(\"Cancelled\")\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 435, "n_words": 173, "vocab_size": 114, "complexity": 21, "nloc": 42, "token_counts": 229, "n_ast_nodes": 412, "n_identifiers": 19, "random_cut": "def downgrade(args):\n \n if args.to_revision and args.to_version:\n raise SystemExit(\"Cannot supply both `--to-revision` and `--to-version`.\")\n if args.from_version and args.from_revision:\n raise SystemExit(\"`--from-revision` may not be combined with `--from-version`\")\n if (args.from_revision or args.from_version) and not args.show_sql_only:\n raise SystemExit(\n \"Args `--from-revision` and `--from-version` may only be used with `--show-sql-only`\"\n )\n if not (args.to_v" }, { "id": 294967, "commit_id": "ab0abdc988ac101217ba043909c4be8b33101ab3", "repo": "core", "path": "tests/components/subaru/test_config_flow.py", "file_name": "test_config_flow.py", "fun_name": "two_factor_verify_form", "commit_message": "Add 2FA support for Subaru integration setup (#68753)\n\n* Add 2FA support for Subaru integration setup\r\n\r\n* Update config flow to abort with 2FA request fail", "code": "async def two_factor_verify_form(hass, two_factor_start_form):\n \n with patch(\n MOCK_API_2FA_REQUEST,\n return_value=True,\n ), patch(MOCK_API_2FA_CONTACTS, new_callable=PropertyMock) as mock_contacts:\n mock_contacts.return_value = MOCK_2FA_CONTACTS\n return await hass.config_entries.flow.async_configure(\n two_factor_start_form[\"flow_id\"],\n user_input={config_flow.CONF_CONTACT_METHOD: \"email@addr.com\"},\n )\n\n\n@pytest.fixture", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 89, "n_words": 24, "vocab_size": 24, "complexity": 1, "nloc": 10, "token_counts": 61, "n_ast_nodes": 108, "n_identifiers": 19, "random_cut": "async def two_factor_verify_form(hass, two_factor_start_form):\n \n with patch(\n MOCK_API_2FA_REQUEST,\n " }, { "id": 177839, "commit_id": "323578fd2e49def3df2b3d7b7a9fc9af0132d592", "repo": "label-studio", "path": "label_studio/tasks/models.py", "file_name": "models.py", "fun_name": "remove_predictions_from_project", "commit_message": "fix: DEV-2372: Delete action doesn't decrease total annotations counter (#2354)\n\n* fix: DEV-2372: Delete action doesn't decrease total annotations counter\r\n\r\n* Update test_api_tasks.py\r\n\r\n* Fix negative total annotations\r\n\r\n* Update models.py", "code": "def remove_predictions_from_project(sender, instance, **kwargs):\n \n instance.task.total_predictions = instance.task.predictions.all().count() - 1\n instance.task.save(update_fields=['total_predictions'])\n\n@receiver(post_save, sender=Prediction)", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "@receiver(post_save, sender=Prediction)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 20, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 3, "token_counts": 44, "n_ast_nodes": 88, "n_identifiers": 14, "random_cut": "def remove_predictions_from_project(sender, instance, **kwargs):\n \n instance.task.total_predic" }, { "id": 43626, "commit_id": "9c0ba1b6abc593bad6fe51ed52d9c0963cd09b7c", 
"repo": "airflow", "path": "tests/providers/amazon/aws/operators/test_ecs.py", "file_name": "test_ecs.py", "fun_name": "set_up_log_fetcher", "commit_message": "Standardize AWS ECS naming (#20332)\n\n* Rename ECS Hook and Operator", "code": "def set_up_log_fetcher(self, logger_mock):\n self.logger_mock = logger_mock\n\n self.log_fetcher = EcsTaskLogFetcher(\n log_group=\"test_log_group\",\n log_stream_name=\"test_log_stream_name\",\n fetch_interval=timedelta(milliseconds=1),\n logger=logger_mock,\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 78, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 8, "token_counts": 40, "n_ast_nodes": 63, "n_identifiers": 11, "random_cut": "def set_up_log_fetcher(self, logger_mock):\n self.logger_mock = logger_mock\n\n self.log_fetcher = EcsTaskLogFetcher(\n log_group=\"test_log_group\",\n log_stream_name=\"test_log_stream_name\",\n fetch_interval=timedelta(milliseconds=1),\n logger=logger_mock,\n )\n" }, { "id": 183139, "commit_id": "191a6b7775a7ca3cde794eef96ebd86fac4fb455", "repo": "textual", "path": "src/textual/app.py", "file_name": "app.py", "fun_name": "process_messages", "commit_message": "fix broken align and error logic", "code": "async def process_messages(self) -> None:\n active_app.set(self)\n log(\"---\")\n log(f\"driver={self.driver_class}\")\n\n if os.getenv(\"TEXTUAL_DEVTOOLS\") == \"1\":\n try:\n await self.devtools.connect()\n self.log(f\"Connected to devtools ({self.devtools.url})\")\n except DevtoolsConnectionError:\n self.log(f\"Couldn't connect to devtools ({self.devtools.url})\")\n try:\n if self.css_file is not None:\n self.stylesheet.read(self.css_file)\n self.stylesheet.parse()\n if self.css is not None:\n self.stylesheet.add_source(\n self.css, path=f\"<{self.__class__.__name__}>\"\n )\n except Exception as error:\n self.on_exception(error)\n self._print_error_renderables()\n return\n\n if self.css_monitor:\n self.set_interval(0.5, self.css_monitor, name=\"css monitor\")\n self.log(\"started\", self.css_monitor)\n\n self._running = True\n try:\n load_event = events.Load(sender=self)\n await self.dispatch_message(load_event)\n # Wait for the load event to be processed, so we don't go in to application mode beforehand\n # await load_event.wait()\n\n driver = self._driver = self.driver_class(self.console, self)\n driver.start_application_mode()\n try:\n mount_event = events.Mount(sender=self)\n await self.dispatch_message(mount_event)\n\n # TODO: don't override `self.console` here\n self.console = Console(file=sys.__stdout__)\n self.title = self._title\n self.refresh()\n await self.animator.start()\n\n with redirect_stdout(StdoutRedirector(self.devtools, self._log_file)): # type: ignore\n await super().process_messages()\n await self.animator.stop()\n await self.close_all()\n finally:\n driver.stop_application_mode()\n except Exception as error:\n self.on_exception(error)\n finally:\n self._running = False\n if self._exit_renderables:\n self._print_error_renderables()\n if self.devtools.is_connected:\n await self._disconnect_devtools()\n if self._log_console is not None:\n self._log_console.print(\n f\"Disconnected from devtools ({self.devtools.url})\"\n )\n if self._log_file is not None:\n self._log_file.close()\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 919, "n_words": 163, "vocab_size": 108, "complexity": 14, "nloc": 
58, "token_counts": 370, "n_ast_nodes": 672, "n_identifiers": 62, "random_cut": "async def process_messages(self) -> None:\n active_app.set(self)\n log(\"---\")\n log(f\"driver={self.driver_class}\")\n\n if os.getenv(\"TEXTUAL_DEVTOOLS\") == \"1\":\n try:\n await self.devtools.connect()\n self.log(f\"Connected to devtools ({self.devtools.url})\")\n except DevtoolsConnectionError:\n self.log(f\"Couldn't connect to devtools ({self.devtools.url})\")\n try:\n if self.css_file is not None:\n self.stylesheet.read(self.css_file)\n self.stylesheet.parse()\n if self.css is not None:\n self.stylesheet.add_source(\n self.css, path=f\"<{self.__class__.__name__}>\"\n )\n except Exception as error:\n self.on_exception(error)\n self._print_error_renderables()\n return\n\n if self.css_monitor:\n self.set_interval(0.5, self.css_monitor, name=\"css monitor\")\n self.log(\"started\", self.css_monitor)\n\n self._running = True\n try:\n load_event = events.Load(sender=self)\n " }, { "id": 164085, "commit_id": "f46df091df3afea25a273f491d1f6b2c7d20b32c", "repo": "pandas", "path": "pandas/tests/io/pytables/test_read.py", "file_name": "test_read.py", "fun_name": "test_pytables_native2_read", "commit_message": "TST: Remove unused fixtures (#45692)\n\n* TST: Remove unused fixtures\r\n\r\n* Undo a removed fixture\r\n\r\n* Add back other fixtures\r\n\r\n* Undo a file\r\n\r\n* Try undoing this?\r\n\r\n* Revert \"Try undoing this?\"\r\n\r\nThis reverts commit 0e56cb04f5e8cb1f7b2ac4c5e6191485bb2fe1ab.", "code": "def test_pytables_native2_read(datapath):\n with ensure_clean_store(\n datapath(\"io\", \"data\", \"legacy_hdf\", \"pytables_native2.h5\"), mode=\"r\"\n ) as store:\n str(store)\n d1 = store[\"detector\"]\n assert isinstance(d1, DataFrame)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 52, "n_words": 19, "vocab_size": 19, "complexity": 1, "nloc": 7, "token_counts": 43, "n_ast_nodes": 79, "n_identifiers": 9, "random_cut": "def test_pytables_native2_read(datapath):\n with ensure" }, { "id": 72484, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/views/pages/edit.py", "file_name": "edit.py", "fun_name": "get_context_data", "commit_message": "Reformat with black", "code": "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"page\": self.page,\n \"page_for_status\": self.page_for_status,\n \"content_type\": self.page_content_type,\n \"edit_handler\": self.edit_handler,\n \"errors_debug\": self.errors_debug,\n \"action_menu\": PageActionMenu(\n self.request, view=\"edit\", page=self.page\n ),\n \"preview_modes\": self.page.preview_modes,\n \"form\": self.form,\n \"next\": self.next_url,\n \"has_unsaved_changes\": self.has_unsaved_changes,\n \"page_locked\": self.page_perms.page_locked(),\n \"workflow_state\": self.workflow_state\n if self.workflow_state and self.workflow_state.is_active\n else None,\n \"current_task_state\": self.page.current_workflow_task_state,\n \"publishing_will_cancel_workflow\": self.workflow_tasks\n and getattr(settings, \"WAGTAIL_WORKFLOW_CANCEL_ON_PUBLISH\", True),\n \"locale\": None,\n \"translations\": [],\n }\n )\n\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n user_perms = UserPagePermissionsProxy(self.request.user)\n\n context.update(\n {\n \"locale\": self.page.locale,\n \"translations\": [\n {\n \"locale\": translation.locale,\n \"url\": reverse(\n 
\"wagtailadmin_pages:edit\", args=[translation.id]\n ),\n }\n for translation in self.page.get_translations()\n .only(\"id\", \"locale\", \"depth\")\n .select_related(\"locale\")\n if user_perms.for_page(translation).can_edit()\n ],\n }\n )\n\n return context\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 838, "n_words": 93, "vocab_size": 75, "complexity": 7, "nloc": 47, "token_counts": 261, "n_ast_nodes": 432, "n_identifiers": 39, "random_cut": "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"page\": self.page,\n \"page_for_status\": self.page_for_status,\n \"content_type\": self.page_content_type,\n \"edit_handler\": self.edit_handler,\n \"errors_debug\": self.errors_debug,\n \"action_menu\": PageActionMenu(\n self.request, view=\"edit\", page=self.page\n ),\n \"preview_modes\": self.page.preview_modes,\n \"form\": self.form,\n \"next\": self.next_url,\n \"has_unsaved_changes\": self.has_unsaved_changes,\n \"page_locked\": self.page_perms.page_locked(),\n \"workflow_state\": self.workflow_state\n if self.workflow_state and self.workflow_state.is_active\n else None,\n \"current_task_state\": self.page.current_workflow_task_state,\n \"publishing_will_cancel_workflow\": self.workflow_tasks\n and getattr(settings, \"WAGTAIL_WORKFLOW_CANCEL_ON_PUBLISH\", True),\n \"locale\": None,\n \"translations\": [],\n }\n )\n\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n user_perms = UserPagePermissionsProxy(self.request.user)\n\n context.update(\n {\n \"locale\": self.page.locale,\n \"translations\": [\n {\n \"locale\": translation.locale,\n \"url\": reverse(\n \"wagtailadmin_pages:edit\", args=[translation.id]\n ),\n }\n for translation in self.page.get_translations()\n .only(\"id\", \"locale\", \"depth\")\n .select_related(\"locale\")\n if user_perms.for_page(translation).can_edit()" }, { "id": 216021, "commit_id": "121c61c832a58874acf5ad55c7eb20c598995dff", "repo": "salt", "path": "tests/pytests/functional/modules/test_vault.py", "file_name": "test_vault.py", "fun_name": "test_vault_read_secret_issue_61084", "commit_message": "Add tests and changelog", "code": "def test_vault_read_secret_issue_61084(sys_mod):\n \n result = sys_mod.argspec(\"vault.read_secret\")\n assert isinstance(result, dict)\n assert isinstance(result.get(\"vault.read_secret\"), dict)\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 23, "n_words": 11, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 33, "n_ast_nodes": 58, "n_identifiers": 7, "random_cut": "def test_vault_read_secret_issue_61084(sys_mod):\n \n res" }, { "id": 137963, "commit_id": "8e680c483ce326cefc62e44f68ab1a6948b1c3d2", "repo": "ray", "path": "rllib/examples/simulators/sumo/marlenvironment.py", "file_name": "marlenvironment.py", "fun_name": "step", "commit_message": "[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). 
(#28369)", "code": "def step(self, action_dict):\n \n self.resetted = False\n self.steps += 1\n logger.debug(\n \"====> [SUMOTestMultiAgentEnv:step] Episode: %d - Step: %d <====\",\n self.episodes,\n self.steps,\n )\n dones = {}\n dones[\"__all__\"] = False\n\n shuffled_agents = sorted(\n action_dict.keys()\n ) # it may seem not smar to sort something that\n # may need to be shuffled afterwards, but it\n # is a matter of consistency instead of using\n # whatever insertion order was used in the dict\n if self._config[\"scenario_config\"][\"agent_rnd_order\"]:\n # randomize the agent order to minimize SUMO's\n # insertion queues impact\n logger.debug(\"Shuffling the order of the agents.\")\n self.rndgen.shuffle(shuffled_agents) # in-place shuffle\n\n # Take action\n for agent in shuffled_agents:\n self.agents[agent].step(action_dict[agent], self.simulation)\n\n logger.debug(\"Before SUMO\")\n ongoing_simulation = self.simulation.step(\n until_end=False, agents=set(action_dict.keys())\n )\n logger.debug(\"After SUMO\")\n\n # end of the episode\n if not ongoing_simulation:\n logger.info(\"Reached the end of the SUMO simulation.\")\n dones[\"__all__\"] = True\n\n obs, rewards, infos = {}, {}, {}\n\n for agent in action_dict:\n # check for collisions\n if self.simulation.collisions[agent] > 0:\n # punish the agent and remove it from the simulation\n dones[agent] = True\n obs[agent] = [0, 0]\n rewards[agent] = -self.agents[agent].config[\"max_speed\"]\n # infos[agent] = \"Collision\"\n self.simulation.traci_handler.remove(agent, reason=tc.REMOVE_VAPORIZED)\n else:\n dones[agent] = agent not in self.simulation.veh_subscriptions\n obs[agent] = self.get_observation(agent)\n rewards[agent] = self.get_reward(agent)\n # infos[agent] = \"\"\n\n logger.debug(\"Observations: %s\", pformat(obs))\n logger.debug(\"Rewards: %s\", pformat(rewards))\n logger.debug(\"Dones: %s\", pformat(dones))\n logger.debug(\"Info: %s\", pformat(infos))\n logger.debug(\"========================================================\")\n return obs, rewards, dones, dones, infos\n\n ###########################################################################\n # ACTIONS & OBSERATIONS SPACE\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 743, "n_words": 217, "vocab_size": 136, "complexity": 6, "nloc": 43, "token_counts": 329, "n_ast_nodes": 549, "n_identifiers": 36, "random_cut": "def step(self, action_dict):\n \n self.resetted = False\n self.steps += 1\n logger.debug(\n \"====> [SUMOTestMultiAgentEnv:step] Episode: %d - Step: %d <====\",\n self.episodes,\n self.steps,\n )\n dones = {}\n dones[\"__all__\"] = False\n\n shuffled_agents = sorted(\n action_dict.keys()\n ) # it may seem not smar to sort something that\n # may need to be shuffled afterwards, but it\n # is a matter of consistency instead of using\n # whatever insertion order was used in the dict\n if self._config[\"scenario_config\"][\"agent_rnd_order\"]:\n # randomize the agent order to minimize SUMO's\n # insertion queues impact\n logger.debug(\"Shuffling the order of the agents.\")\n self.rndgen.shuffle(shuffled_agents) # in-place shuffle\n\n # Take action\n for agent in shuffled_agents:\n self.agents[agent].step(action_dict[agent], self.simulation)\n\n logger.debug(\"Before SUMO\")\n ongoing_simulation = self.simulation.step(\n until_end=False, agents=set(action_dict.keys())\n )\n logger.debug(\"After SUMO\")\n\n # end of the episode\n " }, { "id": 1077, "commit_id": 
"44fa2242416c7131fef4f00db19c5ca36af031dc", "repo": "PySyft", "path": "packages/syft/src/syft/core/tensor/autodp/phi_tensor.py", "file_name": "phi_tensor.py", "fun_name": "__neg__", "commit_message": "Renamed entities -> data subject, NDEPT -> phi tensor", "code": "def __neg__(self) -> PhiTensor:\n\n return PhiTensor(\n child=self.child * -1,\n min_vals=self.max_vals * -1,\n max_vals=self.min_vals * -1,\n data_subjects=self.entities,\n )\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 74, "n_words": 17, "vocab_size": 13, "complexity": 1, "nloc": 7, "token_counts": 44, "n_ast_nodes": 66, "n_identifiers": 8, "random_cut": "def __neg__(self) -> PhiTensor:\n\n return PhiTensor(\n child=self.child * -1,\n min_vals=sel" }, { "id": 107321, "commit_id": "8cd60178545b70c82a99504c53332e16713a60bd", "repo": "matplotlib", "path": "lib/matplotlib/backends/backend_wx.py", "file_name": "backend_wx.py", "fun_name": "_get_toolbar", "commit_message": "Ensure that all toolbar (old/new) subclasses can be init'ed consistently\n\ni.e. with the same signature: with `canvas` as sole arg for the\nold-style toolbars, with `toolmanager` as sole arg for the new ones.\n\nSubclasses that explicitly support setting a parent widget keep that\nsupport (except for gtk, which stashed that in the `.win` attribute but\nnever used it), but that argument is always optional now; the default is\nthe canvas' parent.\n\nThe goal is to later replace all _get_toolbar implementations by a\nsimple call (always with the same signature (dependent on the value of\nrcParams[\"toolbar\"])) to the correct class in the FigureManagerBase\nconstructor.", "code": "def _get_toolbar(self):\n if mpl.rcParams['toolbar'] == 'toolbar2':\n toolbar = NavigationToolbar2Wx(self.canvas)\n elif mpl.rcParams['toolbar'] == 'toolmanager':\n toolbar = ToolbarWx(self.toolmanager)\n else:\n toolbar = None\n return toolbar\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 82, "n_words": 22, "vocab_size": 15, "complexity": 3, "nloc": 8, "token_counts": 48, "n_ast_nodes": 85, "n_identifiers": 9, "random_cut": "def _get_toolbar(self):\n if mpl.rcParams['t" }, { "id": 177258, "commit_id": "50ff08de69c6e9541cd6c029bede5dabf56cfe73", "repo": "networkx", "path": "networkx/algorithms/operators/tests/test_all.py", "file_name": "test_all.py", "fun_name": "test_intersection_all", "commit_message": "Make all.py generator friendly (#5984)\n\n* Make compose_all generator friendly\r\n\r\n* Make disjoint_union_all and intersection_all generator friendly\r\n\r\n* Refactor disjoint_union_all to yield relabeled graphs\r\n\r\n* Make union_all generator friendly\r\n\r\n* Fix intersection_all\r\n\r\n* Fix union_all signature\r\n\r\n* Allow passing an infinite rename generator to union_all\r\n\r\n* Copy over generalizations to binary.py\r\n\r\n* Clean up rename\r\n\r\n* Simplify first_label in disjoint_union_all\r\n\r\n* Simplify disjoint_union_all\r\n\r\n* Add missing R.graph.update in intersection_all", "code": "def test_intersection_all():\n G = nx.Graph()\n H = nx.Graph()\n R = nx.Graph(awesome=True)\n G.add_nodes_from([1, 2, 3, 4])\n G.add_edge(1, 2)\n G.add_edge(2, 3)\n H.add_nodes_from([1, 2, 3, 4])\n H.add_edge(2, 3)\n H.add_edge(3, 4)\n R.add_nodes_from([1, 2, 3, 4])\n R.add_edge(2, 3)\n R.add_edge(4, 1)\n I = nx.intersection_all([G, H, R])\n assert set(I.nodes()) == {1, 2, 3, 
4}\n assert sorted(I.edges()) == [(2, 3)]\n assert I.graph[\"awesome\"]\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 101, "n_words": 54, "vocab_size": 37, "complexity": 1, "nloc": 17, "token_counts": 175, "n_ast_nodes": 262, "n_identifiers": 16, "random_cut": "def test_intersection_all():\n G = nx.Graph()\n H = nx.Graph()\n R = nx.Graph(awesome=True)\n G.add_nodes_from([1, 2, 3, 4])\n G.add_edge(1, 2)\n G.add_edge(2, 3)\n H.add_nodes_from([1, 2, 3, 4])\n H" }, { "id": 136173, "commit_id": "993008e1ed8c592b268f0e66dac3260c8a14a893", "repo": "ray", "path": "python/ray/tune/tuner.py", "file_name": "tuner.py", "fun_name": "get_results", "commit_message": "[Tune] Fix Jupyter output with Ray Client and `Tuner` (#29956)\n\nEnsures that we can have rich Jupyter output with the Tuner API.\r\n\r\nSigned-off-by: Antoni Baum ", "code": "def get_results(self) -> ResultGrid:\n \n if not self._is_ray_client:\n return self._local_tuner.get_results()\n else:\n (\n progress_reporter,\n string_queue,\n ) = self._prepare_remote_tuner_for_jupyter_progress_reporting()\n fit_future = self._remote_tuner.fit.remote()\n _stream_client_output(\n fit_future,\n progress_reporter,\n string_queue,\n )\n return ray.get(fit_future)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 199, "n_words": 26, "vocab_size": 21, "complexity": 2, "nloc": 34, "token_counts": 63, "n_ast_nodes": 102, "n_identifiers": 15, "random_cut": "def get_results(self) -> ResultGrid:\n \n if not self._is_ray_c" }, { "id": 45149, "commit_id": "de41ccc922b3d1f407719744168bb6822bde9a58", "repo": "airflow", "path": "tests/core/test_configuration.py", "file_name": "test_configuration.py", "fun_name": "test_auth_backends_adds_session", "commit_message": "Change the default auth backend to session (#21640)\n\n* Change default backend\r\n\r\nAs part of AIP-42, change the default auth backend to validate using the session,\r\nso that the UI can use the API. 
If auth_backends has been set to a non-default\r\nvalue, include the session in the list of backends.\r\n\r\n* When updating a deprecated config value from env, set it back to env\r\n\r\nOtherwise this means the config seen by an execed sub-process would be\r\ndifferent (and wrong, taking neither the configured env var value, nor\r\nthe new default, but instead just what is in the config file!)\r\n\r\n* Remove the chart auth_backends setting\r\n\r\nCo-authored-by: Ash Berlin-Taylor ", "code": "def test_auth_backends_adds_session(self):\n test_conf = AirflowConfigParser(default_config='')\n # Guarantee we have deprecated settings, so we test the deprecation\n # lookup even if we remove this explicit fallback\n test_conf.deprecated_values = {\n 'api': {\n 'auth_backends': (\n re.compile(r'^airflow\\.api\\.auth\\.backend\\.deny_all$|^$'),\n 'airflow.api.auth.backend.session',\n '3.0',\n ),\n },\n }\n test_conf.read_dict({'api': {'auth_backends': 'airflow.api.auth.backend.basic_auth'}})\n\n with pytest.warns(FutureWarning):\n test_conf.validate()\n assert (\n test_conf.get('api', 'auth_backends')\n == 'airflow.api.auth.backend.basic_auth\\nairflow.api.auth.backend.session'\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 271, "n_words": 51, "vocab_size": 45, "complexity": 1, "nloc": 18, "token_counts": 81, "n_ast_nodes": 152, "n_identifiers": 14, "random_cut": "def test_auth_backends_adds_session(self):\n test_conf = AirflowConfigParser(default_config='')\n # Guarantee we have deprecated settings, so we test the deprecation\n # lookup even if we remove this explicit fallback\n test_conf.deprecated_values = {\n 'api': {\n 'auth_backends': (\n re.compile(r'^airflow\\.api\\.auth\\.backend\\.deny_all$|^$'),\n 'airflow.api.auth.backend.session',\n '3.0',\n ),\n },\n }\n test_conf.read_dict({'api': {'auth_backends': 'airflow.api.auth.backend.basic_auth'}})\n\n with pytest.warns(FutureWarning):\n test_conf.validate()\n assert (\n test_conf.get('api', " }, { "id": 72736, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/api/v2/tests/test_images.py", "file_name": "test_images.py", "fun_name": "test_all_fields_then_remove_something", "commit_message": "Reformat with black", "code": "def test_all_fields_then_remove_something(self):\n response = self.get_response(fields=\"*,-title,-tags\")\n content = json.loads(response.content.decode(\"UTF-8\"))\n\n for image in content[\"items\"]:\n self.assertEqual(set(image.keys()), {\"id\", \"meta\", \"width\", \"height\"})\n self.assertEqual(\n set(image[\"meta\"].keys()), {\"type\", \"detail_url\", \"download_url\"}\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 91, "n_words": 23, "vocab_size": 22, "complexity": 2, "nloc": 8, "token_counts": 85, "n_ast_nodes": 150, "n_identifiers": 13, "random_cut": "def test_all_fields_then_remove_something(self):\n response = self.get_response(fields=\"*,-tit" }, { "id": 151661, "commit_id": "659c8c237f7a7e30ad0929fed448c449a01fb2bf", "repo": "freqtrade", "path": "freqtrade/rpc/api_server/webserver.py", "file_name": "webserver.py", "fun_name": "_api_shutdown_event", "commit_message": "initial revision", "code": "async def _api_shutdown_event(self):\n if ApiServer._message_stream:\n ApiServer._message_stream = None\n\n if self._ws_queue:\n self._ws_queue = None\n\n if 
self._ws_publisher_task:\n self._ws_publisher_task.cancel()\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 69, "n_words": 16, "vocab_size": 12, "complexity": 4, "nloc": 7, "token_counts": 37, "n_ast_nodes": 62, "n_identifiers": 7, "random_cut": "async def _api_shutdown_event(self):" }, { "id": 31239, "commit_id": "66336dc18374cdba550759cc923c36217159d4c9", "repo": "transformers", "path": "src/transformers/pipelines/visual_question_answering.py", "file_name": "visual_question_answering.py", "fun_name": "_forward", "commit_message": "Add Visual Question Answering (VQA) pipeline (#17286)\n\n* wip\r\n\r\n* rebase\r\n\r\n* all tests pass\r\n\r\n* rebase\r\n\r\n* ready for PR\r\n\r\n* address comments\r\n\r\n* fix styles\r\n\r\n* add require_torch to pipeline test\r\n\r\n* remove remote image to improve CI consistency\r\n\r\n* address comments; fix tf/flax tests\r\n\r\n* address comments; fix tf/flax tests\r\n\r\n* fix tests; add alias\r\n\r\n* repo consistency tests\r\n\r\n* Update src/transformers/pipelines/visual_question_answering.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* address comments\r\n\r\n* Update src/transformers/pipelines/visual_question_answering.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* merge\r\n\r\n* Update src/transformers/models/auto/modeling_auto.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* merge\r\n\r\nCo-authored-by: Sijun He \r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: Patrick von Platen \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def _forward(self, model_inputs):\n model_outputs = self.model(**model_inputs)\n return model_outputs\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 21, "n_words": 8, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 29, "n_identifiers": 5, "random_cut": "def _forward(self, model_inputs):\n model_outputs = self.model(**model_inputs)\n return model_outputs\n" }, { "id": 27848, "commit_id": "319a64dabf0c9449833797a089ab6a0bf02b1505", "repo": "saleor", "path": "saleor/graphql/checkout/tests/mutations/test_checkout_create.py", "file_name": "test_checkout_create.py", "fun_name": "test_checkout_create", "commit_message": "Extract tests to separate files for mutations checkout shipping/billing address, checkout create (#10082)\n\n* Extract tests to separate files for mutations checkout shipping/billing address, checkout create\r\n\r\n* Move mutation files to separate directory\r\n\r\n* Add missing init file", "code": "def test_checkout_create(api_client, stock, graphql_address_data, channel_USD):\n \n variant = stock.product_variant\n variant_id = graphene.Node.to_global_id(\"ProductVariant\", variant.id)\n test_email = \"test@example.com\"\n shipping_address = graphql_address_data\n variables = {\n \"checkoutInput\": {\n \"channel\": channel_USD.slug,\n \"lines\": [{\"quantity\": 1, \"variantId\": variant_id}],\n \"email\": test_email,\n \"shippingAddress\": shipping_address,\n }\n }\n assert not Checkout.objects.exists()\n response = api_client.post_graphql(MUTATION_CHECKOUT_CREATE, variables)\n content = get_graphql_content(response)[\"data\"][\"checkoutCreate\"]\n\n 
new_checkout = Checkout.objects.first()\n assert new_checkout is not None\n checkout_data = content[\"checkout\"]\n assert checkout_data[\"token\"] == str(new_checkout.token)\n assert new_checkout.lines.count() == 1\n checkout_line = new_checkout.lines.first()\n assert checkout_line.variant == variant\n assert checkout_line.quantity == 1\n assert new_checkout.shipping_address is not None\n assert new_checkout.shipping_address.first_name == shipping_address[\"firstName\"]\n assert new_checkout.shipping_address.last_name == shipping_address[\"lastName\"]\n assert (\n new_checkout.shipping_address.street_address_1\n == shipping_address[\"streetAddress1\"]\n )\n assert (\n new_checkout.shipping_address.street_address_2\n == shipping_address[\"streetAddress2\"]\n )\n assert new_checkout.shipping_address.postal_code == shipping_address[\"postalCode\"]\n assert new_checkout.shipping_address.country == shipping_address[\"country\"]\n assert new_checkout.shipping_address.city == shipping_address[\"city\"].upper()\n assert not Reservation.objects.exists()\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 289, "n_words": 116, "vocab_size": 71, "complexity": 1, "nloc": 39, "token_counts": 268, "n_ast_nodes": 444, "n_identifiers": 42, "random_cut": "def test_checkout_create(api_client, stock, graphql_address_data, channel_USD):\n \n variant = stock.product_variant\n variant_id = graphene.Node.to_global_id(\"ProductVariant\", variant.id)\n test_email = \"test@example.com\"\n shipping_address = graphql_address_data\n variables = {\n \"checkoutInput\": {\n \"channel\": channel_USD.slug,\n \"lines\": [{\"quantity\": 1, \"variantId\": variant_id}],\n \"email\": test_email,\n \"shippingAddress\": shipping_address,\n }\n }\n assert not Checkout.objects.exists()\n response = api_client.post_graphql(MUTATION_CHECKOUT_CREATE, variables)\n content = get_graphql_content(response)[\"data\"][\"checkoutCreate\"]\n\n new_checkout = Checkout.objects.first()\n assert new_checkout is not None\n checkout_data = content[\"checkout\"]\n assert checkout_data[\"token\"] == str(new_checkout.token)\n assert new_checkout.lines.count() == 1\n checkout_line = new_checkout.lines.first()\n assert checkout_line.variant == variant\n assert checkout_line.quantity == 1\n assert new_checkout.shipping_address is not None\n assert new_checkout.shipping_address.first_name == shipping_address[\"firstName\"]\n assert new_checkout." }, { "id": 207229, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_inlines/tests.py", "file_name": "tests.py", "fun_name": "test_help_text", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_help_text(self):\n \n response = self.client.get(reverse(\"admin:admin_inlines_holder4_add\"))\n self.assertContains(\n response, '
    Awesome stacked help text is awesome.
    ', 4\n )\n self.assertContains(\n response,\n '',\n 1,\n )\n # ReadOnly fields\n response = self.client.get(reverse(\"admin:admin_inlines_capofamiglia_add\"))\n self.assertContains(\n response,\n '',\n 1,\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 281, "n_words": 68, "vocab_size": 38, "complexity": 1, "nloc": 22, "token_counts": 70, "n_ast_nodes": 130, "n_identifiers": 7, "random_cut": "def test_help_text(self):\n \n response = self.client.get(reverse(\"admin:admin_inlines_holder4_add\"))\n self.assertContains(\n response, '
    Awesome stacked help text is awesome.
    ', 4\n )\n self.assertContains(\n response,\n '',\n 1,\n )\n # ReadOnly fields\n response = self.client.get(reverse(\"admin:admin_inlines_capofamiglia_add\"))\n self.assertContains(\n response,\n " }, { "id": 81403, "commit_id": "ad08eafb9a8ed775dc0cf21eb38e443651e11184", "repo": "awx", "path": "awx/api/views/debug.py", "file_name": "debug.py", "fun_name": "get", "commit_message": "add debug views for task manager(s)\n\nimplement https://github.com/ansible/awx/issues/12446\nin development environment, enable set of views that run\nthe task manager(s).\n\nAlso introduce a setting that disables any calls to schedule()\nthat do not originate from the debug views when in the development\nenvironment. With guards around both if we are in the development\nenvironment and the setting, I think we're pretty safe this won't get\ntriggered unintentionally.\n\nuse MODE to determine if we are in devel env\n\nAlso, move test for skipping task managers to the tasks file", "code": "def get(self, request, format=None):\n \n data = OrderedDict()\n data['task_manager'] = '/api/debug/task_manager/'\n data['dependency_manager'] = '/api/debug/dependency_manager/'\n data['workflow_manager'] = '/api/debug/workflow_manager/'\n return Response(data)\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 60, "n_words": 18, "vocab_size": 15, "complexity": 1, "nloc": 6, "token_counts": 40, "n_ast_nodes": 76, "n_identifiers": 7, "random_cut": "def get(self, request, format=None):\n \n data = OrderedDict()\n data['task_manager'] = '/api/debug/task_manager/'\n data['dependency_manager'] = '/api/debug/dependency_manager/'\n data['workflow_manager'] =" }, { "id": 213831, "commit_id": "d743336b1f3654cd0315f380f43eed4116997c1d", "repo": "ivy", "path": "ivy_tests/test_core/test_general.py", "file_name": "test_general.py", "fun_name": "test_get_num_dims", "commit_message": "renamed dev_str arg to dev for all methods.", "code": "def test_get_num_dims(object_in, dtype, as_tensor, tensor_fn, dev, call):\n # smoke test\n if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:\n # mxnet does not support 0-dimensional variables\n pytest.skip()\n ret = ivy.get_num_dims(tensor_fn(object_in, dtype, dev), as_tensor)\n # type test\n if as_tensor:\n assert ivy.is_array(ret)\n else:\n assert isinstance(ret, int)\n ret = ivy.array(ret)\n # cardinality test\n assert list(ret.shape) == []\n # value test\n assert np.array_equal(ivy.to_numpy(ret), np.asarray(len(np.asarray(object_in).shape), np.int32))\n # compilation test\n if call in [helpers.torch_call]:\n # pytorch scripting does not support Union\n return\n if not ivy.wrapped_mode():\n helpers.assert_compilable(ivy.shape)\n\n\n# minimum\n@pytest.mark.parametrize(\n \"xy\", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])\n@pytest.mark.parametrize(\n \"dtype\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"xy\", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])\n@pytest.mark.parametrize(\n \"dtype\", ['float32'])\n@pytest.mark.parametrize(\n \"tensor_fn\", [ivy.array, helpers.var_fn])", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 205, "n_words": 106, "vocab_size": 76, "complexity": 7, "nloc": 15, "token_counts": 153, "n_ast_nodes": 375, 
"n_identifiers": 32, "random_cut": "def test_get_num_dims(object_in, dtype, as_tensor, tensor_fn, dev, call):\n # smoke test\n if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:\n # mxnet does not support 0-dimensional variables\n pytest.skip()\n ret = ivy.get_num_dims(tensor_fn(object_in, dtype, dev), as_tensor)\n # type test\n if as_tensor:\n " }, { "id": 51933, "commit_id": "2e373966a7fd3119c205350fb14d0b7bfe74185d", "repo": "PaddleHub", "path": "modules/image/Image_editing/super_resolution/swinir_l_real_sr_x4/module.py", "file_name": "module.py", "fun_name": "serving_method", "commit_message": "add swinir_l_real_sr_x4 (#2076)\n\n* git add swinir_l_real_sr_x4\r\n\r\n* fix typo\r\n\r\n* fix typo\r\n\r\nCo-authored-by: chenjian ", "code": "def serving_method(self, image, **kwargs):\n \n image = base64_to_cv2(image)\n img_output = self.real_sr(image=image, **kwargs)\n\n return cv2_to_base64(img_output)\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 35, "n_ast_nodes": 57, "n_identifiers": 8, "random_cut": "def serving_method(self, image, **kwargs):\n \n image = base64_to_cv2(image)\n img_output = self.real_sr(image=image, **kwargs)\n" }, { "id": 84213, "commit_id": "df69e1d9792a5ea7a72e32981f68a46a7fb88ce1", "repo": "zulip", "path": "corporate/tests/test_stripe.py", "file_name": "test_stripe.py", "fun_name": "test_invoice_plan_without_stripe_customer", "commit_message": "mypy: Enable truthy-bool errors.\n\nSigned-off-by: Anders Kaseorg ", "code": "def test_invoice_plan_without_stripe_customer(self) -> None:\n self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, False, False)\n plan = get_current_plan_by_realm(get_realm(\"zulip\"))\n assert plan is not None\n plan.customer.stripe_customer_id = None\n plan.customer.save(update_fields=[\"stripe_customer_id\"])\n with self.assertRaises(BillingError) as context:\n invoice_plan(plan, timezone_now())\n self.assertRegex(\n context.exception.error_description,\n \"Realm zulip has a paid plan without a Stripe customer\",\n )\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 128, "n_words": 40, "vocab_size": 35, "complexity": 1, "nloc": 12, "token_counts": 89, "n_ast_nodes": 145, "n_identifiers": 21, "random_cut": "def test_invoice_plan_without_stripe_customer(self) -> None:\n self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, False, False)\n plan = get_current_plan_by_realm(get_realm(\"zulip\"))\n assert plan is not None\n plan.customer.stripe_customer_id = None\n plan.customer.save(update_fields=[\"stripe_customer_id\"])\n with self.assertRaises(BillingError) as context:\n " }, { "id": 153295, "commit_id": "e5e9634357e60925a5a70e56a1d4882d269f533a", "repo": "modin", "path": "modin/config/envvars.py", "file_name": "envvars.py", "fun_name": "_check_vars", "commit_message": "REFACTOR-#3900: add flake8-no-implicit-concat plugin and refactor flake8 error codes (#3901)\n\nSigned-off-by: jeffreykennethli ", "code": "def _check_vars():\n \n valid_names = {\n obj.varname\n for obj in globals().values()\n if isinstance(obj, type)\n and issubclass(obj, EnvironmentVariable)\n and not obj.is_abstract\n }\n found_names = {name for name in os.environ if name.startswith(\"MODIN_\")}\n unknown = found_names - valid_names\n if unknown:\n 
warnings.warn(\n f\"Found unknown environment variable{'s' if len(unknown) > 1 else ''},\"\n + f\" please check {'their' if len(unknown) > 1 else 'its'} spelling: \"\n + \", \".join(sorted(unknown))\n )\n\n\n_check_vars()\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 164, "n_words": 65, "vocab_size": 48, "complexity": 8, "nloc": 16, "token_counts": 87, "n_ast_nodes": 192, "n_identifiers": 22, "random_cut": "def _check_vars():\n \n valid_names = {\n obj.varname\n for obj in globals().values()\n if isinstance(obj, type)\n and issubclass(obj, EnvironmentVariable)\n and not obj.is_abstract\n }\n found_names = {name for name in os.environ if name.startswith(\"MODIN_\")}\n unknown = found_names - valid_names\n if unknown:\n warnings.warn(\n f\"Found unknown environment va" }, { "id": 209091, "commit_id": "d74d8601575464a017f6e0f0031403b8c18d4429", "repo": "scapy", "path": "scapy/contrib/automotive/scanner/enumerator.py", "file_name": "enumerator.py", "fun_name": "_show_negative_response_information", "commit_message": "Minor refactoring of Automotive-Scanner show functions", "code": "def _show_negative_response_information(self, **kwargs):\n # type: (Any) -> str\n filtered = kwargs.get(\"filtered\", True)\n s = \"%d negative responses were received\\n\" % \\\n len(self.results_with_negative_response)\n\n s += \"\\n\"\n\n s += self._show_negative_response_details(**kwargs) or \"\" + \"\\n\"\n if filtered and len(self.negative_response_blacklist):\n s += \"The following negative response codes are blacklisted: %s\\n\"\\\n % [self._get_negative_response_desc(nr)\n for nr in self.negative_response_blacklist]\n\n return s + \"\\n\"\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 161, "n_words": 56, "vocab_size": 43, "complexity": 5, "nloc": 11, "token_counts": 78, "n_ast_nodes": 139, "n_identifiers": 12, "random_cut": "def _show_negative_response_information(self, **kwargs):\n " }, { "id": 309972, "commit_id": "6a0c3843e51085e59d6fb69920733485f2f98fe5", "repo": "core", "path": "homeassistant/components/github/sensor.py", "file_name": "sensor.py", "fun_name": "native_value", "commit_message": "Revamp github integration (#64190)\n\nCo-authored-by: Paulus Schoutsen \r\nCo-authored-by: Franck Nijhof \r\nCo-authored-by: Martin Hjelmare ", "code": "def native_value(self) -> StateType:\n \n return self.entity_description.value_fn(self.coordinator.data)\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 21, "n_ast_nodes": 36, "n_identifiers": 7, "random_cut": "def native_value(self) -> StateType:\n \n return self.entity_description.value_fn(self.coordinat" }, { "id": 55876, "commit_id": "d225bb1da80d22b32148b68ebc9ff578bfd85c9b", "repo": "prefect", "path": "tests/orion/api/test_task_runs.py", "file_name": "test_task_runs.py", "fun_name": "test_read_task_runs_applies_sort", "commit_message": "Add sorts for task run name", "code": "async def test_read_task_runs_applies_sort(self, flow_run, session, client):\n now = pendulum.now()\n task_run_1 = await models.task_runs.create_task_run(\n session=session,\n task_run=schemas.core.TaskRun(\n name=\"Task Run 1\",\n flow_run_id=flow_run.id,\n task_key=\"my-key\",\n 
expected_start_time=now.subtract(minutes=5),\n dynamic_key=\"0\",\n ),\n )\n task_run_2 = await models.task_runs.create_task_run(\n session=session,\n task_run=schemas.core.TaskRun(\n name=\"Task Run 2\",\n flow_run_id=flow_run.id,\n task_key=\"my-key\",\n expected_start_time=now.add(minutes=5),\n dynamic_key=\"1\",\n ),\n )\n await session.commit()\n\n response = await client.post(\n \"/task_runs/filter\",\n json=dict(\n limit=1, sort=schemas.sorting.TaskRunSort.EXPECTED_START_TIME_DESC.value\n ),\n )\n assert response.status_code == status.HTTP_200_OK\n assert response.json()[0][\"id\"] == str(task_run_2.id)\n\n response = await client.post(\n \"/task_runs/filter\",\n json=dict(\n limit=1,\n offset=1,\n sort=schemas.sorting.TaskRunSort.EXPECTED_START_TIME_DESC.value,\n ),\n )\n assert response.status_code == status.HTTP_200_OK\n assert response.json()[0][\"id\"] == str(task_run_1.id)\n\n # name asc\n response = await client.post(\n \"/task_runs/filter\",\n json=dict(\n limit=1,\n sort=schemas.sorting.TaskRunSort.NAME_ASC.value,\n ),\n )\n assert response.status_code == status.HTTP_200_OK\n assert response.json()[0][\"id\"] == str(task_run_1.id)\n\n # name desc\n response = await client.post(\n \"/task_runs/filter\",\n json=dict(\n limit=1,\n sort=schemas.sorting.TaskRunSort.NAME_DESC.value,\n ),\n )\n assert response.status_code == status.HTTP_200_OK\n assert response.json()[0][\"id\"] == str(task_run_2.id)\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 755, "n_words": 120, "vocab_size": 49, "complexity": 1, "nloc": 59, "token_counts": 369, "n_ast_nodes": 577, "n_identifiers": 43, "random_cut": "async def test_read_task_runs_applies_sort(self, flow_run, session, client):\n now = pendulum.now()\n task_run_1 = await models.task_runs.create_task_run(\n session=session,\n task_run=schemas.core.TaskRun(\n name=\"Task Run 1\",\n flow_run_id=flow_run.id,\n task_key=\"my-key\",\n expected_start_time=now.subtract(minutes=5),\n dynamic_key=\"0\",\n ),\n )\n task_run_2 = await models.task_runs.create_task_run(\n session=session,\n task_run=schemas.core.TaskRun(\n name=\"Task Run 2\",\n flow_run_id=flow_run.id,\n task_key=\"my-key\",\n expected_start_time=now.add(minutes=5),\n dynamic_key=\"1\",\n ),\n )\n await session.commit()\n\n response = await client.post(\n \"/task_runs/filter\",\n json=dict(\n limit=1, sort=schemas.sorting.TaskRunSort.EXPECTED_START_TIME_DESC.value\n ),\n )\n assert response.status_code == status.HTTP_200_OK\n assert response.json()[0][\"id\"] == str(task_run_2.id)\n\n response = await client.post(\n \"/task_runs/filter\",\n json=dict(\n limit=1,\n offset=1,\n sort=schemas.sorting.TaskRunSort.EXPECTED_START_TIME_DESC.value,\n ),\n )\n assert response.status_code == status.HTTP_200_OK\n assert response.json()[0][\"id\"] == str(task_run_1.id)\n\n # name asc\n response = await client.post(\n " }, { "id": 266349, "commit_id": "97104f1221b64ef36cf42cb90c5a0eff263a2adb", "repo": "ansible", "path": "test/units/parsing/vault/test_vault.py", "file_name": "test_vault.py", "fun_name": "test_odd_length", "commit_message": "Avoid deprecated TestCase functions in unit tests. 
(#76678)\n\n* Avoid deprecated TestCase functions in unit tests.\r\n* Add assertRaisesRegex for Python 2.7.\r\n* Fix indentation.", "code": "def test_odd_length(self):\n b_data = b'123456789abcdefghijklmnopqrstuvwxyz'\n\n self.assertRaisesRegex(vault.AnsibleVaultFormatError,\n '.*Vault format unhexlify error.*',\n vault._unhexlify,\n b_data)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 115, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 25, "n_ast_nodes": 40, "n_identifiers": 7, "random_cut": "def test_odd_length(self):\n b_data = b'123456789abcdefghijklmnopqrstuvwxyz'\n\n se" }, { "id": 118383, "commit_id": "e308f43952f3e27d3b48ac28dd3eaffeb26e8ee0", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/jira_handler/tests/test_jira_handler.py", "file_name": "test_jira_handler.py", "fun_name": "setUpClass", "commit_message": "removing the wrongly commited files and addressing the review comments of PR 4112", "code": "def setUpClass(cls):\n cls.kwargs = {\n \"table_name\": \"project\",\n \"jira_url\": \"https://jira.linuxfoundation.org/\",\n \"user_id\": \"balaceg\",\n \"api_key\": \"4Rhq&Ehd#KV4an!\",\n \"jira_query\": \"project = RELENG and status = 'In Progress'\"\n }\n cls.handler = JiraHandler('test_jira_handler', cls.kwargs)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 102, "n_words": 27, "vocab_size": 24, "complexity": 1, "nloc": 9, "token_counts": 42, "n_ast_nodes": 83, "n_identifiers": 5, "random_cut": "def setUpClass(cls):\n cls.kwargs = {\n \"table_name\": \"project\",\n \"jira_url\": \"https://jira.linuxfoundation.org/\",\n \"user_id\": \"balaceg\",\n \"api_key\": \"4Rhq&Ehd#KV4an!\",\n \"jira_query\": \"project = RELENG and status = 'In Progress'\"\n }\n " }, { "id": 246608, "commit_id": "64c73c6ac88a740ee480a0ad1f9afc8596bccfa4", "repo": "synapse", "path": "tests/rest/client/test_login.py", "file_name": "test_login.py", "fun_name": "test_multi_sso_redirect", "commit_message": "Add type hints to `tests/rest/client` (#12066)", "code": "def test_multi_sso_redirect(self) -> None:\n \n # first hit the redirect url, which should redirect to our idp picker\n channel = self._make_sso_redirect_request(None)\n self.assertEqual(channel.code, 302, channel.result)\n location_headers = channel.headers.getRawHeaders(\"Location\")\n assert location_headers\n uri = location_headers[0]\n\n # hitting that picker should give us some HTML\n channel = self.make_request(\"GET\", uri)\n self.assertEqual(channel.code, 200, channel.result)\n\n # parse the form to check it has fields assumed elsewhere in this class\n html = channel.result[\"body\"].decode(\"utf-8\")\n p = TestHtmlParser()\n p.feed(html)\n p.close()\n\n # there should be a link for each href\n returned_idps: List[str] = []\n for link in p.links:\n path, query = link.split(\"?\", 1)\n self.assertEqual(path, \"pick_idp\")\n params = urllib.parse.parse_qs(query)\n self.assertEqual(params[\"redirectUrl\"], [TEST_CLIENT_REDIRECT_URL])\n returned_idps.append(params[\"idp\"][0])\n\n self.assertCountEqual(returned_idps, [\"cas\", \"oidc\", \"oidc-idp1\", \"saml\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 292, "n_words": 104, "vocab_size": 80, "complexity": 2, "nloc": 21, "token_counts": 188, "n_ast_nodes": 
316, "n_identifiers": 33, "random_cut": "def test_multi_sso_redirect(self) -> None:\n \n # first hit the redirect url, which should redirect to our idp picker\n channel = self._make_sso_redirect_request(None)\n self.assertEqual(channel.code, 302, channel.result)\n location_headers = channel.headers.getRawHeaders(\"Location\")\n assert location_headers\n uri = location_headers[0]\n\n # hitting that picker should give us some HTML\n channel = self.make_request(\"GET\", uri)\n self.assertEqual(channel.code, 200, channel." }, { "id": 58239, "commit_id": "b950152d8afc439135d5241c27821b1eeaa72a1e", "repo": "prefect", "path": "tests/utilities/test_importtools.py", "file_name": "test_importtools.py", "fun_name": "reset_sys_modules", "commit_message": "Improve tests", "code": "def reset_sys_modules():\n original = sys.modules.copy()\n yield\n sys.modules = original\n\n\n@pytest.mark.usefixtures(\"reset_sys_modules\")\n@pytest.mark.parametrize(\n \"working_directory,script_path\",\n [\n # Working directory is not necessary for these imports to work\n (__root_path__, TEST_PROJECTS_DIR / \"flat-project\" / \"explicit_relative.py\"),\n (__root_path__, TEST_PROJECTS_DIR / \"flat-project\" / \"implicit_relative.py\"),\n (__root_path__, TEST_PROJECTS_DIR / \"nested-project\" / \"implicit_relative.py\"),\n # They also work with the working directory set\n (TEST_PROJECTS_DIR / \"flat-project\", \"explicit_relative.py\"),\n (TEST_PROJECTS_DIR / \"flat-project\", \"implicit_relative.py\"),\n (TEST_PROJECTS_DIR / \"nested-project\", \"implicit_relative.py\"),\n # The tree structure requires the working directory to be at the base of all\n # module imports\n (TEST_PROJECTS_DIR / \"tree-project\", Path(\"imports\") / \"implicit_relative.py\"),\n ],\n)", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "@pytest.mark.usefixtures(\"reset_sys_modules\")\n@pytest.mark.parametrize(\n \"working_directory,script_path\",\n [\n # Working directory is not necessary for these imports to work\n (__root_path__, TEST_PROJECTS_DIR / \"flat-project\" / \"explicit_relative.py\"),\n (__root_path__, TEST_PROJECTS_DIR / \"flat-project\" / \"implicit_relative.py\"),\n (__root_path__, TEST_PROJECTS_DIR / \"nested-project\" / \"implicit_relative.py\"),\n # They also work with the working directory set\n (TEST_PROJECTS_DIR / \"flat-project\", \"explicit_relative.py\"),\n (TEST_PROJECTS_DIR / \"flat-project\", \"implicit_relative.py\"),\n (TEST_PROJECTS_DIR / \"nested-project\", \"implicit_relative.py\"),\n # The tree structure requires the working directory to be at the base of all\n # module imports\n (TEST_PROJECTS_DIR / \"tree-project\", Path(\"imports\") / \"implicit_relative.py\"),\n ],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 180, "n_words": 89, "vocab_size": 52, "complexity": 1, "nloc": 4, "token_counts": 19, "n_ast_nodes": 193, "n_identifiers": 12, "random_cut": "def reset_sys_modules():\n original = sys.modules.copy()\n yield\n sys.modules = original\n\n\n@pytest.mark.usefixtures(\"reset_sys_modules\")\n@pytest.mark.parametrize(\n \"working_directory,script_path\",\n [\n # Working directory is not necessary for these imports to work\n (__root_path__, TEST_PROJECTS_DIR / \"flat-project\" / \"explicit_relative.py\"),\n (__root_path__, TEST_PROJECTS_DIR / \"flat-project\" / \"implicit_relative.py\"),\n (__root_path__, TEST_PROJECTS_DIR / \"nested-project\" / \"implicit_relative.py\"),\n # They also work with the working directory set\n (TEST_PROJECTS_DIR / \"flat-project\", 
\"explicit_relative.py\"),\n " }, { "id": 192711, "commit_id": "f1587a20e3169cdd1c8cae5a1067c0ff52d63320", "repo": "vision", "path": "test/test_extended_models.py", "file_name": "test_extended_models.py", "fun_name": "test_schema_meta_validation", "commit_message": "Clean up purely informational fields from Weight Meta-data (#5852)\n\n* Removing `task`, `architecture` and `quantization`\r\n\r\n* Fix mypy\r\n\r\n* Remove size field\r\n\r\n* Remove unused import.\r\n\r\n* Fix mypy\r\n\r\n* Remove size from schema list.\r\n\r\n* update todo\r\n\r\n* Simplify with assert\r\n\r\n* Adding min_size to all models.\r\n\r\n* Update RAFT min size to 128", "code": "def test_schema_meta_validation(model_fn):\n # TODO: add list of permitted fields\n classification_fields = [\"categories\", \"acc@1\", \"acc@5\"]\n defaults = {\n \"all\": [\"recipe\", \"num_params\", \"min_size\"],\n \"models\": classification_fields,\n \"detection\": [\"categories\", \"map\"],\n \"quantization\": classification_fields + [\"backend\", \"unquantized\"],\n \"segmentation\": [\"categories\", \"mIoU\", \"acc\"],\n \"video\": classification_fields,\n \"optical_flow\": [],\n }\n model_name = model_fn.__name__\n module_name = model_fn.__module__.split(\".\")[-2]\n fields = set(defaults[\"all\"] + defaults[module_name])\n\n weights_enum = _get_model_weights(model_fn)\n if len(weights_enum) == 0:\n pytest.skip(f\"Model '{model_name}' doesn't have any pre-trained weights.\")\n\n problematic_weights = {}\n incorrect_params = []\n bad_names = []\n for w in weights_enum:\n missing_fields = fields - set(w.meta.keys())\n if missing_fields:\n problematic_weights[w] = missing_fields\n if w == weights_enum.DEFAULT:\n if module_name == \"quantization\":\n # parameters() count doesn't work well with quantization, so we check against the non-quantized\n unquantized_w = w.meta.get(\"unquantized\")\n if unquantized_w is not None and w.meta.get(\"num_params\") != unquantized_w.meta.get(\"num_params\"):\n incorrect_params.append(w)\n else:\n if w.meta.get(\"num_params\") != sum(p.numel() for p in model_fn(weights=w).parameters()):\n incorrect_params.append(w)\n else:\n if w.meta.get(\"num_params\") != weights_enum.DEFAULT.meta.get(\"num_params\"):\n if w.meta.get(\"num_params\") != sum(p.numel() for p in model_fn(weights=w).parameters()):\n incorrect_params.append(w)\n if not w.name.isupper():\n bad_names.append(w)\n\n assert not problematic_weights\n assert not incorrect_params\n assert not bad_names\n\n\n@pytest.mark.parametrize(\n \"model_fn\",\n TM.get_models_from_module(models)\n + TM.get_models_from_module(models.detection)\n + TM.get_models_from_module(models.quantization)\n + TM.get_models_from_module(models.segmentation)\n + TM.get_models_from_module(models.video)\n + TM.get_models_from_module(models.optical_flow),\n)\n@run_if_test_with_extended", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"model_fn\",\n TM.get_models_from_module(models)\n + TM.get_models_from_module(models.detection)\n + TM.get_models_from_module(models.quantization)\n + TM.get_models_from_module(models.segmentation)\n + TM.get_models_from_module(models.video)\n + TM.get_models_from_module(models.optical_flow),\n)\n@run_if_test_with_extended", "n_ast_errors": 1, "ast_levels": 21, "n_whitespaces": 518, "n_words": 175, "vocab_size": 109, "complexity": 14, "nloc": 41, "token_counts": 341, "n_ast_nodes": 697, "n_identifiers": 45, "random_cut": "def test_schema_meta_validation(model_fn):\n # TODO: add 
list of permitted fields\n classification_fields = [\"categories\", \"acc@1\", \"acc@5\"]\n defaults = {\n \"all\": [\"recipe\", \"num_params\", \"min_size\"],\n \"models\": classification_fields,\n \"detection\": [\"categories\", \"map\"],\n \"quantization\": classification_fields + [\"backend\", \"unquantized\"],\n \"segmentation\": [\"categories\", \"mIoU\", \"acc\"],\n \"video\": classification_fields,\n \"optical_flow\": [],\n }\n model_name = model_fn.__name__\n module_name = model_fn.__module__.split(\".\")[-2]\n fields = " }, { "id": 8293, "commit_id": "c99cab3a674e31885e5608a4aed73a64b1901c55", "repo": "ludwig", "path": "ludwig/backend/ray.py", "file_name": "ray.py", "fun_name": "create_runner", "commit_message": "Allow explicitly plumbing through nics (#2605)", "code": "def create_runner(**kwargs):\n trainer_kwargs = get_trainer_kwargs(**kwargs)\n with spread_env(**trainer_kwargs):\n trainer = Trainer(**trainer_kwargs)\n\n trainer.start()\n try:\n yield trainer\n finally:\n trainer.shutdown()\n\n\n@register_ray_trainer(\"trainer\", MODEL_ECD, default=True)", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "@register_ray_trainer(\"trainer\", MODEL_ECD, default=True)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 53, "n_words": 19, "vocab_size": 17, "complexity": 2, "nloc": 9, "token_counts": 43, "n_ast_nodes": 97, "n_identifiers": 12, "random_cut": "def create_runner(**kwargs):\n trainer_kwargs = g" }, { "id": 56067, "commit_id": "9a83d0c051e4a461bab8ecc97312fac7c6061d78", "repo": "prefect", "path": "tests/flow_runners/test_base.py", "file_name": "test_base.py", "fun_name": "test_flow_runner_networks_config_casts_to_list", "commit_message": "Splits flow_runners into files by their execution engine. (PrefectHQ/orion#1948)\n\nOur `subprocess`, Docker, and Kubernetes runners don't share a lot of behavior,\r\nand some of them require complex imports. To make the repo easier to navigate\r\nand make room for additional future FlowRunners, I'm splitting flow_runners and\r\ntheir tests into a subpackage. 
All current imports should be preserved, and we\r\ncan continue to document them as coming from `prefect.flow_runners`.", "code": "def test_flow_runner_networks_config_casts_to_list(self, runner_type):\n assert type(runner_type(networks={\"a\", \"b\"}).networks) == list\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 25, "n_ast_nodes": 42, "n_identifiers": 6, "random_cut": "def test_flow_runner_networks_config_casts_to_list(self, runner_type):\n assert t" }, { "id": 94288, "commit_id": "d2ed8bbdfe259eb0f316227a45b2266f41aa9ea0", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_sentry_functions.py", "file_name": "test_organization_sentry_functions.py", "fun_name": "test_get", "commit_message": "Sentry Functions: Endpoint to return list of Sentry Functions (#37626)\n\n* feat(integrations): new endpoint for fetching sentry functions for an organization\r\n\r\n* ref(integrations): add feature flag to gate endpoint\r\n\r\n* ref(integrations): remove extraneous comment", "code": "def test_get(self):\n with Feature(\"organizations:sentry-functions\"):\n response = self.client.get(self.url)\n assert response.status_code == 200\n assert response.data == []\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 54, "n_words": 15, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 36, "n_ast_nodes": 62, "n_identifiers": 9, "random_cut": "def test_get(self):\n with Feature(\"organizations:sentry-functions" }, { "id": 83620, "commit_id": "975066e3f0e3d4c3a356da3dfc1d4472f72717b7", "repo": "zulip", "path": "zerver/tests/test_link_embed.py", "file_name": "test_link_embed.py", "fun_name": "test_youtube_url_title_replaces_url", "commit_message": "actions: Split out zerver.actions.message_send.\n\nSigned-off-by: Anders Kaseorg ", "code": "def test_youtube_url_title_replaces_url(self) -> None:\n url = \"https://www.youtube.com/watch?v=eSJTXC7Ixgg\"\n with mock_queue_publish(\"zerver.actions.message_send.queue_json_publish\"):\n msg_id = self.send_personal_message(\n self.example_user(\"hamlet\"),\n self.example_user(\"cordelia\"),\n content=url,\n )\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n event = {\n \"message_id\": msg_id,\n \"urls\": [url],\n \"message_realm_id\": msg.sender.realm_id,\n \"message_content\": url,\n }\n\n mocked_data = {\"title\": \"Clearer Code at Scale - Static Types at Zulip and Dropbox\"}\n self.create_mock_response(url)\n with self.settings(TEST_SUITE=False, CACHES=TEST_CACHES):\n with self.assertLogs(level=\"INFO\") as info_logs:\n with mock.patch(\n \"zerver.lib.markdown.link_preview.link_embed_data_from_cache\",\n lambda *args, **kwargs: mocked_data,\n ):\n FetchLinksEmbedData().consume(event)\n self.assertTrue(\n \"INFO:root:Time spent on get_link_embed_data for https://www.youtube.com/watch?v=eSJTXC7Ixgg:\"\n in info_logs.output[0]\n )\n\n msg.refresh_from_db()\n expected_content = f\n self.assertEqual(expected_content, msg.rendered_content)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 415, "n_words": 78, "vocab_size": 68, "complexity": 1, "nloc": 31, "token_counts": 181, "n_ast_nodes": 329, "n_identifiers": 39, "random_cut": "def 
test_youtube_url_title_replaces_url(self) -> None:\n url = \"https://www.youtube.com/watch?v=eSJTXC7Ixgg\"\n with mock_queue_publish(\"zerver.actions.message_send.queue_json_publish\"):\n msg_id = self.send_personal_message(\n self.example_user(\"hamlet\"),\n self.example_user(\"cordelia\"),\n content=url,\n )\n msg = Message.objects.select_related(\"sender\").get(id=msg_id)\n event = {\n \"message_id\": msg_id,\n \"urls\": [url],\n \"message_realm_id\": msg.sender.realm_id,\n \"message_content\": ur" }, { "id": 3980, "commit_id": "bbd13802d81263d5677a4e8599d0b8708889719d", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-salesforce/integration_tests/integration_test.py", "file_name": "integration_test.py", "fun_name": "_encode_content", "commit_message": "🐛 Fix Python checker configs and Connector Base workflow (#10505)", "code": "def _encode_content(text):\n base64_bytes = base64.b64encode(text.encode(\"utf-8\"))\n return base64_bytes.decode(\"utf-8\")\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 12, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 25, "n_ast_nodes": 45, "n_identifiers": 7, "random_cut": "def _encode_content(text):\n base64_bytes = base64.b64encode(text.encode(\"utf-8\"))\n return base64_bytes.decode(\"utf-8\"" }, { "id": 48141, "commit_id": "3ed07474649b1e202f9b106105fef21f7b2cfddc", "repo": "airflow", "path": "dev/breeze/tests/test_commands.py", "file_name": "test_commands.py", "fun_name": "test_get_extra_docker_flags_all", "commit_message": "Seperate provider verification as standalone breeze command (#23454)\n\nThis is another step in simplifying and converting to Python all of\r\nthe CI/local development tooling.\r\n\r\nThis PR separates out verification of providers as a separate\r\nbreeze command `verify-provider-packages`. 
It was previously part of\r\n\"prepare_provider_packages.py\" but it has been now\r\nextracted to a separate in-container python file and it was\r\nwrapped with breeze's `verify-provider-packages` command.\r\n\r\nNo longer provider verification is run with \"preparing provider docs\"\r\nnor \"preparing provider packages\" - it's a standaline command.\r\n\r\nThis command is also used in CI now to run the tests:\r\n\r\n* all provider packages are built and created on CI together with\r\n  airflow version\r\n* the packages are installed inside the CI image and providers are\r\n verified\r\n* the 2.1 version of Airflow is installed together with all 2.1\r\n - compatible providers and provider verification is run there too.\r\n\r\nThis all is much simpler now - we got rediof some 500 lines of bash\r\ncode again in favour of breeze python code.\r\n\r\nFixes: #23430", "code": "def test_get_extra_docker_flags_all():\n flags = get_extra_docker_flags(MOUNT_ALL)\n assert \"empty\" not in \"\".join(flags)\n assert len(flags) < 10\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 22, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 27, "n_ast_nodes": 48, "n_identifiers": 6, "random_cut": "def test_get_extra_docker_flags_all():\n flags = get_extra_docker_flags(MOUNT_ALL)\n assert \"empty\" not in \"\".join(flags)\n assert len" }, { "id": 90269, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_group_index.py", "file_name": "test_organization_group_index.py", "fun_name": "test_basic_ignore", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_basic_ignore(self):\n group = self.create_group(status=GroupStatus.RESOLVED)\n\n snooze = GroupSnooze.objects.create(group=group, until=timezone.now())\n\n self.login_as(user=self.user)\n assert not GroupHistory.objects.filter(\n group=group, status=GroupHistoryStatus.IGNORED\n ).exists()\n response = self.get_success_response(qs_params={\"id\": group.id}, status=\"ignored\")\n # existing snooze objects should be cleaned up\n assert not GroupSnooze.objects.filter(id=snooze.id).exists()\n\n group = Group.objects.get(id=group.id)\n assert group.status == GroupStatus.IGNORED\n assert GroupHistory.objects.filter(group=group, status=GroupHistoryStatus.IGNORED).exists()\n\n assert response.data == {\"status\": \"ignored\", \"statusDetails\": {}, \"inbox\": None}\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 145, "n_words": 51, "vocab_size": 40, "complexity": 1, "nloc": 13, "token_counts": 169, "n_ast_nodes": 274, "n_identifiers": 28, "random_cut": "def test_basic_ignore(self):\n group = self.create_group(status=GroupStatus.RESOLVED)\n\n snooze = GroupSnooze.objects.create(group=grou" }, { "id": 57424, "commit_id": "cb53fb90654e3adfef19e58a42be16228d0695ec", "repo": "prefect", "path": "tests/infrastructure/test_process.py", "file_name": "test_process.py", "fun_name": "test_process_runs_command", "commit_message": "Add tests for process", "code": "async def test_process_runs_command(tmp_path):\n # Perform a side-effect to demonstrate the command is run\n assert await Process(command=[\"touch\", str(tmp_path / \"canary\")]).run()\n assert (tmp_path / \"canary\").exists()\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 31, "n_words": 23, "vocab_size": 21, "complexity": 1, "nloc": 3, "token_counts": 36, "n_ast_nodes": 66, "n_identifiers": 7, "random_cut": "async def test_process_runs_command(tmp_path):\n # Perform a side-effect to demonstrate the command is run\n assert await Process(command=[\"touch\", str(tmp_path / \"canary\")]).run()\n assert (tmp_path / \"canary\").exists()\n\n" }, { "id": 162581, "commit_id": "812283199a2f05046b9b4d59c22a06051b958bf6", "repo": "yt-dlp", "path": "yt_dlp/extractor/murrtube.py", "file_name": "murrtube.py", "fun_name": "_download_gql", "commit_message": "[murrtube] Add extractor (#2387)\n\nAuthored by: cyberfox1691", "code": "def _download_gql(self, video_id, op, note=None, fatal=True):\n result = self._download_json(\n 'https://murrtube.net/graphql',\n video_id, note, data=json.dumps(op).encode(), fatal=fatal,\n headers={'Content-Type': 'application/json'})\n return result['data']\n", "url": "https://github.com/yt-dlp/yt-dlp.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 64, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 6, "token_counts": 59, "n_ast_nodes": 91, "n_identifiers": 13, "random_cut": "def _download_gql(self, video_id, op, note=None, fatal=True):\n result = self._download_json(\n 'https://mu" }, { "id": 44995, "commit_id": "0ebd6428e6b484790bfbbe1b8687ef4e6cae10e9", "repo": "airflow", "path": "tests/providers/cncf/kubernetes/operators/test_kubernetes_pod.py", "file_name": "test_kubernetes_pod.py", "fun_name": "test_push_xcom_pod_info", "commit_message": "Switch XCom implementation to use run_id (#20975)", "code": "def test_push_xcom_pod_info(self, mock_extract_xcom, dag_maker, do_xcom_push):\n \n mock_extract_xcom.return_value = '{}'\n with dag_maker():\n KubernetesPodOperator(\n namespace=\"default\",\n image=\"ubuntu:16.04\",\n cmds=[\"bash\", \"-cx\"],\n name=\"test\",\n task_id=\"task\",\n in_cluster=False,\n do_xcom_push=do_xcom_push,\n )\n DummyOperator(task_id='task_to_pull_xcom')\n dagrun = dag_maker.create_dagrun()\n tis = {ti.task_id: ti for ti in dagrun.task_instances}\n\n pod = self.run_pod(\n tis[\"task\"].task,\n context=tis[\"task\"].get_template_context(session=dag_maker.session),\n )\n pod_name = tis[\"task_to_pull_xcom\"].xcom_pull(task_ids=\"task\", key='pod_name')\n pod_namespace = tis[\"task_to_pull_xcom\"].xcom_pull(task_ids=\"task\", key='pod_namespace')\n assert pod_name and pod_name == pod.metadata.name\n assert pod_namespace and pod_namespace == pod.metadata.namespace\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 295, "n_words": 58, "vocab_size": 43, "complexity": 4, "nloc": 23, "token_counts": 169, "n_ast_nodes": 284, "n_identifiers": 31, "random_cut": "def test_push_xcom_pod_info(self, mock_extract_xcom, dag_maker, do_xcom_push):\n \n mock_extract_xcom.return_value = '{}'\n with dag_maker():\n KubernetesPodOperator(\n namespace=\"default\",\n image=\"ubuntu:16.04\",\n cmds=[\"bash\", \"-cx\"],\n name=\"test\",\n task_id=\"task\",\n in_cluster=False,\n do_xcom_push=do_xcom_push,\n )\n DummyOperator(task_id='task_to_pull_xcom')\n dagrun = dag_maker.create_dagrun(" }, { "id": 177718, "commit_id": "c0c3426467785ffe9a8e3026fff1ef6e4faddca3", "repo": "label-studio", "path": "label_studio/tasks/serializers.py", "file_name": "serializers.py", "fun_name": "get_updated_by", "commit_message": "feat: 
DEV-1844: \"Last updated by\" column in Data Manager (#2119)\n\n* feat: DEV-1696: Add lead time task column in Data Manager\r\n\r\n* Fix test for test_get_task\r\n\r\n* Fix lead time annotation\r\n\r\n* Fix tests\r\n\r\n* Add migration for view and fix test\r\n\r\n* Fix test\r\n\r\n* Fix tests data\r\n\r\n* Fix annotation count\r\n\r\n* Fix annotation results in tests\r\n\r\n* Fix lead_time type to float\r\n\r\n* Fix test tasks-all-fields-postgre data\r\n\r\n* Change test data for tasks-all-fields-postgre\r\n\r\n* Change annotations_results to anystr\r\n\r\n* Change predictions_results data for tasks-all-fields-postgre\r\n\r\n* Change data in tasks-all-fields-sqlite test\r\n\r\n* feat: DEV-1844: \"Last updated by\" column in Data Manager\r\n\r\n* More\r\n\r\n* Add more functions\r\n\r\n* Add context with user\r\n\r\n* Remove updated_by from quickview\r\n\r\n* Fix label stream\r\n\r\n* Fix tests\r\n\r\n* Fix tests\r\n\r\n* Update DM\r\n\r\n* Fix pagination class\r\n\r\n* Fix\r\n\r\nCo-authored-by: Konstantin Korotaev <42615530+KonstantinKorotaev@users.noreply.github.com>", "code": "def get_updated_by(self, task):\n return [{'user_id': task.updated_by_id}] if task.updated_by_id else []\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 16, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 2, "token_counts": 24, "n_ast_nodes": 38, "n_identifiers": 4, "random_cut": "def get_updated_by(self, task):\n return [{'user_id': task.updated_by_id}] if task.updated_by_id else []\n" }, { "id": 313670, "commit_id": "51b4d15c8cb83bb715222841aa48e83f77ef38ff", "repo": "core", "path": "tests/components/mqtt/test_camera.py", "file_name": "test_camera.py", "fun_name": "camera_platform_only", "commit_message": "Speed up mqtt tests (#73423)\n\nCo-authored-by: jbouwh \r\nCo-authored-by: Jan Bouwhuis ", "code": "def camera_platform_only():\n \n with patch(\"homeassistant.components.mqtt.PLATFORMS\", [Platform.CAMERA]):\n yield\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 19, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 37, "n_identifiers": 4, "random_cut": "def camera_platform_only():\n \n with patch(\"homeassistant.components.mqtt.PLATFORMS\", [Platform.CAMERA]):\n yield\n\n" }, { "id": 166117, "commit_id": "6d165676daef078988c5a292261d7901295e21d9", "repo": "pandas", "path": "pandas/tests/strings/test_find_replace.py", "file_name": "test_find_replace.py", "fun_name": "test_match_na_kwarg", "commit_message": "WARN: PerformanceWarning for non-pyarrow fallback (#46732)", "code": "def test_match_na_kwarg(any_string_dtype):\n # GH #6609\n s = Series([\"a\", \"b\", np.nan], dtype=any_string_dtype)\n\n with maybe_perf_warn(any_string_dtype == \"string[pyarrow]\" and pa_version_under4p0):\n result = s.str.match(\"a\", na=False)\n expected_dtype = np.bool_ if any_string_dtype == \"object\" else \"boolean\"\n expected = Series([True, False, False], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n with maybe_perf_warn(any_string_dtype == \"string[pyarrow]\" and pa_version_under4p0):\n result = s.str.match(\"a\")\n expected_dtype = \"object\" if any_string_dtype == \"object\" else \"boolean\"\n expected = Series([True, False, np.nan], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n", "url": 
"https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 107, "n_words": 64, "vocab_size": 35, "complexity": 5, "nloc": 12, "token_counts": 137, "n_ast_nodes": 230, "n_identifiers": 18, "random_cut": "def test_match_na_kwarg(any_string_dtype):\n # GH #6609\n s = Series([\"a\", \"b\", np.nan], dtype=any_string_dtype)\n\n with maybe_perf_warn(any_string_dtype == \"string[pyarrow]\" and pa_version_under4p0):\n result = s.str.match(\"a\", na=False)\n expected_dtype = np.bool_ if any_string_dtype == \"object\" else \"boolean\"\n expected = Series([True, False, False], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n with maybe_perf_warn(any_string_dtype == \"string[pyarrow]\" and pa_version_under4p0):\n result = s.str.match(\"a\")\n expected_dtype = " }, { "id": 81022, "commit_id": "dc64168ed40bdf0d59a715ef82b2a6b46c2ab58e", "repo": "awx", "path": "awx/main/tests/functional/api/test_instance_group.py", "file_name": "test_instance_group.py", "fun_name": "test_cannot_remove_controlplane_hybrid_instances", "commit_message": "Disallows disassociate of hubrid type instances from controlplane instance group\n\nIntroduce new pattern for is_valid_removal\n\nMakes disassociate error message a bit more dynamic", "code": "def test_cannot_remove_controlplane_hybrid_instances(post, controlplane_instance_group, node_type_instance, admin_user):\n instance = node_type_instance(hostname='hybrid_node', node_type='hybrid')\n controlplane_instance_group.instances.add(instance)\n\n url = reverse('api:instance_group_instance_list', kwargs={'pk': controlplane_instance_group.pk})\n r = post(url, {'disassociate': True, 'id': instance.id}, admin_user, expect=400)\n assert 'Cannot disassociate hybrid node' in str(r.data)\n\n url = reverse('api:instance_instance_groups_list', kwargs={'pk': instance.pk})\n r = post(url, {'disassociate': True, 'id': controlplane_instance_group.id}, admin_user, expect=400)\n assert f'Cannot disassociate hybrid instance' in str(r.data)\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 75, "n_words": 52, "vocab_size": 34, "complexity": 1, "nloc": 9, "token_counts": 130, "n_ast_nodes": 212, "n_identifiers": 19, "random_cut": "def test_cannot_remove_controlplane_hybrid_instances(post, controlplane_instance_group, node_type_instance, admin_user):\n instance = node_type_instance(hostname='hybrid_node', node_type='hybrid')\n controlplane_instance_group.instances.add(instance)\n\n url = reverse('api:instance_group_instance_list', kwargs={'pk': controlplane_instance_group.pk})\n r = post(url, {'disassociate': True, 'id': instance.id}, admin_user, expect=400)\n assert 'Cannot disassociate hybrid node' in str(r.data)\n\n url = reverse('api:instance_instance_groups_list', kwargs={'pk': instance.pk})\n r = post(url, {'disassociate': True, 'id': controlplane_instance_group.id}, admin_user, expect=400)\n assert f'Cannot disassociate hybrid instance' in str(r.data)\n" }, { "id": 171091, "commit_id": "b7ea7c6dfd100c40b0bc45aacf6d92c5c22f2e63", "repo": "pandas", "path": "pandas/tests/frame/test_reductions.py", "file_name": "test_reductions.py", "fun_name": "test_operators_timedelta64", "commit_message": "DEPR: Enforce deprecation of numeric_only=None in DataFrame aggregations (#49551)\n\n* WIP\r\n\r\n* DEPR: Enforce deprecation of numeric_only=None in DataFrame aggregations\r\n\r\n* Partial reverts\r\n\r\n* 
numeric_only in generic/series, fixup\r\n\r\n* cleanup\r\n\r\n* Remove docs warning\r\n\r\n* fixups\r\n\r\n* Fixups", "code": "def test_operators_timedelta64(self):\n df = DataFrame(\n {\n \"A\": date_range(\"2012-1-1\", periods=3, freq=\"D\"),\n \"B\": date_range(\"2012-1-2\", periods=3, freq=\"D\"),\n \"C\": Timestamp(\"20120101\") - timedelta(minutes=5, seconds=5),\n }\n )\n\n diffs = DataFrame({\"A\": df[\"A\"] - df[\"C\"], \"B\": df[\"A\"] - df[\"B\"]})\n\n # min\n result = diffs.min()\n assert result[0] == diffs.loc[0, \"A\"]\n assert result[1] == diffs.loc[0, \"B\"]\n\n result = diffs.min(axis=1)\n assert (result == diffs.loc[0, \"B\"]).all()\n\n # max\n result = diffs.max()\n assert result[0] == diffs.loc[2, \"A\"]\n assert result[1] == diffs.loc[2, \"B\"]\n\n result = diffs.max(axis=1)\n assert (result == diffs[\"A\"]).all()\n\n # abs\n result = diffs.abs()\n result2 = abs(diffs)\n expected = DataFrame({\"A\": df[\"A\"] - df[\"C\"], \"B\": df[\"B\"] - df[\"A\"]})\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result2, expected)\n\n # mixed frame\n mixed = diffs.copy()\n mixed[\"C\"] = \"foo\"\n mixed[\"D\"] = 1\n mixed[\"E\"] = 1.0\n mixed[\"F\"] = Timestamp(\"20130101\")\n\n # results in an object array\n result = mixed.min()\n expected = Series(\n [\n pd.Timedelta(timedelta(seconds=5 * 60 + 5)),\n pd.Timedelta(timedelta(days=-1)),\n \"foo\",\n 1,\n 1.0,\n Timestamp(\"20130101\"),\n ],\n index=mixed.columns,\n )\n tm.assert_series_equal(result, expected)\n\n # excludes non-numeric\n result = mixed.min(axis=1, numeric_only=True)\n expected = Series([1, 1, 1.0], index=[0, 1, 2])\n tm.assert_series_equal(result, expected)\n\n # works when only those columns are selected\n result = mixed[[\"A\", \"B\"]].min(1)\n expected = Series([timedelta(days=-1)] * 3)\n tm.assert_series_equal(result, expected)\n\n result = mixed[[\"A\", \"B\"]].min()\n expected = Series(\n [timedelta(seconds=5 * 60 + 5), timedelta(days=-1)], index=[\"A\", \"B\"]\n )\n tm.assert_series_equal(result, expected)\n\n # GH 3106\n df = DataFrame(\n {\n \"time\": date_range(\"20130102\", periods=5),\n \"time2\": date_range(\"20130105\", periods=5),\n }\n )\n df[\"off1\"] = df[\"time2\"] - df[\"time\"]\n assert df[\"off1\"].dtype == \"timedelta64[ns]\"\n\n df[\"off2\"] = df[\"time\"] - df[\"time2\"]\n df._consolidate_inplace()\n assert df[\"off1\"].dtype == \"timedelta64[ns]\"\n assert df[\"off2\"].dtype == \"timedelta64[ns]\"\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 859, "n_words": 236, "vocab_size": 124, "complexity": 1, "nloc": 65, "token_counts": 605, "n_ast_nodes": 1005, "n_identifiers": 35, "random_cut": "def test_operators_timedelta64(self):\n df = DataFrame(\n {\n \"A\": date_range(\"2012-1-1\", periods=3, freq=\"D\"),\n \"B\": date_range(\"2012-1-2\", periods=3, freq=\"D\"),\n \"C\": Timestamp(\"20120101\") - timedelta(minutes=5, seconds=5),\n }\n )\n\n diffs = DataFrame({\"A\": df[\"A\"] - df[\"C\"], \"B\": df[\"A\"] - df[\"B\"]})\n\n # min\n result = diffs.min()\n assert result[0] == diffs.loc[0, \"A\"]\n assert result[1] == diffs.loc[0, \"B\"]\n\n result = diffs.min(axis=1)\n assert (result == diffs.loc[0, \"B\"]).all()\n\n # max\n result = diffs.max()\n assert result[0] == diffs.loc[2, \"A\"]\n assert result[1] == diffs.loc[2, \"B\"]\n\n result = diffs.max(axis=1)\n assert (result == diffs[\"A\"]).all()\n\n # abs\n result = diffs.abs()\n result2 = abs(diffs)\n expected = 
DataFrame({\"A\": df[\"A\"] - df[\"C\"], \"B\": df[\"B\"] - df[\"A\"]})\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result2, expected)\n\n # mixed frame\n mixed = diffs.copy()\n mixed[\"C\"] = \"foo\"\n mixed[\"D\"] = 1\n mixed[\"E\"] = 1.0\n mixed[\"F\"] = Timestamp(\"20130101\")\n\n # results in an object array\n result = mixed.min()\n expected = Series(\n [\n pd.Timedelta(timedelta(seconds=5 * 60 + 5)),\n pd.Timedelta(timedelta(days=-1)),\n \"foo\",\n 1,\n 1.0,\n Timestamp(\"20130101\"),\n ],\n index=mixed.columns,\n )\n tm.assert_series_equal(result, expected)\n\n # excludes non-numeric\n result = mixed.min(axis=1, numeric_only=True)\n expected = Series([1, 1, 1.0], index=[0, 1, 2])\n tm.assert_series_equal(result, expected)\n\n # works when only those columns are selected\n result = mixed[[\"A\", \"B\"]].min(1)\n expected = Series([timedelta(days=-1)] * 3)\n tm.assert_series_equal(result, expected)\n\n result = mixed[[\"A\", \"B\"]].min()\n expected = Series(\n [timedelta(seconds=5 * 60 + 5), timedelta(days=-1)], index=[\"A\", \"B\"]\n )\n tm.assert_series_equal(result, expected)\n\n # GH 3106\n df = DataFrame(\n {\n \"time\": date_range(\"20130102\", periods=5),\n \"time2\": date_range(\"20130105\", periods=5),\n }\n )\n df[\"of" }, { "id": 87656, "commit_id": "3dea4b7342328fc3ce74685b481f983c7ee6599a", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_events_spans_histogram.py", "file_name": "test_organization_events_spans_histogram.py", "fun_name": "test_bad_params_outside_range_num_buckets", "commit_message": "fix(perf): Remove suspect spans flag (#40963)\n\nRe-pushing up https://github.com/getsentry/sentry/pull/38799 now that\r\ntest should be (mostly) fixed. That one closed just before I force\r\npushed so it's not re-openable 🤷", "code": "def test_bad_params_outside_range_num_buckets(self):\n query = {\n \"project\": [self.project.id],\n \"span\": self.format_span(\"django.middleware\", \"2b9cbb96dbf59baa\"),\n \"numBuckets\": -1,\n }\n\n response = self.do_request(query)\n\n assert response.status_code == 400, \"failing for numBuckets\"\n assert response.data == {\n \"numBuckets\": [\"Ensure this value is greater than or equal to 1.\"]\n }, \"failing for numBuckets\"\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 127, "n_words": 42, "vocab_size": 34, "complexity": 1, "nloc": 11, "token_counts": 65, "n_ast_nodes": 114, "n_identifiers": 10, "random_cut": "def test_bad_params_outside_range_num_buckets(self):\n query = {\n \"project\": [self.project.id],\n \"span\": self.format_span(\"django.middleware\", \"2b9cbb96dbf59baa\"),\n \"numBuckets\": -1,\n }\n\n response = self.do_request(query)\n\n assert response.status_code == 400, \"failing for numBuckets\"\n assert response.da" }, { "id": 283343, "commit_id": "b71abcfbf4d7e8ac1855522aff0378e13c8b5362", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/common/behavioural_analysis/test_finbrain_view.py", "file_name": "test_finbrain_view.py", "fun_name": "test_display_sentiment_analysis_empty_df", "commit_message": "Updating some names (#1575)\n\n* quick econ fix\r\n\r\n* black\r\n\r\n* keys and feature flags\r\n\r\n* terminal name :eyes:\r\n\r\n* some more replacements\r\n\r\n* some more replacements\r\n\r\n* edit pyproject\r\n\r\n* gst -> openbb\r\n\r\n* add example portfolios back to git\r\n\r\n* Update api from gst\r\n\r\n* sorry. 
skipping some tests\r\n\r\n* another round of names\r\n\r\n* another round of test edits\r\n\r\n* Missed some .gst refs and update timezone\r\n\r\n* water mark stuff\r\n\r\n* Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS\r\n\r\n* fix more GST to OpenBB Terminal\r\n\r\n* Logging : merge conflicts with main\r\n\r\n* Revert wrong files\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA ", "code": "def test_display_sentiment_analysis_empty_df(mocker):\n view = \"openbb_terminal.common.behavioural_analysis.finbrain_view\"\n\n # MOCK EXPORT_DATA\n mocker.patch(\n target=\"openbb_terminal.common.behavioural_analysis.finbrain_view.export_data\"\n )\n\n # MOCK GTFF\n mocker.patch.object(target=helper_funcs.obbff, attribute=\"USE_ION\", new=True)\n\n # MOCK GET_SENTIMENT\n mocker.patch(\n target=f\"{view}.finbrain_model.get_sentiment\",\n return_value=pd.DataFrame(),\n )\n\n finbrain_view.display_sentiment_analysis(\n ticker=\"AAPL\",\n export=\"\",\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 95, "n_words": 28, "vocab_size": 21, "complexity": 1, "nloc": 14, "token_counts": 67, "n_ast_nodes": 120, "n_identifiers": 17, "random_cut": "def test_display_sentiment_analysis_empty_df(mocker):\n view = \"openbb_terminal.common.behavioural_analysis.finbrain_view\"\n\n # MOCK EXPORT_DATA\n mocker.patch(\n target=\"openbb_terminal.common.behavioural_analysis.finbrain_view.export_data\"\n )\n\n # MOCK GTFF\n mocker.patch.object(target=helper_funcs.obbff, attribute=\"USE_ION\", new=True)\n\n # " }, { "id": 320002, "commit_id": "7aa0e5650b290cbc39e37418508863043f0de008", "repo": "paperless-ngx", "path": "src/documents/tests/test_barcodes.py", "file_name": "test_barcodes.py", "fun_name": "test_get_mime_type", "commit_message": "Updates how barcodes are detected, using pikepdf images, instead of converting each page to an image", "code": "def test_get_mime_type(self):\n tiff_file = os.path.join(\n self.SAMPLE_DIR,\n \"simple.tiff\",\n )\n pdf_file = os.path.join(\n self.SAMPLE_DIR,\n \"simple.pdf\",\n )\n png_file = os.path.join(\n self.BARCODE_SAMPLE_DIR,\n \"barcode-128-custom.png\",\n )\n tiff_file_no_extension = os.path.join(settings.SCRATCH_DIR, \"testfile1\")\n pdf_file_no_extension = os.path.join(settings.SCRATCH_DIR, \"testfile2\")\n shutil.copy(tiff_file, tiff_file_no_extension)\n shutil.copy(pdf_file, pdf_file_no_extension)\n\n self.assertEqual(barcodes.get_file_mime_type(tiff_file), \"image/tiff\")\n self.assertEqual(barcodes.get_file_mime_type(pdf_file), \"application/pdf\")\n self.assertEqual(\n barcodes.get_file_mime_type(tiff_file_no_extension),\n \"image/tiff\",\n )\n self.assertEqual(\n barcodes.get_file_mime_type(pdf_file_no_extension),\n \"application/pdf\",\n )\n self.assertEqual(barcodes.get_file_mime_type(png_file), \"image/png\")\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 274, "n_words": 46, "vocab_size": 33, "complexity": 1, "nloc": 28, "token_counts": 161, "n_ast_nodes": 263, "n_identifiers": 19, "random_cut": "def test_get_mime_type(self):\n tiff_file = os.path.join(\n self.SAMPLE_DIR,\n \"simple.tiff\",\n )\n pdf_file = os.path.join(\n " }, { "id": 41229, "commit_id": "9917c46c544fa1f1a4b76cf174206a0f35305916", "repo": "seaborn", "path": "seaborn/tests/_core/test_moves.py", 
"file_name": "test_moves.py", "fun_name": "test_two_semantics", "commit_message": "Reorganize how Stat transform works, following Move patterns", "code": "def test_two_semantics(self, df):\n\n groupby = GroupBy([\"x\", \"grp2\", \"grp3\"])\n res = Dodge()(df, groupby, \"x\")\n\n levels = categorical_order(df[\"grp2\"]), categorical_order(df[\"grp3\"])\n w, n = 0.8, len(levels[0]) * len(levels[1])\n\n shifts = np.linspace(0, w - w / n, n)\n shifts -= shifts.mean()\n\n assert_series_equal(res[\"y\"], df[\"y\"])\n assert_series_equal(res[\"width\"], df[\"width\"] / n)\n\n for (v2, v3), shift in zip(product(*levels), shifts):\n rows = (df[\"grp2\"] == v2) & (df[\"grp3\"] == v3)\n assert_series_equal(res.loc[rows, \"x\"], df.loc[rows, \"x\"] + shift)\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 148, "n_words": 64, "vocab_size": 54, "complexity": 2, "nloc": 12, "token_counts": 181, "n_ast_nodes": 291, "n_identifiers": 24, "random_cut": "def test_two_semantics(self, df):\n\n groupby = GroupBy([\"x\", \"grp2\", \"grp3\"])\n res = Dodge()(df, groupby, \"x\")\n\n levels = categorical_order(df[\"grp2\"])" }, { "id": 113360, "commit_id": "5f571327902c84c208482f66c2b293ad1013ee3d", "repo": "nni", "path": "examples/model_compress/pruning/taylorfo_lightning_evaluator.py", "file_name": "taylorfo_lightning_evaluator.py", "fun_name": "evaluate", "commit_message": "[Compression] Evaluator - step 3 Tutorial (#5016)", "code": "def evaluate(self, batch, stage=None):\n x, y = batch\n logits = self(x)\n loss = self.criterion(logits, y)\n preds = torch.argmax(logits, dim=1)\n acc = accuracy(preds, y)\n\n if stage:\n self.log(f\"default\", loss, prog_bar=False)\n self.log(f\"{stage}_loss\", loss, prog_bar=True)\n self.log(f\"{stage}_acc\", acc, prog_bar=True)\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 108, "n_words": 34, "vocab_size": 27, "complexity": 2, "nloc": 10, "token_counts": 94, "n_ast_nodes": 149, "n_identifiers": 17, "random_cut": "def evaluate(self, batch, stage=None):\n x, y = batch\n " }, { "id": 81246, "commit_id": "c836fafb61066d54af6f9726b00a83e6ae8451af", "repo": "awx", "path": "awx/main/models/schedules.py", "file_name": "schedules.py", "fun_name": "get_zoneinfo_with_links", "commit_message": "modifying schedules API to return a list of links", "code": "def get_zoneinfo_with_links(self):\n zone_instance = get_zonefile_instance()\n return_val = {'zones': sorted(zone_instance.zones), 'links': {}}\n for zone_name in return_val['zones']:\n if str(zone_name) != str(zone_instance.zones[zone_name]._filename):\n return_val['links'][zone_name] = zone_instance.zones[zone_name]._filename\n return return_val\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 77, "n_words": 24, "vocab_size": 21, "complexity": 3, "nloc": 7, "token_counts": 71, "n_ast_nodes": 117, "n_identifiers": 10, "random_cut": "def get_zoneinfo_with_links(self):\n zone_instance = get_zonefile_instance()\n return_val = {'zones': sorted(zone_instance.zones), 'links': {}}\n for zone_name in return_val['zones']:\n if str(zone_name) != str(zone_instance.zones[zone_name]._filename):\n return_val['links'][zone_name] = zone_instance.zones[zo" }, { "id": 244221, "commit_id": "24f2fdb38481e6c013a588660c044e410148ce1e", "repo": "mmdetection", "path": 
"mmdet/utils/util_distribution.py", "file_name": "util_distribution.py", "fun_name": "is_mlu_available", "commit_message": "fix lint (#7793)", "code": "def is_mlu_available():\n \n return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()\n\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 2, "nloc": 2, "token_counts": 18, "n_ast_nodes": 34, "n_identifiers": 3, "random_cut": "def is_mlu_available():\n \n return hasattr(torch, 'is_mlu_available') and torch.is_mlu_a" }, { "id": 241510, "commit_id": "4b5761539e45bd0392aa49378cbaaca574006f03", "repo": "lightning", "path": "pytorch_lightning/trainer/connectors/checkpoint_connector.py", "file_name": "checkpoint_connector.py", "fun_name": "_hpc_resume_path", "commit_message": "Remove `hpc_save` (#11101)", "code": "def _hpc_resume_path(self) -> Optional[str]:\n if not os.path.isdir(self.trainer.weights_save_path):\n return None\n dir_path_hpc = str(self.trainer.weights_save_path)\n max_version = self.__max_ckpt_version_in_folder(dir_path_hpc, \"hpc_ckpt_\")\n if max_version is not None:\n return os.path.join(dir_path_hpc, f\"hpc_ckpt_{max_version}.ckpt\")\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 73, "n_words": 24, "vocab_size": 19, "complexity": 3, "nloc": 7, "token_counts": 65, "n_ast_nodes": 107, "n_identifiers": 13, "random_cut": "def _hpc_resume_path(self) -> Optional[str]:\n if not os.path.isdir(self.trainer.weights_save_path):\n return None\n dir_path_hpc = str(self.trainer.weights_save_path)\n " }, { "id": 116938, "commit_id": "82ba332ccf612ef32880a25167aba5fd69408889", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/informix_handler/tests/test_informix_handler.py", "file_name": "test_informix_handler.py", "fun_name": "test_4_get_tables", "commit_message": "cleaned up whitespace and indentation in test_informix_handler", "code": "def test_4_get_tables(self):\n tables = self.handler.get_tables()\n assert tables.type is RESPONSE_TYPE.TABLE\n ", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 36, "n_identifiers": 8, "random_cut": "def test_4_get_tables(self):\n tables = self" }, { "id": 257007, "commit_id": "d43801143221e71e868c0ac80795bb0a306778e7", "repo": "haystack", "path": "haystack/utils/doc_store.py", "file_name": "doc_store.py", "fun_name": "launch_opensearch", "commit_message": "fix launch scripts (#2341)", "code": "def launch_opensearch(sleep=15, delete_existing=False):\n # Start an OpenSearch server via docker\n\n logger.debug(\"Starting OpenSearch...\")\n # This line is needed since it is not possible to start a new docker container with the name opensearch if there is a stopped image with the same now\n # docker rm only succeeds if the container is stopped, not if it is running\n if delete_existing:\n _ = subprocess.run([f\"docker rm --force {OPENSEARCH_CONTAINER_NAME}\"], shell=True, stdout=subprocess.DEVNULL)\n status = subprocess.run(\n [\n f'docker start {OPENSEARCH_CONTAINER_NAME} > /dev/null 2>&1 || docker run -d -p 9201:9200 -p 9600:9600 -e \"discovery.type=single-node\" --name {OPENSEARCH_CONTAINER_NAME} opensearchproject/opensearch:1.2.4'\n ],\n 
shell=True,\n )\n if status.returncode:\n logger.warning(\n \"Tried to start OpenSearch through Docker but this failed. \"\n \"It is likely that there is already an existing OpenSearch instance running. \"\n )\n else:\n time.sleep(sleep)\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 230, "n_words": 122, "vocab_size": 86, "complexity": 3, "nloc": 17, "token_counts": 77, "n_ast_nodes": 142, "n_identifiers": 16, "random_cut": "def launch_opensearch(sleep=15, delete_existing=False):\n # Start an OpenSearch server via docker\n\n logger.debug(\"Starting OpenSearch...\")\n # This line is needed since it is not possible to start a new docker container with the name opensearch if there is a stopped image with the same now\n # docker rm only succeeds if the container is stopped, not if it is running\n if delete_existing:\n _ = sub" }, { "id": 75504, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/search/backends/database/postgres/postgres.py", "file_name": "postgres.py", "fun_name": "prepare_field", "commit_message": "Reformat with black", "code": "def prepare_field(self, obj, field):\n if isinstance(field, SearchField):\n yield (\n field,\n get_weight(field.boost),\n self.prepare_value(field.get_value(obj)),\n )\n\n elif isinstance(field, AutocompleteField):\n # AutocompleteField does not define a boost parameter, so use a base weight of 'D'\n yield (field, \"D\", self.prepare_value(field.get_value(obj)))\n\n elif isinstance(field, RelatedFields):\n sub_obj = field.get_value(obj)\n if sub_obj is None:\n return\n\n if isinstance(sub_obj, Manager):\n sub_objs = sub_obj.all()\n\n else:\n if callable(sub_obj):\n sub_obj = sub_obj()\n\n sub_objs = [sub_obj]\n\n for sub_obj in sub_objs:\n for sub_field in field.fields:\n yield from self.prepare_field(sub_obj, sub_field)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 350, "n_words": 73, "vocab_size": 55, "complexity": 9, "nloc": 22, "token_counts": 144, "n_ast_nodes": 225, "n_identifiers": 19, "random_cut": "def prepare_field(self, obj, field):\n if isinstance(field, SearchField):\n yield (\n field,\n get_weight(field.boost),\n self.prepare_value(field.get_value(obj)),\n )\n\n elif isinstance(field, AutocompleteField):\n # AutocompleteField does not define a boost parameter, so use a base weight of 'D'\n yield (field, \"D\", self.prepare_value(field.get_value(obj)))\n\n elif isinstance(field, RelatedFields):\n sub_obj = field.get_value(obj)\n if sub_obj is None:\n return\n\n if isinstance(sub_obj, Manager):\n sub_objs = sub_obj.all()\n\n else:\n if callable(sub_obj):\n sub_obj = sub_obj()\n\n sub_objs = [sub_obj]\n\n for sub_obj in sub_objs:\n for su" }, { "id": 47368, "commit_id": "0367a92881e88df36dabb81ef837e5256f3db89d", "repo": "airflow", "path": "tests/jobs/test_scheduler_job.py", "file_name": "test_scheduler_job.py", "fun_name": "test_emit_pool_starving_tasks_metrics", "commit_message": "Fix regression in pool metrics (#22939)\n\nCo-authored-by: Tanel Kiis \r\nCo-authored-by: Ash Berlin-Taylor ", "code": "def test_emit_pool_starving_tasks_metrics(self, mock_stats_gauge, dag_maker):\n self.scheduler_job = SchedulerJob(subdir=os.devnull)\n session = settings.Session()\n\n dag_id = 'SchedulerJobTest.test_emit_pool_starving_tasks_metrics'\n with dag_maker(dag_id=dag_id):\n op = DummyOperator(task_id='op', 
pool_slots=2)\n\n dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)\n\n ti = dr.get_task_instance(op.task_id, session)\n ti.state = State.SCHEDULED\n\n set_default_pool_slots(1)\n session.flush()\n\n res = self.scheduler_job._executable_task_instances_to_queued(max_tis=32, session=session)\n assert 0 == len(res)\n\n mock_stats_gauge.assert_has_calls(\n [\n mock.call('scheduler.tasks.starving', 1),\n mock.call(f'pool.starving_tasks.{Pool.DEFAULT_POOL_NAME}', 1),\n ],\n any_order=True,\n )\n mock_stats_gauge.reset_mock()\n\n set_default_pool_slots(2)\n session.flush()\n\n res = self.scheduler_job._executable_task_instances_to_queued(max_tis=32, session=session)\n assert 1 == len(res)\n\n mock_stats_gauge.assert_has_calls(\n [\n mock.call('scheduler.tasks.starving', 0),\n mock.call(f'pool.starving_tasks.{Pool.DEFAULT_POOL_NAME}', 0),\n ],\n any_order=True,\n )\n\n session.rollback()\n session.close()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 360, "n_words": 70, "vocab_size": 46, "complexity": 1, "nloc": 34, "token_counts": 223, "n_ast_nodes": 369, "n_identifiers": 41, "random_cut": "def test_emit_pool_starving_tasks_metrics(self, mock_stats_gauge, dag_maker):\n self.scheduler_job = SchedulerJob(subdir=os.devnull)\n session = settings.Session()\n\n dag_id = 'SchedulerJobTest.test_emit_pool_starving_tasks_metrics'\n with dag_maker(dag_id=dag_id):\n op = DummyOperator(task_id='op', pool_slots=2)\n\n dr = dag_maker.create_dagrun(run_type=DagRunType.SCHEDULED)\n\n ti = dr.get_task_instance(op.task_id, session)\n ti.state = State.SCHEDULED\n\n set_default_pool_slots(1)\n session.flush()\n\n res = self.scheduler_job._executable_task_instances_to_queued(max_tis=32, session=session)\n assert 0 == len(res)\n\n mock_stats_gauge.assert_has_calls(\n [\n mock.call('scheduler.tasks.starving', 1),\n mock.call(f'pool.starving_tasks.{Pool.DEFAULT_POOL_NAME}', 1),\n ],\n any_order=True,\n )\n mock_stats_gauge.reset_mock()\n\n set_default_pool_slots(2)\n session.flush()\n\n res = self.scheduler_job._executable_task_instances_to_queued(max_tis=32, session=session)\n assert 1 == len(res)\n\n mock_stats_gauge.assert_has_calls(\n [\n mock.call('scheduler.tasks.starving', 0),\n mock.call(f'pool.starving_tasks.{Pool.DEFAULT_POO" }, { "id": 123134, "commit_id": "803b90729d25fda253011c505d0189e8e63cc039", "repo": "EasyOCR", "path": "easyocr/DBNet/assets/ops/dcn/modules/deform_conv.py", "file_name": "deform_conv.py", "fun_name": "forward", "commit_message": "add dbnet", "code": "def forward(self, x, offset, mask):\n return modulated_deform_conv(x, offset, mask, self.weight, self.bias,\n self.stride, self.padding, self.dilation,\n self.groups, self.deformable_groups)\n\n", "url": "https://github.com/JaidedAI/EasyOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 94, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 48, "n_ast_nodes": 62, "n_identifiers": 13, "random_cut": "def forward(self, x, offset, mask):\n return modulated_deform_conv(x, offset, mask, self.weight, self.bias,\n self.stride, self.padding, self.dilation,\n " }, { "id": 317001, "commit_id": "c2fefe03b2dc800f42de695f0b73a8f26621d882", "repo": "core", "path": "tests/components/bluetooth/test_init.py", "file_name": "test_init.py", "fun_name": "test_register_callback_by_address", "commit_message": "Add support for subscribing to bluetooth callbacks by address (#74773)", 
"code": "async def test_register_callback_by_address(hass, mock_bleak_scanner_start):\n \n mock_bt = []\n callbacks = []\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 10, "vocab_size": 8, "complexity": 2, "nloc": 55, "token_counts": 318, "n_ast_nodes": 30, "n_identifiers": 5, "random_cut": "async def test_register_callback_by_address(hass, mock_bleak_scanner_start):\n \n " }, { "id": 179196, "commit_id": "b065879054492fbfdfce9d767f13e02019e7764b", "repo": "gradio", "path": "website/homepage/render_html.py", "file_name": "render_html.py", "fun_name": "render_docs", "commit_message": "added emojis to navbar; added guides main page", "code": "def render_docs():\n if os.path.exists(\"generated/colab_links.json\"):\n with open(\"generated/colab_links.json\") as demo_links_file:\n try:\n demo_links = json.load(demo_links_file)\n except ValueError:\n demo_links = {}\n else: # docs will be missing demo links\n demo_links = {}\n SCREENSHOT_FOLDER = \"dist/assets/demo_screenshots\"\n os.makedirs(SCREENSHOT_FOLDER, exist_ok=True)\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 111, "n_words": 33, "vocab_size": 27, "complexity": 5, "nloc": 46, "token_counts": 300, "n_ast_nodes": 107, "n_identifiers": 13, "random_cut": "def render_docs():\n if os.path.exists(\"ge" }, { "id": 115670, "commit_id": "414b259284343c26fba31b29121c6462b3666fb9", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/mysql_handler/tests/test_mysql_handler.py", "file_name": "test_mysql_handler.py", "fun_name": "test_1_native_query_show_dbs", "commit_message": "Test added", "code": "def test_1_native_query_show_dbs(self):\n dbs = self.handler.native_query(\"SHOW DATABASES;\")\n assert dbs[' '] is not RESPONSE_TYPE.ERROR\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 25, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 3, "token_counts": 25, "n_ast_nodes": 43, "n_identifiers": 7, "random_cut": "def test_1_native_query_show_dbs(self):\n dbs = self.handler.native_query(\"SHOW DATABASE" }, { "id": 178702, "commit_id": "7f9a8a2b207dfdf46e1264d6d9b61466b80875d0", "repo": "Nuitka", "path": "nuitka/build/SconsCompilerSettings.py", "file_name": "SconsCompilerSettings.py", "fun_name": "setupCCompiler", "commit_message": "macOS: Minor cleanups", "code": "def setupCCompiler(env, lto_mode, pgo_mode, job_count):\n # This is driven by many branches on purpose and has a lot of things\n # to deal with for LTO checks and flags, pylint: disable=too-many-branches,too-many-statements\n\n # Enable LTO for compiler.\n _enableLtoSettings(\n env=env,\n lto_mode=lto_mode,\n pgo_mode=pgo_mode,\n job_count=job_count,\n )\n\n _detectWindowsSDK(env)\n _enableC11Settings(env)\n\n if env.gcc_mode:\n # Support for gcc and clang, restricting visibility as much as possible.\n env.Append(CCFLAGS=[\"-fvisibility=hidden\"])\n\n if not env.c11_mode:\n env.Append(CXXFLAGS=[\"-fvisibility-inlines-hidden\"])\n\n if isWin32Windows():\n # On Windows, exporting to DLL need to be controlled.\n env.Append(LINKFLAGS=[\"-Wl,--exclude-all-symbols\"])\n\n # Make sure we handle import library on our own and put it into the\n # build directory.\n env.Append(\n LINKFLAGS=[\n \"-Wl,--out-implib,%s\" % os.path.join(env.source_dir, \"import.lib\")\n ]\n )\n\n 
# Make it clear how to handle integer overflows, namely by wrapping around\n # to negative values.\n env.Append(CCFLAGS=[\"-fwrapv\"])\n\n if not env.low_memory:\n # Avoid IO for compilation as much as possible, this should make the\n # compilation more memory hungry, but also faster.\n env.Append(CCFLAGS=\"-pipe\")\n\n # Support for clang.\n if \"clang\" in env.the_cc_name:\n env.Append(CCFLAGS=[\"-w\"])\n env.Append(CPPDEFINES=[\"_XOPEN_SOURCE\"])\n\n # Don't export anything by default, this should create smaller executables.\n env.Append(CCFLAGS=[\"-fvisibility=hidden\", \"-fvisibility-inlines-hidden\"])\n\n if env.debug_mode:\n env.Append(CCFLAGS=[\"-Wunused-but-set-variable\"])\n\n # Support for macOS standalone backporting.\n if isMacOS():\n setEnvironmentVariable(env, \"MACOSX_DEPLOYMENT_TARGET\", env.macos_min_version)\n\n target_flag = \"--target=%s-apple-macos%s\" % (\n env.macos_target_arch,\n env.macos_min_version,\n )\n\n env.Append(CCFLAGS=[target_flag])\n env.Append(LINKFLAGS=[target_flag])\n\n # The 32 bits MinGW does not default for API level properly, so help it.\n if env.mingw_mode:\n # Windows XP\n env.Append(CPPDEFINES=[\"_WIN32_WINNT=0x0501\"])\n\n # Unicode entry points for programs.\n if env.mingw_mode:\n env.Append(LINKFLAGS=[\"-municode\"])\n\n # Detect the gcc version\n if env.gcc_version is None and env.gcc_mode and not env.clang_mode:\n env.gcc_version = myDetectVersion(env, env.the_compiler)\n\n # Older g++ complains about aliasing with Py_True and Py_False, but we don't\n # care.\n if env.gcc_mode and not env.clang_mode and env.gcc_version < (4, 5):\n env.Append(CCFLAGS=[\"-fno-strict-aliasing\"])\n\n # For gcc 4.6 or higher, there are some new interesting functions.\n if env.gcc_mode and not env.clang_mode and env.gcc_version >= (4, 6):\n env.Append(CCFLAGS=[\"-fpartial-inlining\"])\n\n if env.debug_mode:\n env.Append(CCFLAGS=[\"-Wunused-but-set-variable\"])\n\n # Save some memory for gcc by not tracing macro code locations at all.\n if (\n not env.debug_mode\n and env.gcc_mode\n and not env.clang_mode\n and env.gcc_version >= (5,)\n ):\n env.Append(CCFLAGS=[\"-ftrack-macro-expansion=0\"])\n\n # We don't care about deprecations.\n if env.gcc_mode and not env.clang_mode:\n env.Append(CCFLAGS=[\"-Wno-deprecated-declarations\"])\n\n # The var-tracking does not scale, disable it. Should we really need it, we\n # can enable it. TODO: Does this cause a performance loss?\n if env.gcc_mode and not env.clang_mode:\n env.Append(CCFLAGS=[\"-fno-var-tracking\"])\n\n # For large files, these can issue warnings about disabling\n # itself, while we do not need it really.\n if env.gcc_mode and not env.clang_mode and env.gcc_version >= (6,):\n env.Append(CCFLAGS=[\"-Wno-misleading-indentation\"])\n\n # Disable output of notes, e.g. on struct alignment layout changes for\n # some arches, we don't care.\n if env.gcc_mode and not env.clang_mode:\n env.Append(CCFLAGS=[\"-fcompare-debug-second\"])\n\n # Prevent using LTO when told not to use it, causes errors with some\n # static link libraries.\n if (\n env.gcc_mode\n and not env.clang_mode\n and env.static_libpython\n and not env.lto_mode\n ):\n env.Append(CCFLAGS=[\"-fno-lto\"])\n env.Append(LINKFLAGS=[\"-fno-lto\"])\n\n # Set optimization level for gcc and clang in LTO mode\n if env.gcc_mode and env.lto_mode:\n if env.debug_mode:\n env.Append(LINKFLAGS=[\"-Og\"])\n else:\n # For LTO with static libpython combined, there are crashes with Python core\n # being inlined, so we must refrain from that. 
On Windows there is no such\n # thing, and Nuitka-Python is not affected.\n env.Append(\n LINKFLAGS=[\n \"-O3\"\n if env.nuitka_python or os.name == \"nt\" or not env.static_libpython\n else \"-O2\"\n ]\n )\n\n # When debugging, optimize less than when optimizing, when not remove\n # assertions.\n if env.debug_mode:\n if env.clang_mode or (env.gcc_mode and env.gcc_version >= (4, 8)):\n env.Append(CCFLAGS=[\"-Og\"])\n elif env.gcc_mode:\n env.Append(CCFLAGS=[\"-O1\"])\n elif env.msvc_mode:\n env.Append(CCFLAGS=[\"-O2\"])\n else:\n if env.gcc_mode:\n env.Append(\n CCFLAGS=[\n \"-O3\"\n if env.nuitka_python or os.name == \"nt\" or not env.static_libpython\n else \"-O2\"\n ]\n )\n elif env.msvc_mode:\n env.Append(\n CCFLAGS=[\n \"/Ox\", # Enable most speed optimization\n \"/GF\", # Eliminate duplicate strings.\n \"/Gy\", # Function level object storage, to allow removing unused ones\n ]\n )\n\n env.Append(CPPDEFINES=[\"__NUITKA_NO_ASSERT__\"])\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1712, "n_words": 598, "vocab_size": 323, "complexity": 54, "nloc": 108, "token_counts": 716, "n_ast_nodes": 1239, "n_identifiers": 37, "random_cut": "def setupCCompiler(env, lto_mode, pgo_mode, job_count):\n # This is driven by many branches on purpose and has a lot of things\n # to deal with for LTO checks and flags, pylint: disable=too-many-branches,too-many-statements\n\n # Enable LTO for compiler.\n _enableLtoSettings(\n env=env,\n lto_mode=lto_mode,\n pgo_mode=pgo_mode,\n job_count=job_count,\n )\n\n _detectWindowsSDK(env)\n _enableC11Settings(env)\n\n if env.gcc_mode:\n # Support for gcc and clang, restricting visibility as much as possible.\n env.Append(CCFLAGS=[\"-fvisibility=hidden\"])\n\n if not env.c11_mode:\n env.Append(CXXFLAGS=[\"-fvisibility-inlines-hidden\"])\n\n if isWin32Windows():\n # On Windows, exporting to DLL need to be controlled.\n env.Append(LINKFLAGS=[\"-Wl,--exclude-all-symbols\"])\n\n # Make sure we handle import library on our own and put it into the\n # build directory.\n env.Append(\n LINKFLAGS=[\n \"-Wl,--out-implib,%s\" % os.path.join(env.source_dir, \"import.lib\")\n ]\n )\n\n # Make it clear how to handle integer overflows, namely by wrapping around\n # to negative values.\n env.Append(CCFLAGS=[\"-fwrapv\"])\n\n if not env.low_memory:\n # Avoid IO for compilation as much as possible, this should make the\n # compilation more memory hungry, but also faster.\n env.Append(CCFLAGS=\"-pipe\")\n\n # Support for clang.\n if \"clang\" in env.the_cc_name:\n env.Append(CCFLAGS=[\"-w\"])\n env.Append(CPPDEFINES=[\"_XOPEN_SOURCE\"])\n\n # Don't export anything by default, this should create smaller executables.\n env.Append(CCFLAGS=[\"-fvisibility=hidden\", \"-fvisibility-inlines-hidden\"])\n\n if env.debug_mode:\n env.Append(CCFLAGS=[\"-Wunused-but-set-variable\"])\n\n # Support for macOS standalone backporting.\n if isMacOS():\n setEnvironmentVariable(env, \"MACOSX_DEPLOYMENT_TARGET\", env.macos_min_version)\n\n target_flag = \"--target=%s-apple-macos%s\" % (\n env.macos_target_arch,\n env.macos_min_version,\n )\n\n env.Append(CCFLAGS=[target_flag])\n env.Append(LINKFLAGS=[target_flag])\n\n # The 32 bits MinGW does not default for API level properly, so help it.\n if env.mingw_mode:\n # Windows XP\n env.Append(CPPDEFINES=[\"_WIN32_WINNT=0x0501\"])\n\n # Unicode entry points for programs.\n if env.mingw_mode:\n env.Append(LINKFLAGS=[\"-municode\"])\n\n # Detect the gcc version\n if 
env.gcc_version is None and env." }, { "id": 54260, "commit_id": "58b51caba356ad021a3b7b76f61d28a40884ba11", "repo": "prefect", "path": "tests/blocks/test_core.py", "file_name": "test_core.py", "fun_name": "test_registering_and_getting_blocks", "commit_message": "Require version", "code": "async def test_registering_and_getting_blocks():\n with pytest.raises(ValueError, match=\"(No block spec exists)\"):\n get_block_spec(\"is anyone home\", \"1.0\")\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 22, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 9, "token_counts": 66, "n_ast_nodes": 44, "n_identifiers": 6, "random_cut": "async def test_registering_and_getting_blocks():\n with pytest.raises(ValueError, match=\"(No block spec exists)\"):\n get_block_spec(\"is anyo" }, { "id": 47799, "commit_id": "03bef084b3f1611e1becdd6ad0ff4c0d2dd909ac", "repo": "airflow", "path": "scripts/tools/initialize_virtualenv.py", "file_name": "initialize_virtualenv.py", "fun_name": "get_python_version", "commit_message": "add script to initialise virtualenv (#22971)\n\n\r\n\r\nCo-authored-by: Jarek Potiuk ", "code": "def get_python_version() -> str:\n \n major = sys.version_info[0]\n minor = sys.version_info[1]\n return f\"{major}.{minor}\"\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 24, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 7, "token_counts": 26, "n_ast_nodes": 52, "n_identifiers": 6, "random_cut": "def get_python_version() -> str:\n \n major = sys" }, { "id": 77710, "commit_id": "4b3c57d72ced0f64378cef26fa12a77bce966ac1", "repo": "wagtail", "path": "wagtail/admin/viewsets/chooser.py", "file_name": "chooser.py", "fun_name": "chosen_view", "commit_message": "Add ChooserViewSet", "code": "def chosen_view(self):\n return self.chosen_view_class.as_view(\n model=self.model,\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 4, "token_counts": 19, "n_ast_nodes": 30, "n_identifiers": 5, "random_cut": "def chosen_view(self):\n return self.chosen_view_class.as_view(\n " }, { "id": 53586, "commit_id": "e41e3a0b19d7fdada1c7feff4dffe9841b39269e", "repo": "prefect", "path": "src/prefect/orion/database/migrations/versions/postgresql/5f376def75c3_.py", "file_name": "5f376def75c3_.py", "fun_name": "upgrade", "commit_message": "Fix syntax error in autogenerated migration file", "code": "def upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table(\n \"block_data\",\n sa.Column(\n \"id\",\n prefect.orion.utilities.database.UUID(),\n server_default=sa.text(\"(GEN_RANDOM_UUID())\"),\n nullable=False,\n ),\n sa.Column(\n \"created\",\n prefect.orion.utilities.database.Timestamp(timezone=True),\n server_default=sa.text(\"CURRENT_TIMESTAMP\"),\n nullable=False,\n ),\n sa.Column(\n \"updated\",\n prefect.orion.utilities.database.Timestamp(timezone=True),\n server_default=sa.text(\"CURRENT_TIMESTAMP\"),\n nullable=False,\n ),\n sa.Column(\"name\", sa.String(), nullable=False),\n sa.Column(\"blockref\", sa.String(), nullable=False),\n sa.Column(\n \"data\",\n prefect.orion.utilities.database.JSON(astext_type=sa.Text()),\n server_default=\"{}\",\n nullable=False,\n ),\n sa.PrimaryKeyConstraint(\"id\", name=op.f(\"pk_block_data\")),\n )\n op.create_index(op.f(\"ix_block_data__name\"), \"block_data\", [\"name\"], unique=True)\n op.create_index(\n op.f(\"ix_block_data__updated\"), \"block_data\", [\"updated\"], unique=False\n )\n # ### end Alembic commands ###\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 351, "n_words": 64, "vocab_size": 42, "complexity": 1, "nloc": 35, "token_counts": 243, "n_ast_nodes": 389, "n_identifiers": 24, "random_cut": "def upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n \"block_data\",\n sa.Column(\n \"id\",\n prefect.orion.utilities.database.UUID(),\n server_default=sa.text(\"(GEN_RANDOM_UUID())\"),\n nullable=False,\n ),\n sa.Column(\n \"created\",\n prefect.orion.utilities.database.Timestamp(timezone=True),\n server_default=sa.text(\"CURRENT_TIMESTAMP\"),\n nullable=False,\n ),\n sa.Column(\n \"updated\",\n prefect.orion.utilities.database.Timestamp(timezone=True),\n " }, { "id": 252350, "commit_id": "cd4a74fae7cbd8119afc3900597f798ec1604db7", "repo": "mitmproxy", "path": "test/mitmproxy/test_eventsequence.py", "file_name": "test_eventsequence.py", "fun_name": "test_udp_flow", "commit_message": "Add support for raw UDP. 
(#5414)", "code": "def test_udp_flow(err):\n f = tflow.tudpflow(err=err)\n i = eventsequence.iterate(f)\n assert isinstance(next(i), layers.udp.UdpStartHook)\n assert len(f.messages) == 0\n assert isinstance(next(i), layers.udp.UdpMessageHook)\n assert len(f.messages) == 1\n assert isinstance(next(i), layers.udp.UdpMessageHook)\n assert len(f.messages) == 2\n if err:\n assert isinstance(next(i), layers.udp.UdpErrorHook)\n else:\n assert isinstance(next(i), layers.udp.UdpEndHook)\n\n\n@pytest.mark.parametrize(\n \"resp, err\",\n [\n (False, False),\n (True, False),\n (False, True),\n (True, True),\n ],\n)", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"resp, err\",\n [\n (False, False),\n (True, False),\n (False, True),\n (True, True),\n ],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 130, "n_words": 52, "vocab_size": 31, "complexity": 2, "nloc": 13, "token_counts": 125, "n_ast_nodes": 244, "n_identifiers": 21, "random_cut": "def test_udp_flow(err):\n f = tflow.tudpflow(err=err)\n i = eventsequence.iterate(f)\n assert is" }, { "id": 274478, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/legacy_tf_layers/variable_scope_shim_test.py", "file_name": "variable_scope_shim_test.py", "fun_name": "testGetVar", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def testGetVar(self):\n vs = variable_scope._get_default_variable_store()\n v = vs.get_variable(\"v\", [1])\n v1 = vs.get_variable(\"v\", [1])\n self.assertIs(v, v1)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 42, "n_words": 15, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 44, "n_ast_nodes": 73, "n_identifiers": 9, "random_cut": "def testGetVar(self):\n vs = variable_scope._get_default_variable_store()\n v = vs.get_variable(\"v\", [1])\n v1 = vs.get_varia" }, { "id": 3855, "commit_id": "61b0e9e196ea07795d47effc670bcb981117c030", "repo": "airbyte", "path": "tools/ci_code_validator/tests/test_tools.py", "file_name": "test_tools.py", "fun_name": "test_tool", "commit_message": "🎉 Single py checker (#10246)", "code": "def test_tool(tmp_path, toml_config_file, cmd, package_dir, expected_file):\n cmd = cmd.format(package_dir=package_dir, toml_config_file=toml_config_file)\n\n proc = subprocess.Popen(cmd.split(\" \"), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, _ = proc.communicate()\n file_log = tmp_path / \"temp.log\"\n file_log.write_bytes(out)\n assert file_log.is_file() is True\n issues_file = tmp_path / \"issues.json\"\n with requests_mock.Mocker() as m:\n m.get('/api/authentication/validate', json={\"valid\": True})\n m.get(\"/api/rules/search\", json={\"rules\": []})\n m.post(\"/api/rules/create\", json={})\n parser = LogParser(issues_file, host=\"http://fake.com/\", token=\"fake_token\")\n assert getattr(parser, f'from_{cmd.split(\" \")[0]}')(file_log) == 0\n\n assert issues_file.is_file() is True\n data = json.loads(issues_file.read_text())\n for issue in data[\"issues\"]:\n issue[\"primaryLocation\"][\"filePath\"] = \"/\".join(issue[\"primaryLocation\"][\"filePath\"].split(\"/\")[-2:])\n\n expected_data = json.loads(Path(expected_file).read_text())\n assert json.dumps(data, sort_keys=True, separators=(',', ': ')) == json.dumps(expected_data, sort_keys=True, separators=(',', ': '))\n", "url": "https://github.com/airbytehq/airbyte.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 191, "n_words": 87, "vocab_size": 67, "complexity": 2, "nloc": 20, "token_counts": 272, "n_ast_nodes": 474, "n_identifiers": 42, "random_cut": "def test_tool(tmp_path, toml_config_file, cmd, package_dir, expected_file):\n cmd = cmd.format(package_dir=package_dir, toml_config_file=toml_config_file)\n\n proc = subprocess.Popen(cmd.split(\" \"), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, _ = proc.communicate()\n file_log = tmp_path / \"temp.log\"\n file_log.write_bytes(out)\n assert file_log.is_file() is True\n issues_file = tmp_path / \"issues.json\"\n with requests_mock.Mocker() as m:\n m.get('/api/authentication/validate', json={\"valid\": True})\n m.get(\"/api/rules/search\", json={\"rules\": []})\n m.post(\"/api/rules/create\", json={})\n parser = LogParser(issues_file, host=\"http://fake.com/\", token=\"fake_token\")\n assert getattr(parser, f'from_{cmd.split(\" \")[0]}')(file_log) == 0\n\n assert issues_file.is_file() is True\n data = json.loads(issues_file.read_text())\n for issue in data[\"issues\"]:\n issue[\"primaryLocation\"][\"filePath\"] = \"/\".join(issue[\"primaryLocation\"][\"filePath\"].split(\"/\")[-2:])\n\n expected_data = json.loads(Path(expected_file).read_text())\n assert json.dumps(data, sort_keys=True, separators=(',', ': ')) == json.dumps(expected_data, sort_keys=True, separators=(',', ': '))\n" }, { "id": 133181, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/ml_utils/tests/test_mlflow.py", "file_name": "test_mlflow.py", "fun_name": "test_experiment_id", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_experiment_id(self):\n self.mlflow_util.setup_mlflow(tracking_uri=self.tracking_uri, experiment_id=\"0\")\n assert self.mlflow_util.experiment_id == \"0\"\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 21, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 29, "n_ast_nodes": 49, "n_identifiers": 6, "random_cut": "def test_experiment_id(self):\n self.mlflow_util.setup_mlflow(tracking_uri=self.tracki" }, { "id": 201941, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/bulk_create/tests.py", "file_name": "tests.py", "fun_name": "_test_update_conflicts_two_fields", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _test_update_conflicts_two_fields(self, unique_fields):\n TwoFields.objects.bulk_create(\n [\n TwoFields(f1=1, f2=1, name=\"a\"),\n TwoFields(f1=2, f2=2, name=\"b\"),\n ]\n )\n self.assertEqual(TwoFields.objects.count(), 2)\n\n conflicting_objects = [\n TwoFields(f1=1, f2=1, name=\"c\"),\n TwoFields(f1=2, f2=2, name=\"d\"),\n ]\n TwoFields.objects.bulk_create(\n conflicting_objects,\n update_conflicts=True,\n unique_fields=unique_fields,\n update_fields=[\"name\"],\n )\n self.assertEqual(TwoFields.objects.count(), 2)\n self.assertCountEqual(\n TwoFields.objects.values(\"f1\", \"f2\", \"name\"),\n [\n {\"f1\": 1, \"f2\": 1, \"name\": \"c\"},\n {\"f1\": 2, \"f2\": 2, \"name\": \"d\"},\n ],\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 302, "n_words": 52, "vocab_size": 35, "complexity": 1, "nloc": 26, "token_counts": 180, 
"n_ast_nodes": 287, "n_identifiers": 16, "random_cut": "def _test_update_conflicts_two_fields(self, unique_fields):\n TwoFields.objects.bulk_create(\n [\n TwoFields(f1=1, f2=1, name=\"a\"),\n TwoFields(f1=2, f2=2, name=\"b\"),\n ]\n )\n self.assertEqual(TwoFields.objects.count(), 2)\n\n conflicting_objects = [\n TwoFields(f1=1, f2=1, name=\"c\"),\n TwoFields(f1=2, f2=2, name=\"d\"),\n ]\n TwoFields.objects.bulk_create(\n conflicting_objects,\n update_conflicts=True,\n unique_fields=unique_fields,\n update_fields=[\"name\"],\n )\n self.assertEqual(TwoFields.objects.count(), 2)\n self.assertCountEqual(\n TwoFields.objects.values(\"f1\", \"f2\", \"name\"),\n " }, { "id": 303258, "commit_id": "a502a8798ff74eb6185473df7f69553fc4663634", "repo": "core", "path": "tests/components/skybell/__init__.py", "file_name": "__init__.py", "fun_name": "_patch_skybell", "commit_message": "Add config flow to skybell (#70887)", "code": "def _patch_skybell() -> None:\n return patch(\n \"homeassistant.components.skybell.config_flow.Skybell.async_send_request\",\n return_value={\"id\": USER_ID},\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 29, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 20, "n_ast_nodes": 35, "n_identifiers": 4, "random_cut": "def _patch_skybell() -> None:\n return patch(\n \"homeassistant.components.skybell.config_flow.Skybell.async_send_request\",\n return_value={\"id\": USER_ID},\n )\n" }, { "id": 18323, "commit_id": "6a6664b154a2f3a123e4a750457e1ec39fd74e22", "repo": "ccxt", "path": "python/ccxt/async_support/gateio.py", "file_name": "gateio.py", "fun_name": "parse_transaction", "commit_message": "1.72.78\n\n[ci skip]", "code": "def parse_transaction(self, transaction, currency=None):\n #\n # deposits\n #\n # {\n # \"id\": \"d33361395\",\n # \"currency\": \"USDT_TRX\",\n # \"address\": \"TErdnxenuLtXfnMafLbfappYdHtnXQ5U4z\",\n # \"amount\": \"100\",\n # \"txid\": \"ae9374de34e558562fe18cbb1bf9ab4d9eb8aa7669d65541c9fa2a532c1474a0\",\n # \"timestamp\": \"1626345819\",\n # \"status\": \"DONE\",\n # \"memo\": \"\"\n # }\n #\n # withdrawals\n id = self.safe_string(transaction, 'id')\n type = None\n amount = self.safe_string(transaction, 'amount')\n if id[0] == 'b':\n # GateCode handling\n type = 'deposit' if Precise.string_gt(amount, '0') else 'withdrawal'\n amount = Precise.string_abs(amount)\n elif id is not None:\n type = self.parse_transaction_type(id[0])\n currencyId = self.safe_string(transaction, 'currency')\n code = self.safe_currency_code(currencyId)\n txid = self.safe_string(transaction, 'txid')\n rawStatus = self.safe_string(transaction, 'status')\n status = self.parse_transaction_status(rawStatus)\n address = self.safe_string(transaction, 'address')\n fee = self.safe_number(transaction, 'fee')\n tag = self.safe_string(transaction, 'memo')\n if tag == '':\n tag = None\n timestamp = self.safe_timestamp(transaction, 'timestamp')\n return {\n 'info': transaction,\n 'id': id,\n 'txid': txid,\n 'currency': code,\n 'amount': self.parse_number(amount),\n 'network': None,\n 'address': address,\n 'addressTo': None,\n 'addressFrom': None,\n 'tag': tag,\n 'tagTo': None,\n 'tagFrom': None,\n 'status': status,\n 'type': type,\n 'timestamp': timestamp,\n 'datetime': self.iso8601(timestamp),\n 'updated': None,\n 'fee': fee,\n }\n", "url": "https://github.com/ccxt/ccxt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, 
"n_whitespaces": 688, "n_words": 156, "vocab_size": 102, "complexity": 5, "nloc": 40, "token_counts": 260, "n_ast_nodes": 457, "n_identifiers": 27, "random_cut": "def parse_transaction(self, transaction, currency=None):\n #\n # deposits\n #\n # {\n # \"id\": \"d33361395\",\n # \"currency\": \"USDT_TRX\",\n # \"address\": \"TErdnxenuLtXfnMafLbfappYdHtnXQ5U4z\",\n # \"amount\": \"100\",\n # \"txid\": \"ae9374de34e558562fe18cbb1bf9ab4d9eb8aa7669d65541c9fa2a532c1474a0\",\n # \"timestamp\": \"1626345819\",\n # \"status\": \"DONE\",\n # \"memo\": \"\"\n # }\n #\n # withdrawals\n id = self.safe_string(transaction, 'id')\n type = None\n amount = self.safe_string(transaction, 'amount')\n if id[0] == 'b':\n # GateCode handling\n type = 'deposit' if Precise.string_gt(amount, '0') else 'withdrawal'\n amount = Precise.string_abs(amount)\n elif id is not None:\n type = self.parse_transaction_type(id[0])\n currencyId = self.safe_string(transaction, 'currency')\n code = self.safe_currency_code(currencyId)\n txid = self.safe_string(transaction, 'txid')\n rawSta" }, { "id": 64869, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/invoice_discounting/test_invoice_discounting.py", "file_name": "test_invoice_discounting.py", "fun_name": "create_invoice_discounting", "commit_message": "style: format code with black", "code": "def create_invoice_discounting(invoices, **args):\n\targs = frappe._dict(args)\n\tinv_disc = frappe.new_doc(\"Invoice Discounting\")\n\tinv_disc.posting_date = args.posting_date or nowdate()\n\tinv_disc.company = args.company or \"_Test Company\"\n\tinv_disc.bank_account = args.bank_account\n\tinv_disc.short_term_loan = args.short_term_loan\n\tinv_disc.accounts_receivable_credit = args.accounts_receivable_credit\n\tinv_disc.accounts_receivable_discounted = args.accounts_receivable_discounted\n\tinv_disc.accounts_receivable_unpaid = args.accounts_receivable_unpaid\n\tinv_disc.short_term_loan = args.short_term_loan\n\tinv_disc.bank_charges_account = args.bank_charges_account\n\tinv_disc.bank_account = args.bank_account\n\tinv_disc.loan_start_date = args.start or nowdate()\n\tinv_disc.loan_period = args.period or 30\n\tinv_disc.bank_charges = flt(args.bank_charges)\n\n\tfor d in invoices:\n\t\tinv_disc.append(\"invoices\", {\"sales_invoice\": d})\n\tinv_disc.insert()\n\n\tif not args.do_not_submit:\n\t\tinv_disc.submit()\n\n\treturn inv_disc\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 50, "n_words": 72, "vocab_size": 49, "complexity": 7, "nloc": 22, "token_counts": 165, "n_ast_nodes": 271, "n_identifiers": 27, "random_cut": "def create_invoice_discounting(invoices, **args):\n\targs = frappe._dict(args)\n\tinv_disc = frappe.new_doc(\"Invoice Discounting\")\n\tinv_disc.posting_date = args.posting_date or nowdate()\n\tinv_disc.company = args.company or \"_Test Company\"\n\tinv_disc.bank_account = args.bank_account\n\tinv_disc.short_term_loan = args.short_term_loan\n\tinv_disc.accounts_receivable_credit = args.accounts_receivable_credit\n\tinv_disc.accounts_receivable_discounted = args.accounts_receivable_discounted\n\tinv_disc.accounts_receivable_unpaid = args.accounts_receivable_unpaid\n\tinv_disc.short_term_loan = args.short_term_loan\n\tinv_disc.bank_charges_account = args.bank_charges_account\n\tinv_disc.bank_account = args.bank_account\n\tinv_disc.loan_start_date = args.start or " }, { "id": 12415, "commit_id": 
"674e8121fb5dfdac4ce88a8ade1d248d16b75617", "repo": "jina", "path": "jina/orchestrate/flow/base.py", "file_name": "base.py", "fun_name": "_get_address_table", "commit_message": "fix: success box ui", "code": "def _get_address_table(self, address_table):\n _protocol = str(self.protocol)\n if self.gateway_args.ssl_certfile and self.gateway_args.ssl_keyfile:\n _protocol = f'{self.protocol}S'\n address_table.add_row(\n ':link:', 'Protocol', f':closed_lock_with_key: {_protocol}'\n )\n\n else:\n address_table.add_row(':link:', 'Protocol', _protocol)\n\n _protocol = _protocol.lower()\n address_table.add_row(\n ':house:',\n 'Local',\n f'[link={_protocol}://{self.host}:{self.port}]{self.host}:{self.port}[/]',\n )\n address_table.add_row(\n ':lock:',\n 'Private',\n f'[link={_protocol}://{self.address_private}:{self.port}]{self.address_private}:{self.port}[/]',\n )\n\n if self.address_public:\n address_table.add_row(\n ':earth_africa:',\n 'Public',\n f'[link={_protocol}://{self.address_public}:{self.port}]{self.address_public}:{self.port}[/]',\n )\n\n if self.protocol == GatewayProtocolType.HTTP:\n\n _address = [\n f'[link={_protocol}://localhost:{self.port}/docs]Local[/]',\n f'[link={_protocol}://{self.address_private}:{self.port}/docs]Private[/]',\n ]\n if self.address_public:\n _address.append(\n f'[link={_protocol}://{self.address_public}:{self.port}/docs]Public[/]'\n )\n address_table.add_row(\n ':speech_balloon:',\n 'Swagger UI [dim](/docs)[/]',\n '·'.join(_address),\n )\n\n _address = [\n f'[link={_protocol}://localhost:{self.port}/redoc]Local[/]',\n f'[link={_protocol}://{self.address_private}:{self.port}/redoc]Private[/]',\n ]\n\n if self.address_public:\n _address.append(\n f'[link={_protocol}://{self.address_public}:{self.port}/redoc]Public[/]'\n )\n\n address_table.add_row(\n ':books:',\n 'Redoc [dim](/redoc)[/]',\n '·'.join(_address),\n )\n\n if self.gateway_args.expose_graphql_endpoint:\n _address = [\n f'[link={_protocol}://localhost:{self.port}/graphql]Local[/]',\n f'[link={_protocol}://{self.address_private}:{self.port}/graphql]Private[/]',\n ]\n\n if self.address_public:\n _address.append(\n f'[link={_protocol}://{self.address_public}:{self.port}/graphql]Public[/]'\n )\n\n address_table.add_row(\n ':strawberry:',\n 'GraphQL UI [dim](/graphql)[/]',\n '·'.join(_address),\n )\n\n if self.monitoring:\n for name, deployment in self:\n _address = [\n f'[link=http://localhost:{deployment.args.port_monitoring}]Local[/]',\n f'[link=http://{self.address_private}:{deployment.args.port_monitoring}]Private[/]',\n ]\n\n if self.address_public:\n _address.append(\n f'[link=http://{self.address_public}:{deployment.args.port_monitoring}]Public[/]'\n )\n\n if deployment.args.monitoring:\n address_table.add_row(\n ':bar_chart:',\n f'Monitor [b]{name}:{deployment.args.port_monitoring}[/]',\n '·'.join(_address),\n )\n\n return self[GATEWAY_NAME].args.port_monitoring\n else:\n return self._common_kwargs.get(\n 'port_monitoring', __default_port_monitoring__\n )\n\n return address_table\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1345, "n_words": 138, "vocab_size": 78, "complexity": 13, "nloc": 89, "token_counts": 315, "n_ast_nodes": 830, "n_identifiers": 30, "random_cut": "def _get_address_table(self, address_table):\n _protocol = str(self.protocol)\n if self.gateway_args.ssl_certfile and self.gateway_args.ssl_keyfile:\n _protocol = f'{self.protocol}S'\n address_table.add_row(\n ':link:', 'Protocol', 
f':closed_lock_with_key: {_protocol}'\n )\n\n else:\n address_table.add_row(':link:', 'Protocol', _protocol)\n\n _protocol = _protocol.lower()\n address_table.add_row(\n ':house:',\n 'Local',\n f'[link={_protocol}://{self.host}:{self.port}]{self.host}:{self.port}[/]',\n )\n address_table.add_row(\n ':lock:',\n 'Private',\n f'[link={_protocol}://{self.address_private}:{self.port}]{self.address_private}:{self.port}[/]',\n )\n\n if self.address_public:\n address_table.add_row(\n ':earth_africa:',\n 'Public',\n f'[link={_protocol}://{self.address_public}:{self.port}]{self.address_public}:{self.port}[/]',\n )\n\n if self.protocol == GatewayProtocolType.HTTP:\n\n _address = [\n f'[link={_protocol}://localhost:{self.port}/docs]Local[/]',\n f'[link={_protocol}://{self.address_private}:{self.port}/docs]Private[/]',\n ]\n if self.address_public:\n _address.append(\n f'[link={_protocol}://{self.address_public}:{self.port}/docs]Public[/]'\n )\n address_table.add_row(\n ':speech_balloon:',\n 'Swagger UI [dim](/docs)[/]',\n '·'.join(_address),\n )\n\n _address = [\n f'[link={_protocol}://localhost:{self.port}/redoc]Local[/]',\n f'[link={_protocol}://{self.address_private}:{self.port}/redoc]Private[/]',\n ]\n\n if self.address_public:\n _address.append(\n f'[link={_protocol}" }, { "id": 123582, "commit_id": "df4293473d2fb6e887e31522cab5aff95e201581", "repo": "sqlmap", "path": "plugins/dbms/extremedb/enumeration.py", "file_name": "enumeration.py", "fun_name": "searchColumn", "commit_message": "Fixing DeprecationWarning (logger.warn)", "code": "def searchColumn(self):\n warnMsg = \"on eXtremeDB it is not possible to search columns\"\n logger.warning(warnMsg)\n\n return []\n", "url": "https://github.com/sqlmapproject/sqlmap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 36, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 4, "token_counts": 17, "n_ast_nodes": 31, "n_identifiers": 5, "random_cut": "def searchColumn(self):\n warnMsg = \"on eXtremeDB it is not possible to search columns\"\n logger.warning(warnMsg" }, { "id": 75114, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/test_admin_views.py", "file_name": "test_admin_views.py", "fun_name": "test_simple_with_collection_nesting", "commit_message": "Reformat with black", "code": "def test_simple_with_collection_nesting(self):\n root_collection = Collection.get_first_root_node()\n evil_plans = root_collection.add_child(name=\"Evil plans\")\n evil_plans.add_child(name=\"Eviler plans\")\n\n response = self.get()\n # \"Eviler Plans\" should be prefixed with ↳ (↳) and 4 non-breaking spaces.\n self.assertContains(response, \"    ↳ Eviler plans\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 72, "n_words": 31, "vocab_size": 27, "complexity": 1, "nloc": 6, "token_counts": 45, "n_ast_nodes": 81, "n_identifiers": 11, "random_cut": "def test_simple_with_collection_nesting(self):\n root_collection = Collection.get_first_root_node()\n evil_plans = root_collection.add_child(name=\"Evil plans\")\n evil_plans.add_child(name=\"Eviler plans\")\n\n response = self.get()\n # \"Eviler Plans\" should be prefixed with ↳ (↳) and 4 non-breaking spaces.\n " }, { "id": 268503, "commit_id": "76b746655a36807fa9198064ca9fe7c6cc00083a", "repo": "ansible", "path": 
"test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/plugin_utils/connection_base.py", "file_name": "connection_base.py", "fun_name": "__getattr__", "commit_message": "Add `use_rsa_sha2_algorithms` option for paramiko (#78789)\n\nFixes #76737\r\nFixes #77673\r\n\r\nCo-authored-by: Matt Clay ", "code": "def __getattr__(self, name):\n try:\n return self.__dict__[name]\n except KeyError:\n if not name.startswith(\"_\"):\n plugin = self._sub_plugin.get(\"obj\")\n if plugin:\n method = getattr(plugin, name, None)\n if method is not None:\n return method\n raise AttributeError(\n \"'%s' object has no attribute '%s'\"\n % (self.__class__.__name__, name)\n )\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 218, "n_words": 40, "vocab_size": 33, "complexity": 5, "nloc": 14, "token_counts": 74, "n_ast_nodes": 120, "n_identifiers": 14, "random_cut": "def __getattr__(self, name):\n try:\n " }, { "id": 129575, "commit_id": "d5bfb7b7da6f8ec505dd8ed69f0be419decfdcc0", "repo": "ray", "path": "rllib/agents/sac/tests/test_sac.py", "file_name": "test_sac.py", "fun_name": "test_sac_loss_function", "commit_message": "[RLlib] Preparatory PR for multi-agent multi-GPU learner (alpha-star style) #03 (#21652)", "code": "def test_sac_loss_function(self):\n \n config = sac.DEFAULT_CONFIG.copy()\n # Run locally.\n config[\"num_workers\"] = 0\n config[\"learning_starts\"] = 0\n config[\"twin_q\"] = False\n config[\"gamma\"] = 0.99\n # Switch on deterministic loss so we can compare the loss values.\n config[\"_deterministic_loss\"] = True\n # Use very simple nets.\n config[\"Q_model\"][\"fcnet_hiddens\"] = [10]\n config[\"policy_model\"][\"fcnet_hiddens\"] = [10]\n # Make sure, timing differences do not affect trainer.train().\n config[\"min_time_s_per_reporting\"] = 0\n # Test SAC with Simplex action space.\n config[\"env_config\"] = {\"simplex_actions\": True}\n\n map_ = {\n # Action net.\n \"default_policy/fc_1/kernel\": \"action_model._hidden_layers.0.\"\n \"_model.0.weight\",\n \"default_policy/fc_1/bias\": \"action_model._hidden_layers.0.\"\n \"_model.0.bias\",\n \"default_policy/fc_out/kernel\": \"action_model.\"\n \"_logits._model.0.weight\",\n \"default_policy/fc_out/bias\": \"action_model._logits._model.0.bias\",\n \"default_policy/value_out/kernel\": \"action_model.\"\n \"_value_branch._model.0.weight\",\n \"default_policy/value_out/bias\": \"action_model.\"\n \"_value_branch._model.0.bias\",\n # Q-net.\n \"default_policy/fc_1_1/kernel\": \"q_net.\"\n \"_hidden_layers.0._model.0.weight\",\n \"default_policy/fc_1_1/bias\": \"q_net.\"\n \"_hidden_layers.0._model.0.bias\",\n \"default_policy/fc_out_1/kernel\": \"q_net._logits._model.0.weight\",\n \"default_policy/fc_out_1/bias\": \"q_net._logits._model.0.bias\",\n \"default_policy/value_out_1/kernel\": \"q_net.\"\n \"_value_branch._model.0.weight\",\n \"default_policy/value_out_1/bias\": \"q_net.\"\n \"_value_branch._model.0.bias\",\n \"default_policy/log_alpha\": \"log_alpha\",\n # Target action-net.\n \"default_policy/fc_1_2/kernel\": \"action_model.\"\n \"_hidden_layers.0._model.0.weight\",\n \"default_policy/fc_1_2/bias\": \"action_model.\"\n \"_hidden_layers.0._model.0.bias\",\n \"default_policy/fc_out_2/kernel\": \"action_model.\"\n \"_logits._model.0.weight\",\n \"default_policy/fc_out_2/bias\": \"action_model.\"\n \"_logits._model.0.bias\",\n \"default_policy/value_out_2/kernel\": \"action_model.\"\n 
\"_value_branch._model.0.weight\",\n \"default_policy/value_out_2/bias\": \"action_model.\"\n \"_value_branch._model.0.bias\",\n # Target Q-net\n \"default_policy/fc_1_3/kernel\": \"q_net.\"\n \"_hidden_layers.0._model.0.weight\",\n \"default_policy/fc_1_3/bias\": \"q_net.\"\n \"_hidden_layers.0._model.0.bias\",\n \"default_policy/fc_out_3/kernel\": \"q_net.\"\n \"_logits._model.0.weight\",\n \"default_policy/fc_out_3/bias\": \"q_net.\"\n \"_logits._model.0.bias\",\n \"default_policy/value_out_3/kernel\": \"q_net.\"\n \"_value_branch._model.0.weight\",\n \"default_policy/value_out_3/bias\": \"q_net.\"\n \"_value_branch._model.0.bias\",\n \"default_policy/log_alpha_1\": \"log_alpha\",\n }\n\n env = SimpleEnv\n batch_size = 100\n obs_size = (batch_size, 1)\n actions = np.random.random(size=(batch_size, 2))\n\n # Batch of size=n.\n input_ = self._get_batch_helper(obs_size, actions, batch_size)\n\n # Simply compare loss values AND grads of all frameworks with each\n # other.\n prev_fw_loss = weights_dict = None\n expect_c, expect_a, expect_e, expect_t = None, None, None, None\n # History of tf-updated NN-weights over n training steps.\n tf_updated_weights = []\n # History of input batches used.\n tf_inputs = []\n for fw, sess in framework_iterator(\n config, frameworks=(\"tf\", \"torch\"), session=True):\n # Generate Trainer and get its default Policy object.\n trainer = sac.SACTrainer(config=config, env=env)\n policy = trainer.get_policy()\n p_sess = None\n if sess:\n p_sess = policy.get_session()\n\n # Set all weights (of all nets) to fixed values.\n if weights_dict is None:\n # Start with the tf vars-dict.\n assert fw in [\"tf2\", \"tf\", \"tfe\"]\n weights_dict = policy.get_weights()\n if fw == \"tfe\":\n log_alpha = weights_dict[10]\n weights_dict = self._translate_tfe_weights(\n weights_dict, map_)\n else:\n assert fw == \"torch\" # Then transfer that to torch Model.\n model_dict = self._translate_weights_to_torch(\n weights_dict, map_)\n # Have to add this here (not a parameter in tf, but must be\n # one in torch, so it gets properly copied to the GPU(s)).\n model_dict[\"target_entropy\"] = policy.model.target_entropy\n policy.model.load_state_dict(model_dict)\n policy.target_model.load_state_dict(model_dict)\n\n if fw == \"tf\":\n log_alpha = weights_dict[\"default_policy/log_alpha\"]\n elif fw == \"torch\":\n # Actually convert to torch tensors (by accessing everything).\n input_ = policy._lazy_tensor_dict(input_)\n input_ = {k: input_[k] for k in input_.keys()}\n log_alpha = policy.model.log_alpha.detach().cpu().numpy()[0]\n\n # Only run the expectation once, should be the same anyways\n # for all frameworks.\n if expect_c is None:\n expect_c, expect_a, expect_e, expect_t = \\\n self._sac_loss_helper(input_, weights_dict,\n sorted(weights_dict.keys()),\n log_alpha, fw,\n gamma=config[\"gamma\"], sess=sess)\n\n # Get actual outs and compare to expectation AND previous\n # framework. 
c=critic, a=actor, e=entropy, t=td-error.\n if fw == \"tf\":\n c, a, e, t, tf_c_grads, tf_a_grads, tf_e_grads = \\\n p_sess.run([\n policy.critic_loss,\n policy.actor_loss,\n policy.alpha_loss,\n policy.td_error,\n policy.optimizer().compute_gradients(\n policy.critic_loss[0],\n [v for v in policy.model.q_variables() if\n \"value_\" not in v.name]),\n policy.optimizer().compute_gradients(\n policy.actor_loss,\n [v for v in policy.model.policy_variables() if\n \"value_\" not in v.name]),\n policy.optimizer().compute_gradients(\n policy.alpha_loss, policy.model.log_alpha)],\n feed_dict=policy._get_loss_inputs_dict(\n input_, shuffle=False))\n tf_c_grads = [g for g, v in tf_c_grads]\n tf_a_grads = [g for g, v in tf_a_grads]\n tf_e_grads = [g for g, v in tf_e_grads]\n\n elif fw == \"tfe\":\n with tf.GradientTape() as tape:\n tf_loss(policy, policy.model, None, input_)\n c, a, e, t = policy.critic_loss, policy.actor_loss, \\\n policy.alpha_loss, policy.td_error\n vars = tape.watched_variables()\n tf_c_grads = tape.gradient(c[0], vars[6:10])\n tf_a_grads = tape.gradient(a, vars[2:6])\n tf_e_grads = tape.gradient(e, vars[10])\n\n elif fw == \"torch\":\n loss_torch(policy, policy.model, None, input_)\n c, a, e, t = policy.get_tower_stats(\"critic_loss\")[0], \\\n policy.get_tower_stats(\"actor_loss\")[0], \\\n policy.get_tower_stats(\"alpha_loss\")[0], \\\n policy.get_tower_stats(\"td_error\")[0]\n\n # Test actor gradients.\n policy.actor_optim.zero_grad()\n assert all(v.grad is None for v in policy.model.q_variables())\n assert all(\n v.grad is None for v in policy.model.policy_variables())\n assert policy.model.log_alpha.grad is None\n a.backward()\n # `actor_loss` depends on Q-net vars (but these grads must\n # be ignored and overridden in critic_loss.backward!).\n assert not all(\n torch.mean(v.grad) == 0\n for v in policy.model.policy_variables())\n assert not all(\n torch.min(v.grad) == 0\n for v in policy.model.policy_variables())\n assert policy.model.log_alpha.grad is None\n # Compare with tf ones.\n torch_a_grads = [\n v.grad for v in policy.model.policy_variables()\n if v.grad is not None\n ]\n check(tf_a_grads[2],\n np.transpose(torch_a_grads[0].detach().cpu()))\n\n # Test critic gradients.\n policy.critic_optims[0].zero_grad()\n assert all(\n torch.mean(v.grad) == 0.0\n for v in policy.model.q_variables() if v.grad is not None)\n assert all(\n torch.min(v.grad) == 0.0\n for v in policy.model.q_variables() if v.grad is not None)\n assert policy.model.log_alpha.grad is None\n c[0].backward()\n assert not all(\n torch.mean(v.grad) == 0\n for v in policy.model.q_variables() if v.grad is not None)\n assert not all(\n torch.min(v.grad) == 0 for v in policy.model.q_variables()\n if v.grad is not None)\n assert policy.model.log_alpha.grad is None\n # Compare with tf ones.\n torch_c_grads = [v.grad for v in policy.model.q_variables()]\n check(tf_c_grads[0],\n np.transpose(torch_c_grads[2].detach().cpu()))\n # Compare (unchanged(!) 
actor grads) with tf ones.\n torch_a_grads = [\n v.grad for v in policy.model.policy_variables()\n ]\n check(tf_a_grads[2],\n np.transpose(torch_a_grads[0].detach().cpu()))\n\n # Test alpha gradient.\n policy.alpha_optim.zero_grad()\n assert policy.model.log_alpha.grad is None\n e.backward()\n assert policy.model.log_alpha.grad is not None\n check(policy.model.log_alpha.grad, tf_e_grads)\n\n check(c, expect_c)\n check(a, expect_a)\n check(e, expect_e)\n check(t, expect_t)\n\n # Store this framework's losses in prev_fw_loss to compare with\n # next framework's outputs.\n if prev_fw_loss is not None:\n check(c, prev_fw_loss[0])\n check(a, prev_fw_loss[1])\n check(e, prev_fw_loss[2])\n check(t, prev_fw_loss[3])\n\n prev_fw_loss = (c, a, e, t)\n\n # Update weights from our batch (n times).\n for update_iteration in range(5):\n print(\"train iteration {}\".format(update_iteration))\n if fw == \"tf\":\n in_ = self._get_batch_helper(obs_size, actions, batch_size)\n tf_inputs.append(in_)\n # Set a fake-batch to use\n # (instead of sampling from replay buffer).\n buf = MultiAgentReplayBuffer.get_instance_for_testing()\n buf._fake_batch = in_\n trainer.train()\n updated_weights = policy.get_weights()\n # Net must have changed.\n if tf_updated_weights:\n check(\n updated_weights[\"default_policy/fc_1/kernel\"],\n tf_updated_weights[-1][\n \"default_policy/fc_1/kernel\"],\n false=True)\n tf_updated_weights.append(updated_weights)\n\n # Compare with updated tf-weights. Must all be the same.\n else:\n tf_weights = tf_updated_weights[update_iteration]\n in_ = tf_inputs[update_iteration]\n # Set a fake-batch to use\n # (instead of sampling from replay buffer).\n buf = MultiAgentReplayBuffer.get_instance_for_testing()\n buf._fake_batch = in_\n trainer.train()\n # Compare updated model.\n for tf_key in sorted(tf_weights.keys()):\n if re.search(\"_[23]|alpha\", tf_key):\n continue\n tf_var = tf_weights[tf_key]\n torch_var = policy.model.state_dict()[map_[tf_key]]\n if tf_var.shape != torch_var.shape:\n check(\n tf_var,\n np.transpose(torch_var.detach().cpu()),\n atol=0.003)\n else:\n check(tf_var, torch_var, atol=0.003)\n # And alpha.\n check(policy.model.log_alpha,\n tf_weights[\"default_policy/log_alpha\"])\n # Compare target nets.\n for tf_key in sorted(tf_weights.keys()):\n if not re.search(\"_[23]\", tf_key):\n continue\n tf_var = tf_weights[tf_key]\n torch_var = policy.target_model.state_dict()[map_[\n tf_key]]\n if tf_var.shape != torch_var.shape:\n check(\n tf_var,\n np.transpose(torch_var.detach().cpu()),\n atol=0.003)\n else:\n check(tf_var, torch_var, atol=0.003)\n trainer.stop()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 24, "n_whitespaces": 5558, "n_words": 980, "vocab_size": 452, "complexity": 45, "nloc": 247, "token_counts": 1752, "n_ast_nodes": 2879, "n_identifiers": 122, "random_cut": "def test_sac_loss_function(self):\n \n config = sac.DEFAULT_CONFIG.copy()\n # Run locally.\n config[\"num_workers\"] = 0\n config[\"learning_starts\"] = 0\n config[\"twin_q\"] = False\n config[\"gamma\"] = 0.99\n # Switch on deterministic loss so we can compare the loss values.\n config[\"_deterministic_loss\"] = True\n # Use very simple nets.\n config[\"Q_model\"][\"fcnet_hiddens\"] = [10]\n config[\"policy_model\"][\"fcnet_hiddens\"] = [10]\n # Make sure, timing differences do not affect trainer.train().\n config[\"min_time_s_per_reporting\"] = 0\n # Test SAC with Simplex action space.\n config[\"env_config\"] = 
{\"simplex_actions\": True}\n\n map_ = {\n # Action net.\n \"default_policy/fc_1/kernel\": \"action_model._hidden_layers.0.\"\n \"_model.0.weight\",\n \"default_policy/fc_1/bias\": \"action_model._hidden_layers.0.\"\n \"_model.0.bias\",\n \"default_policy/fc_out/kernel\": \"action_model.\"\n \"_logits._model.0.weight\",\n \"default_policy/fc_out/bias\": \"action_model._logits._model.0.bias\",\n \"default_policy/value_out/kernel\": \"action_model.\"\n \"_value_branch._model.0.weig" }, { "id": 54456, "commit_id": "ccb4cc008efa24ee39a85830c330f83d1fe2477a", "repo": "prefect", "path": "src/prefect/agent.py", "file_name": "agent.py", "fun_name": "work_queue_id_from_name", "commit_message": "Capture 404 errors explicitly so other http errors are not hidden", "code": "async def work_queue_id_from_name(self) -> Optional[UUID]:\n \n if not self.work_queue_name:\n raise ValueError(\"No work queue name provided.\")\n try:\n work_queue = await self.client.read_work_queue_by_name(self.work_queue_name)\n return work_queue.id\n except httpx.HTTPStatusError as exc:\n if exc.response.status_code == status.HTTP_404_NOT_FOUND:\n self.logger.warn(f'No work queue found named \"{self.work_queue_name}\"')\n return None\n else:\n raise\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 167, "n_words": 39, "vocab_size": 34, "complexity": 4, "nloc": 17, "token_counts": 73, "n_ast_nodes": 132, "n_identifiers": 19, "random_cut": "async def work_queue_id_from_name(self) -> Optional[UUID]:\n \n if not self.work_queue_name:\n raise ValueError(\"No work queue name provided.\")\n try:\n work_queue = await self.client.read_work_queue_by_name(self.work_queue_name)\n return work_queue.id\n except httpx.HTTPStatusError as exc:\n if exc.response.status_code == status.HTTP_404_NOT_FOUND:\n self.logger.warn(f'No work queue found n" }, { "id": 159093, "commit_id": "f00148b089d326c952880a0e5e6bd4b2dcb98ce5", "repo": "rasa", "path": "tests/utils/test_common.py", "file_name": "test_common.py", "fun_name": "test_cli_missing_log_level_default_used", "commit_message": "Configurable logging for libraries (#10614)\n\n* Make library level logging to be configurable\r\n\r\nFixes https://github.com/RasaHQ/rasa/issues/10203\r\n\r\n* Create log level documentation under cheatsheet in Rasa docs\r\n\r\n* Add log docs to `rasa shell --debug` (and others)", "code": "def test_cli_missing_log_level_default_used():\n \n configure_logging_and_warnings()\n rasa_logger = logging.getLogger(\"rasa\")\n # Default log level is currently INFO\n rasa_logger.level == logging.INFO\n matplotlib_logger = logging.getLogger(\"matplotlib\")\n # Default log level for libraries is currently ERROR\n matplotlib_logger.level == logging.ERROR\n\n", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 55, "n_words": 31, "vocab_size": 23, "complexity": 1, "nloc": 6, "token_counts": 38, "n_ast_nodes": 72, "n_identifiers": 9, "random_cut": "def test_cli_missing_log_level_default_used():\n \n configure_logging_and_warnings()\n rasa_logger = logging.getLogger(\"rasa\")\n # Default log level is currently INFO\n rasa_logger.level == logging.INFO\n matplotlib_logger = logging.getLogger(\"matplotlib\")\n # Default log level for libraries is currently ERROR\n matplotlib_logger.level == logging.ERROR\n\n" }, { "id": 105458, "commit_id": "6c398c1098feaa6bac2a9ee5cb7dea63ed8dd37b", "repo": "datasets", 
"path": "tests/test_py_utils.py", "file_name": "test_py_utils.py", "fun_name": "test_asdict", "commit_message": "Fix to dict conversion of `DatasetInfo`/`Features` (#4741)\n\n* Add custom asdict\r\n\r\n* Add test\r\n\r\n* One more test\r\n\r\n* Comment", "code": "def test_asdict():\n input = A(x=1, y=\"foobar\")\n expected_output = {\"x\": 1, \"y\": \"foobar\"}\n assert asdict(input) == expected_output\n\n input = {\"a\": {\"b\": A(x=10, y=\"foo\")}, \"c\": [A(x=20, y=\"bar\")]}\n expected_output = {\"a\": {\"b\": {\"x\": 10, \"y\": \"foo\"}}, \"c\": [{\"x\": 20, \"y\": \"bar\"}]}\n assert asdict(input) == expected_output\n\n with pytest.raises(TypeError):\n asdict([1, A(x=10, y=\"foo\")])\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 74, "n_words": 47, "vocab_size": 30, "complexity": 1, "nloc": 9, "token_counts": 134, "n_ast_nodes": 240, "n_identifiers": 10, "random_cut": "def test_asdict():\n input = A(x=1, y=\"foobar\")\n expected_output = {\"x\": 1, \"y\": \"foobar\"}\n assert asdict(input) == expected_output\n\n " }, { "id": 288190, "commit_id": "75510b8e90162a5b7a530d36d141cbada3df644c", "repo": "core", "path": "homeassistant/components/switchbee/entity.py", "file_name": "entity.py", "fun_name": "_get_coordinator_device", "commit_message": "Add cover platform for switchbee integration (#78383)\n\n* Added Platform cover for switchbee integration\r\n\r\n* added cover to .coveragerc\r\n\r\n* Applied code review feedback from other PR\r\n\r\n* Addressed comments from other PRs\r\n\r\n* rebased\r\n\r\n* Re-add carriage return\r\n\r\n* Update homeassistant/components/switchbee/cover.py\r\n\r\nCo-authored-by: epenet <6771947+epenet@users.noreply.github.com>\r\n\r\n* Update homeassistant/components/switchbee/cover.py\r\n\r\nCo-authored-by: epenet <6771947+epenet@users.noreply.github.com>\r\n\r\n* Update homeassistant/components/switchbee/cover.py\r\n\r\nCo-authored-by: epenet <6771947+epenet@users.noreply.github.com>\r\n\r\n* Update homeassistant/components/switchbee/cover.py\r\n\r\nCo-authored-by: epenet <6771947+epenet@users.noreply.github.com>\r\n\r\n* addressed CR comments\r\n\r\n* fixes\r\n\r\n* fixes\r\n\r\n* more fixes\r\n\r\n* more fixes\r\n\r\n* separate entities for cover and somfy cover\r\n\r\n* fixed isort\r\n\r\n* more fixes\r\n\r\n* more fixes\r\n\r\n* Update homeassistant/components/switchbee/cover.py\r\n\r\nCo-authored-by: epenet <6771947+epenet@users.noreply.github.com>\r\n\r\n* Update homeassistant/components/switchbee/cover.py\r\n\r\nCo-authored-by: epenet <6771947+epenet@users.noreply.github.com>\r\n\r\n* more fixes\r\n\r\n* more fixes\r\n\r\n* more\r\n\r\nCo-authored-by: epenet <6771947+epenet@users.noreply.github.com>", "code": "def _get_coordinator_device(self) -> _DeviceTypeT:\n return cast(_DeviceTypeT, self.coordinator.data[self._device.id])\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 25, "n_ast_nodes": 38, "n_identifiers": 8, "random_cut": "def _get_coordinator_device(self) -> _DeviceTypeT:\n return cast(_DeviceTypeT, self.coordinator.data[self._device.id])\n" }, { "id": 20152, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/idna/uts46data.py", "file_name": "uts46data.py", "fun_name": "_seg_59", "commit_message": 
"check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:\n return [\n (0x11FF2, 'X'),\n (0x11FFF, 'V'),\n (0x1239A, 'X'),\n (0x12400, 'V'),\n (0x1246F, 'X'),\n (0x12470, 'V'),\n (0x12475, 'X'),\n (0x12480, 'V'),\n (0x12544, 'X'),\n (0x12F90, 'V'),\n (0x12FF3, 'X'),\n (0x13000, 'V'),\n (0x1342F, 'X'),\n (0x14400, 'V'),\n (0x14647, 'X'),\n (0x16800, 'V'),\n (0x16A39, 'X'),\n (0x16A40, 'V'),\n (0x16A5F, 'X'),\n (0x16A60, 'V'),\n (0x16A6A, 'X'),\n (0x16A6E, 'V'),\n (0x16ABF, 'X'),\n (0x16AC0, 'V'),\n (0x16ACA, 'X'),\n (0x16AD0, 'V'),\n (0x16AEE, 'X'),\n (0x16AF0, 'V'),\n (0x16AF6, 'X'),\n (0x16B00, 'V'),\n (0x16B46, 'X'),\n (0x16B50, 'V'),\n (0x16B5A, 'X'),\n (0x16B5B, 'V'),\n (0x16B62, 'X'),\n (0x16B63, 'V'),\n (0x16B78, 'X'),\n (0x16B7D, 'V'),\n (0x16B90, 'X'),\n (0x16E40, 'M', '𖹠'),\n (0x16E41, 'M', '𖹡'),\n (0x16E42, 'M', '𖹢'),\n (0x16E43, 'M', '𖹣'),\n (0x16E44, 'M', '𖹤'),\n (0x16E45, 'M', '𖹥'),\n (0x16E46, 'M', '𖹦'),\n (0x16E47, 'M', '𖹧'),\n (0x16E48, 'M', '𖹨'),\n (0x16E49, 'M', '𖹩'),\n (0x16E4A, 'M', '𖹪'),\n (0x16E4B, 'M', '𖹫'),\n (0x16E4C, 'M', '𖹬'),\n (0x16E4D, 'M', '𖹭'),\n (0x16E4E, 'M', '𖹮'),\n (0x16E4F, 'M', '𖹯'),\n (0x16E50, 'M', '𖹰'),\n (0x16E51, 'M', '𖹱'),\n (0x16E52, 'M', '𖹲'),\n (0x16E53, 'M', '𖹳'),\n (0x16E54, 'M', '𖹴'),\n (0x16E55, 'M', '𖹵'),\n (0x16E56, 'M', '𖹶'),\n (0x16E57, 'M', '𖹷'),\n (0x16E58, 'M', '𖹸'),\n (0x16E59, 'M', '𖹹'),\n (0x16E5A, 'M', '𖹺'),\n (0x16E5B, 'M', '𖹻'),\n (0x16E5C, 'M', '𖹼'),\n (0x16E5D, 'M', '𖹽'),\n (0x16E5E, 'M', '𖹾'),\n (0x16E5F, 'M', '𖹿'),\n (0x16E60, 'V'),\n (0x16E9B, 'X'),\n (0x16F00, 'V'),\n (0x16F4B, 'X'),\n (0x16F4F, 'V'),\n (0x16F88, 'X'),\n (0x16F8F, 'V'),\n (0x16FA0, 'X'),\n (0x16FE0, 'V'),\n (0x16FE5, 'X'),\n (0x16FF0, 'V'),\n (0x16FF2, 'X'),\n (0x17000, 'V'),\n (0x187F8, 'X'),\n (0x18800, 'V'),\n (0x18CD6, 'X'),\n (0x18D00, 'V'),\n (0x18D09, 'X'),\n (0x1AFF0, 'V'),\n (0x1AFF4, 'X'),\n (0x1AFF5, 'V'),\n (0x1AFFC, 'X'),\n (0x1AFFD, 'V'),\n (0x1AFFF, 'X'),\n (0x1B000, 'V'),\n (0x1B123, 'X'),\n (0x1B150, 'V'),\n (0x1B153, 'X'),\n (0x1B164, 'V'),\n ]\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 548, "n_words": 243, "vocab_size": 146, "complexity": 1, "nloc": 103, "token_counts": 693, "n_ast_nodes": 1068, "n_identifiers": 6, "random_cut": "def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:\n return [\n (0x11FF2, 'X'),\n (0x11FFF, 'V'),\n (0x1239A, 'X'),\n (0x12400, 'V'),\n (0x1246F, 'X'),\n (0x12470, 'V'),\n (0x12475, 'X'),\n (0x12480, 'V'),\n (0x12544, 'X'),\n (0x12F90, 'V'),\n (0x12FF3, 'X'),\n (0x13000, 'V'),\n (0x1342F, 'X'),\n (0x14400, 'V'),\n (0x14647, 'X'),\n (0x16800, 'V'),\n (0x16A39, 'X'),\n (0x16A40, 'V'),\n (0x16A5F, 'X'),\n (0x16A60, 'V'),\n (0x16A6A, 'X'),\n (0x16A6E, 'V'),\n (0x16ABF, 'X'),\n (0x16AC0, 'V'),\n (0x16ACA, 'X'),\n (0x16AD0, 'V'),\n (0x16AEE, 'X'),\n (0x16AF0, 'V'),\n (0x16AF6, 'X'),\n (0x16B00, 
'V'),\n (0x16B46, 'X'),\n (0x16B50, 'V'),\n (0x16B5A, 'X'),\n (0x16B5B, 'V'),\n (0x16B62, 'X'),\n (0x16B63, 'V'),\n (0x16B78, 'X'),\n (0x16B7D, 'V'),\n (0x16B90, 'X'),\n (0x16E40, 'M', '𖹠'),\n (0x16E41, 'M', '𖹡'),\n (0x16E42, 'M', '𖹢'),\n (0x16E43, 'M', '𖹣'),\n (0x16E44, 'M', '𖹤'),\n (0x16E45, 'M', '𖹥'),\n (0x16E46, 'M', '𖹦'),\n (0x16E47, 'M', '𖹧'),\n (0x16E48, 'M', '𖹨'),\n (0x16E49, 'M', '𖹩'),\n (0x16E4A, 'M', '𖹪'),\n (0x16E4B, 'M', '𖹫'),\n (0x16E4C, 'M', '𖹬'),\n (0x16E4D, 'M', '𖹭'),\n (0x16E4E, 'M', '𖹮'),\n (0x16E4F, 'M', '𖹯'),\n (0x16E50, 'M', '𖹰'),\n (0x16E51, 'M', '𖹱'),\n (0x16E52, 'M', '𖹲'),\n (0x16E53, 'M', '𖹳'),\n (0x16E54, 'M', '𖹴'),\n (0x16E55, 'M', '𖹵'),\n (0x16E56, 'M', '𖹶'),\n (0x16E57, 'M', '𖹷'),\n (0x16E58, 'M', '𖹸'),\n (0x16E59, 'M', '𖹹'),\n (0x16E5A, 'M', '𖹺'),\n (0x16E5B, 'M', '𖹻'),\n (0x16E5C, 'M', '𖹼'),\n (0x16E5D, 'M', '𖹽'),\n (0x16E5E, 'M', '𖹾'),\n (0x16E5F, 'M', '𖹿'),\n (0x16E60, 'V'),\n (0x16E9B, 'X'),\n (0x16F00, 'V'),\n (0x16F4B, 'X'),\n (0x16F4F, 'V'),\n (0x16F88, 'X'),\n (0x16F8F, 'V'),\n (0x16FA0, 'X'),\n (0x16FE0, 'V'),\n (0x16FE5, 'X'),\n (0x16FF0, 'V'),\n (0x16FF2, 'X'),\n (0x17000, 'V'),\n (0x187F8, 'X'),\n (0x18800, 'V'),\n (0x18CD6, 'X'),\n (0x18D00, 'V'),\n (0x18D09, 'X'),\n (0x1AFF0, 'V'),\n (0x1AFF4, 'X'),\n (0x1AFF5, 'V'),\n (0x1AFFC, 'X'),\n (0x1AFFD, 'V'),\n (0x1AFFF, 'X'),\n (0x1B000, 'V'),\n (0x1B123, 'X'),\n (0x1B150, 'V'),\n (0x1B153, 'X'),\n (0x1B164, 'V'),\n ]\n" }, { "id": 169557, "commit_id": "ac05d29cf8cae186e96c83a03e2e80542ce2ad38", "repo": "pandas", "path": "pandas/tests/scalar/timedelta/test_constructors.py", "file_name": "test_constructors.py", "fun_name": "test_from_pytimedelta_us_reso", "commit_message": "API: Timedelta constructor pytimedelta, Tick preserve reso (#48918)\n\n* BUG: Timedelta.__new__\r\n\r\n* remove assertion\r\n\r\n* GH refs\r\n\r\n* API: Timedelta(td64_obj) retain resolution\r\n\r\n* API: Timedelta constructor pytimedelta, Tick preserve reso\r\n\r\n* remove debugging variable\r\n\r\n* remove duplicate", "code": "def test_from_pytimedelta_us_reso():\n # pytimedelta has microsecond resolution, so Timedelta(pytd) inherits that\n td = timedelta(days=4, minutes=3)\n result = Timedelta(td)\n assert result.to_pytimedelta() == td\n assert result._reso == NpyDatetimeUnit.NPY_FR_us.value\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 40, "n_words": 26, "vocab_size": 22, "complexity": 1, "nloc": 5, "token_counts": 40, "n_ast_nodes": 65, "n_identifiers": 12, "random_cut": "def test_from_pytimedelta_us_reso():\n # pytimedelta has microsecond resolution, so Timedelta(pytd) inherits that\n td = timedelta(days=4, minutes=3)\n result = Timedelta(td)\n assert result.to_pytimedelta() == td\n assert result._reso == NpyDate" }, { "id": 300944, "commit_id": "c4fc84ec1e77a18ff392b34389baa86d52388246", "repo": "core", "path": "homeassistant/components/logbook/queries/common.py", "file_name": "common.py", "fun_name": "select_states", "commit_message": "Add support for selecting device_ids from the logbook (#72039)\n\nCo-authored-by: Paulus Schoutsen ", "code": "def select_states() -> Select:\n \n return select(\n *EVENT_COLUMNS_FOR_STATE_SELECT,\n *STATE_COLUMNS,\n NOT_CONTEXT_ONLY,\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 40, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 7, "token_counts": 19, "n_ast_nodes": 32, 
"n_identifiers": 6, "random_cut": "def select_states() -> Select:\n \n return select(\n *EVENT_COLUMNS_FOR_S" }, { "id": 257337, "commit_id": "738e008020f146ff9820c290311782f515749c48", "repo": "haystack", "path": "test/test_table_reader.py", "file_name": "test_table_reader.py", "fun_name": "test_table_reader_batch_single_query_single_doc_list", "commit_message": "Add `run_batch` method to all nodes and `Pipeline` to allow batch querying (#2481)\n\n* Add run_batch methods for batch querying\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix mypy\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix mypy\r\n\r\n* Fix linter\r\n\r\n* Fix tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix mypy\r\n\r\n* Fix rest api test\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Add Doc strings\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Add batch_size as attribute to nodes supporting batching\r\n\r\n* Adapt error messages\r\n\r\n* Adapt type of filters in retrievers\r\n\r\n* Revert change about truncation_warning in summarizer\r\n\r\n* Unify multiple_doc_lists tests\r\n\r\n* Use smaller models in extractor tests\r\n\r\n* Add return types to JoinAnswers and RouteDocuments\r\n\r\n* Adapt return statements in reader's run_batch method\r\n\r\n* Allow list of filters\r\n\r\n* Adapt error messages\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix tests\r\n\r\n* Fix mypy\r\n\r\n* Adapt print_questions\r\n\r\n* Remove disabling warning about too many public methods\r\n\r\n* Add flag for pylint to disable warning about too many public methods in pipelines/base.py and document_stores/base.py\r\n\r\n* Add type check\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Adapt tutorial 11\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Add query_batch method for DCDocStore\r\n\r\n* Update Documentation & Code Style\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def test_table_reader_batch_single_query_single_doc_list(table_reader):\n data = {\n \"actors\": [\"brad pitt\", \"leonardo di caprio\", \"george clooney\"],\n \"age\": [\"58\", \"47\", \"60\"],\n \"number of movies\": [\"87\", \"53\", \"69\"],\n \"date of birth\": [\"18 december 1963\", \"11 november 1974\", \"6 may 1961\"],\n }\n table = pd.DataFrame(data)\n\n query = \"When was Di Caprio born?\"\n prediction = table_reader.predict_batch(queries=query, documents=[Document(content=table, content_type=\"table\")])\n # Expected output: List of lists of answers\n assert isinstance(prediction[\"answers\"], list)\n assert isinstance(prediction[\"answers\"][0], list)\n assert isinstance(prediction[\"answers\"][0][0], Answer)\n assert len(prediction[\"answers\"]) == 1 # Predictions for 5 docs\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 135, "n_words": 77, "vocab_size": 66, "complexity": 1, "nloc": 14, "token_counts": 134, "n_ast_nodes": 234, "n_identifiers": 18, "random_cut": "def test_table_reader_batch_single_query_single_doc_list(table_reader):\n data = {\n \"actors\": [\"brad pitt\", \"leonardo di caprio\", \"george clooney\"],\n \"age\": [\"58\", \"47\", \"60\"],\n \"number of movies\": [\"87\", \"53\", \"69\"],\n \"date of birth\": [\"18 december 1963\", \"11 november 1974" }, { "id": 102389, "commit_id": "7b8f73dd32a8a893dfb794433ce501e76c53bc89", "repo": "pytorch", "path": 
"test/test_nn.py", "file_name": "test_nn.py", "fun_name": "test_conv_modules_raise_error_on_incorrect_input_size", "commit_message": "No-batch-dim support for ConvNd (#70506)\n\nSummary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/70506\n\nTest Plan: Imported from OSS\n\nReviewed By: albanD\n\nDifferential Revision: D33355034\n\nPulled By: jbschlosser\n\nfbshipit-source-id: 5a42645299b1d82cee7d461826acca1c5b35a71c", "code": "def test_conv_modules_raise_error_on_incorrect_input_size(self):\n for dtype in [torch.bfloat16, torch.double, torch.float]:\n modules = [nn.Conv1d(3, 8, 3).to(dtype), nn.ConvTranspose1d(3, 8, 3).to(dtype),\n nn.Conv2d(3, 8, 3).to(dtype), nn.ConvTranspose2d(3, 8, 3).to(dtype),\n nn.Conv3d(3, 8, 3).to(dtype), nn.ConvTranspose3d(3, 8, 3).to(dtype)]\n\n invalid_input_dims = [(1, 4), (1, 4),\n (2, 5), (2, 5),\n (3, 6), (3, 6)]\n\n for invalid_dims, module in zip(invalid_input_dims, modules):\n for dims in invalid_dims:\n input = torch.empty(torch.Size((3, ) * dims))\n self.assertRaises(RuntimeError, lambda: module(input))\n", "url": "https://github.com/pytorch/pytorch.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 263, "n_words": 61, "vocab_size": 42, "complexity": 4, "nloc": 12, "token_counts": 208, "n_ast_nodes": 290, "n_identifiers": 26, "random_cut": "def test_conv_modules_raise_error_on_incorrect_input_size(self):\n for dtype in [torch.bfloat16, torch.double, torch.float]:\n modules = [nn.Conv1d(3, 8, 3).to(dtype), nn.ConvTranspose1d(3, 8, 3).to(dtype),\n nn.Conv2d(3, 8, 3).to(dtype), nn.ConvTranspose2d(3, 8, 3).to(dtype),\n nn.Conv3d(3, 8, 3).to(dtype), nn.ConvTranspose3d(3, 8, 3).to(dtype)]\n\n invalid_input_dims = [(1, 4), (1, 4),\n " }, { "id": 270007, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/callbacks_test.py", "file_name": "callbacks_test.py", "fun_name": "test_TensorBoard_autoTrace_profileTwiceGraphMode", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_TensorBoard_autoTrace_profileTwiceGraphMode(self):\n tf.compat.v1.disable_eager_execution()\n inp = keras.Input((1,))\n out = keras.layers.Dense(units=1)(inp)\n model = keras.Model(inp, out)\n\n model.compile(gradient_descent.SGD(1), \"mse\")\n\n logdir = os.path.join(self.get_temp_dir(), \"tb1\")\n model.fit(\n np.zeros((64, 1)),\n np.zeros((64, 1)),\n batch_size=32,\n callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=1)],\n )\n # Verifies trace exists in the first logdir.\n self.assertEqual(1, self._count_trace_file(logdir=logdir))\n logdir = os.path.join(self.get_temp_dir(), \"tb2\")\n model.fit(\n np.zeros((64, 1)),\n np.zeros((64, 1)),\n batch_size=32,\n callbacks=[keras.callbacks.TensorBoard(logdir, profile_batch=2)],\n )\n # Verifies trace exists in the second logdir.\n self.assertEqual(1, self._count_trace_file(logdir=logdir))\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 253, "n_words": 61, "vocab_size": 36, "complexity": 1, "nloc": 22, "token_counts": 221, "n_ast_nodes": 340, "n_identifiers": 32, "random_cut": "def test_TensorBoard_autoTrace_profileTwiceGraphMode(self):\n tf.compat.v1.disable_eager_execution()\n inp = keras.In" }, { "id": 155999, "commit_id": "e0d34a54ce4930528bbe3c8ded1d85c0c2be7fe6", "repo": "dask", "path": "dask/dataframe/io/tests/test_hdf.py", "file_name": "test_hdf.py", "fun_name": 
"test_hdf_empty_dataframe", "commit_message": "If hdf file is empty, don't fail on meta creation (#8809)", "code": "def test_hdf_empty_dataframe():\n pytest.importorskip(\"tables\")\n # https://github.com/dask/dask/issues/8707\n from dask.dataframe.io.hdf import dont_use_fixed_error_message\n\n df = pd.DataFrame({\"A\": [], \"B\": []}, index=[])\n df.to_hdf(\"data.h5\", format=\"fixed\", key=\"df\", mode=\"w\")\n with pytest.raises(TypeError, match=dont_use_fixed_error_message):\n dd.read_hdf(\"data.h5\", \"df\")\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 49, "n_words": 25, "vocab_size": 25, "complexity": 1, "nloc": 7, "token_counts": 81, "n_ast_nodes": 142, "n_identifiers": 21, "random_cut": "def test_hdf_empty_dataframe():\n pytest.importorskip(\"tables\")\n " }, { "id": 213538, "commit_id": "d743336b1f3654cd0315f380f43eed4116997c1d", "repo": "ivy", "path": "ivy/backends/numpy/core/random.py", "file_name": "random.py", "fun_name": "multinomial", "commit_message": "renamed dev_str arg to dev for all methods.", "code": "def multinomial(population_size, num_samples, batch_size, probs=None, replace=True, dev=None):\n if probs is None:\n probs = _np.ones((batch_size, population_size,)) / population_size\n orig_probs_shape = list(probs.shape)\n num_classes = orig_probs_shape[-1]\n probs_flat = _np.reshape(probs, (-1, orig_probs_shape[-1]))\n probs_flat = probs_flat / _np.sum(probs_flat, -1, keepdims=True)\n probs_stack = _np.split(probs_flat, probs_flat.shape[0])\n samples_stack = [_np.random.choice(num_classes, num_samples, replace, p=prob[0]) for prob in probs_stack]\n samples_flat = _np.stack(samples_stack)\n return _np.asarray(_np.reshape(samples_flat, orig_probs_shape[:-1] + [num_samples]))\n\n\nrandint = lambda low, high, shape, dev=None: _np.random.randint(low, high, shape)\nseed = lambda seed_value=0: _np.random.seed(seed_value)\nshuffle = _np.random.permutation\n", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 105, "n_words": 75, "vocab_size": 58, "complexity": 3, "nloc": 11, "token_counts": 165, "n_ast_nodes": 311, "n_identifiers": 34, "random_cut": "def multinomial(population_size, num_samples, batch_size, probs=None, replace=True, dev=None):\n if probs is None:\n probs = _np.ones((batch_size, population_size,)) / population_size\n orig_probs_shape = list(probs.shape)\n num_classes = orig_probs_shape[-1]\n probs_flat = _np.reshape(probs, (-1, orig_probs_shape[-1]))\n probs_flat = probs_flat / _np.sum(probs_flat, -1, keepdims=True)\n probs_stack = _np.split(probs_flat, probs_flat.shape[0])\n samples_stack " }, { "id": 2882, "commit_id": "a8c5abf1494356f854a81631b814e5928bc0eb8b", "repo": "PySyft", "path": "tests/integration/smpc/tensor/tensor_abstraction_test.py", "file_name": "tensor_abstraction_test.py", "fun_name": "test_tensor_abstraction_pointer", "commit_message": "Fix requested changes: Replace block_timeout() -> get(timeout)", "code": "def test_tensor_abstraction_pointer(get_clients, op_str) -> None:\n clients = get_clients(3)\n\n op = getattr(operator, op_str)\n\n data_1 = Tensor(child=np.array([[15, 34], [32, 89]], dtype=DEFAULT_INT_NUMPY_TYPE))\n data_2 = Tensor(child=np.array([[567, 98], [78, 25]], dtype=DEFAULT_INT_NUMPY_TYPE))\n data_3 = Tensor(\n child=np.array([[125, 10], [124, 28]], dtype=DEFAULT_INT_NUMPY_TYPE)\n )\n\n tensor_pointer_1 = data_1.send(clients[0])\n 
tensor_pointer_2 = data_2.send(clients[1])\n tensor_pointer_3 = data_3.send(clients[2])\n\n # creates an MPCTensor between party 1 and party 2\n mpc_1_2 = op(tensor_pointer_1, tensor_pointer_2)\n\n # creates and MPCTensor between party 1,2,3\n mpc_1_2_3 = op(mpc_1_2, tensor_pointer_3)\n\n exp_res = op(data_1, data_2)\n\n assert (mpc_1_2.reconstruct(timeout_secs=40) == exp_res.child).all()\n\n exp_res = op(exp_res, data_3)\n\n assert (mpc_1_2_3.reconstruct(timeout_secs=40) == exp_res.child).all()\n\n\n@pytest.mark.smpc_abstract\n@pytest.mark.parametrize(\"op_str\", [\"add\", \"sub\", \"mul\"])", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "@pytest.mark.smpc_abstract\n@pytest.mark.parametrize(\"op_str\", [\"add\", \"sub\", \"mul\"])", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 145, "n_words": 90, "vocab_size": 66, "complexity": 1, "nloc": 17, "token_counts": 213, "n_ast_nodes": 361, "n_identifiers": 30, "random_cut": "def test_tensor_abstraction_pointer(get_clients, op_str) -> None:\n clients = get_clients(3)\n\n op = getattr(operator, op_str)\n\n data_1 = Tensor(child=np.array([[15, 34], [32, 89]], dtype=DEFAULT_INT_NUMPY_TYPE))\n data_2 = Tensor(child=np.array([[567, 98], [78, 25]], dtype=DEFAULT_INT_NUM" }, { "id": 241438, "commit_id": "a51932f920c5f2407827f10b89b5569c27c13b4b", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_figure.py", "file_name": "_figure.py", "fun_name": "select_selections", "commit_message": "upgrade Plotly.js to 2.13.2", "code": "def select_selections(self, selector=None, row=None, col=None, secondary_y=None):\n \n return self._select_annotations_like(\n \"selections\", selector=selector, row=row, col=col, secondary_y=secondary_y\n )\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 46, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 4, "token_counts": 45, "n_ast_nodes": 66, "n_identifiers": 7, "random_cut": "def select_selections(self, selector=None, row=None, col=None, secondary_y=None):\n \n return self._select_annotations_like(\n \"selections\", selector=selector, r" }, { "id": 172149, "commit_id": "026a83e06447b749385beddd3d03abe97d48e8f5", "repo": "pandas", "path": "asv_bench/benchmarks/array.py", "file_name": "array.py", "fun_name": "setup", "commit_message": "PERF: ArrowExtensionArray.to_numpy (#49973)", "code": "def setup(self, dtype, hasna):\n N = 100_000\n if dtype == \"boolean[pyarrow]\":\n data = np.random.choice([True, False], N, replace=True)\n elif dtype == \"float64[pyarrow]\":\n data = np.random.randn(N)\n elif dtype == \"int64[pyarrow]\":\n data = np.arange(N)\n elif dtype == \"string[pyarrow]\":\n data = tm.rands_array(10, N)\n elif dtype == \"timestamp[ns][pyarrow]\":\n data = pd.date_range(\"2000-01-01\", freq=\"s\", periods=N)\n else:\n raise NotImplementedError\n\n arr = pd.array(data, dtype=dtype)\n if hasna:\n arr[::2] = pd.NA\n self.arr = arr\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 209, "n_words": 63, "vocab_size": 38, "complexity": 7, "nloc": 18, "token_counts": 134, "n_ast_nodes": 220, "n_identifiers": 22, "random_cut": "def setup(self, dtype, hasna):\n N = 100_000\n if dtype == \"boolean[pyarrow]\":\n data = np.random.choice([True, False], N, replace=True)\n elif dtype == \"float64[pyarrow]\":\n data = np.random.randn(N)\n elif dtype == 
\"int64[pyarrow]\":\n data = " }, { "id": 143421, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/env/wrappers/tests/test_kaggle_wrapper.py", "file_name": "test_kaggle_wrapper.py", "fun_name": "test_football_env_run_30_steps", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_football_env_run_30_steps(self):\n from ray.rllib.env.wrappers.kaggle_wrapper import KaggleFootballMultiAgentEnv\n\n env = KaggleFootballMultiAgentEnv()\n\n # use the built-in agents in the kaggle environment\n run_right_agent = env.kaggle_env.agents[\"run_right\"]\n do_nothing_agent = env.kaggle_env.agents[\"do_nothing\"]\n\n obs = env.reset()\n self.assertEqual(list(obs.keys()), [\"agent0\", \"agent1\"])\n done = {\"__all__\": False}\n num_steps_completed = 0\n while not done[\"__all__\"] and num_steps_completed <= 30:\n action0 = run_right_agent(structify(obs[\"agent0\"]))[0]\n action1 = do_nothing_agent(structify(obs[\"agent1\"]))[0]\n action_dict = {\"agent0\": action0, \"agent1\": action1}\n obs, _, done, _ = env.step(action_dict)\n num_steps_completed += 1\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 189, "n_words": 65, "vocab_size": 53, "complexity": 3, "nloc": 15, "token_counts": 147, "n_ast_nodes": 245, "n_identifiers": 25, "random_cut": "def test_football_env_run_30_steps(self):\n from ray.rllib.env.wrappers.kaggle_wrapper import KaggleFootballMultiAgentEnv\n\n env = KaggleFootballMultiAgentEnv()\n\n # use the built-in agents in the kaggle environment\n run_right_agent = env.kaggle_env.agents[\"run_right\"]\n do_nothing_agent = env.kaggle_env.agents[\"do_nothing\"]\n\n obs = env.reset()\n self.assertEqual(list(obs.keys()), [\"agent0\", \"agent1\"])\n done = {\"__all__\": False}\n num_steps_completed = 0\n while not done[\"__all__\"] and num_steps_completed <= 30:\n action0 = run_right_agent(structify(obs[\"agent0\"]))[0]\n action1 = do_nothing_agent(structify(obs[\"agent1\"]))[0]\n action_dict = {\"agent0\": action0, \"agent1\": action1}\n obs, _, done" }, { "id": 108365, "commit_id": "1bc33e99efc9e4be433f99c6a74c7e3b30147dac", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_ticker.py", "file_name": "test_ticker.py", "fun_name": "test_bad_locator_subs", "commit_message": "Improve consistency in LogLocator and LogFormatter API", "code": "def test_bad_locator_subs(sub):\n ll = mticker.LogLocator()\n with pytest.raises(ValueError):\n ll.set_params(subs=sub)\n\n\n@pytest.mark.parametrize('numticks', [1, 2, 3, 9])\n@mpl.style.context('default')", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize('numticks', [1, 2, 3, 9])\n@mpl.style.context('default')", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 24, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 4, "token_counts": 28, "n_ast_nodes": 93, "n_identifiers": 15, "random_cut": "def test_bad_locator_subs(sub):\n ll = mticker.LogLocator()\n with pytest.raises(ValueError):\n ll.set_params(subs=sub)\n\n\n@pytest.mark.parametrize('numticks', [1, 2, 3, 9])\n@mpl" }, { "id": 130341, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/autoscaler/_private/_kubernetes/config.py", "file_name": "config.py", "fun_name": "_configure_autoscaler_role_binding", "commit_message": "[CI] Format Python code with Black 
(#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _configure_autoscaler_role_binding(namespace, provider_config):\n binding_field = \"autoscaler_role_binding\"\n if binding_field not in provider_config:\n logger.info(log_prefix + not_provided_msg(binding_field))\n return\n\n binding = provider_config[binding_field]\n if \"namespace\" not in binding[\"metadata\"]:\n binding[\"metadata\"][\"namespace\"] = namespace\n elif binding[\"metadata\"][\"namespace\"] != namespace:\n raise InvalidNamespaceError(binding_field, namespace)\n for subject in binding[\"subjects\"]:\n if \"namespace\" not in subject:\n subject[\"namespace\"] = namespace\n elif subject[\"namespace\"] != namespace:\n raise InvalidNamespaceError(\n binding_field + \" subject '{}'\".format(subject[\"name\"]), namespace\n )\n\n name = binding[\"metadata\"][\"name\"]\n field_selector = \"metadata.name={}\".format(name)\n accounts = (\n auth_api()\n .list_namespaced_role_binding(namespace, field_selector=field_selector)\n .items\n )\n if len(accounts) > 0:\n assert len(accounts) == 1\n logger.info(log_prefix + using_existing_msg(binding_field, name))\n return\n\n logger.info(log_prefix + not_found_msg(binding_field, name))\n auth_api().create_namespaced_role_binding(namespace, binding)\n logger.info(log_prefix + created_msg(binding_field, name))\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 268, "n_words": 95, "vocab_size": 57, "complexity": 8, "nloc": 31, "token_counts": 215, "n_ast_nodes": 360, "n_identifiers": 23, "random_cut": "def _configure_autoscaler_role_binding(namespace, provider_config):\n binding_field = \"autoscaler_role_binding\"\n if binding_field not in provider_config:\n logger.info(log_prefix + not_provided_msg(binding_field))\n return\n\n binding = provider_config[binding_field]\n if \"namespace\" not in binding[\"metadata\"]:\n binding[\"metadata\"][\"namespace\"] = namespace\n elif binding[\"metadata\"][\"namespace\"] != namespace:\n raise InvalidNamespaceError(binding_field, namespace)\n for subject in binding[\"subjects\"]:\n if \"namespace\" not in subject:\n subject[\"namespace\"] = namespace\n elif subject[\"namespace\"] != namespace:\n raise InvalidNamespaceError(\n binding_field + \" subject '{}'\".format(subject[\"name\"]), namespace\n )\n\n name = binding[\"metadata\"][\"name\"]\n field_selector = \"metadata.name={}\".format(name)\n accounts = (\n auth_api()\n .list_namespaced_role_binding(namespace, field_selector=field_selector)\n .items\n )\n " }, { "id": 197842, "commit_id": "af44b30d68265acb25340374b648e198fb5570e7", "repo": "sympy", "path": "sympy/polys/numberfields/tests/test_primes.py", "file_name": "test_primes.py", "fun_name": "test_PrimeIdeal_reduce", "commit_message": "Improve `PrimeIdeal` reduction methods.", "code": "def test_PrimeIdeal_reduce():\n k = QQ.alg_field_from_poly(Poly(x ** 3 + x ** 2 - 2 * x + 8))\n Zk = k.maximal_order()\n P = k.primes_above(2)\n frp = P[2]\n\n # reduce_element\n a = Zk.parent(to_col([23, 20, 11]), denom=6)\n a_bar_expected = Zk.parent(to_col([11, 5, 2]), denom=6)\n a_bar = frp.reduce_element(a)\n assert a_bar == a_bar_expected\n\n # reduce_ANP\n a = k([QQ(11, 6), QQ(20, 6), QQ(23, 6)])\n a_bar_expected = k([QQ(2, 6), QQ(5, 6), QQ(11, 6)])\n a_bar = frp.reduce_ANP(a)\n assert a_bar == a_bar_expected\n\n # reduce_alg_num\n a = k.to_alg_num(a)\n a_bar_expected = k.to_alg_num(a_bar_expected)\n a_bar = 
frp.reduce_alg_num(a)\n assert a_bar == a_bar_expected\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 143, "n_words": 87, "vocab_size": 48, "complexity": 1, "nloc": 17, "token_counts": 196, "n_ast_nodes": 300, "n_identifiers": 21, "random_cut": "def test_PrimeIdeal_reduce():\n k = QQ.alg_field_from_poly(Poly(x ** 3 + x ** 2 - 2 * x + 8))\n Zk = k.maximal_order()\n P = k.primes_above(2)\n frp = P[2]\n\n # reduce_element\n a = Zk.parent(to_col([23, 20, 11]), denom=6)\n a_bar_expected = Zk.parent(to_col([11, 5, 2]), denom=6)\n a_bar = frp.reduce_element(a)\n assert a_bar == a_bar_expected\n\n # reduce_ANP\n a = k([QQ(11, 6), QQ(20, 6), QQ(23, 6)])\n a_bar_expected = k([QQ(2, 6), QQ(5, 6), QQ(11, 6)])\n a_bar = frp.reduce_ANP(a)\n assert a_bar == a_bar_expected\n\n # reduce_alg_num\n a = k.to_alg_num(a)\n a_bar_expected = k.to_alg_num(a_bar_expected)\n a_" }, { "id": 223281, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/tests/test_register.py", "file_name": "test_register.py", "fun_name": "test_register_invalid_long_description", "commit_message": "add python 3.10.4 for windows", "code": "def test_register_invalid_long_description(self):\n description = ':funkie:`str`' # mimic Sphinx-specific markup\n metadata = {'url': 'xxx', 'author': 'xxx',\n 'author_email': 'xxx',\n 'name': 'xxx', 'version': 'xxx',\n 'long_description': description}\n cmd = self._get_cmd(metadata)\n cmd.ensure_finalized()\n cmd.strict = True\n inputs = Inputs('2', 'tarek', 'tarek@ziade.org')\n register_module.input = inputs\n self.addCleanup(delattr, register_module, 'input')\n\n self.assertRaises(DistutilsSetupError, cmd.run)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 163, "n_words": 43, "vocab_size": 33, "complexity": 1, "nloc": 13, "token_counts": 88, "n_ast_nodes": 164, "n_identifiers": 17, "random_cut": "def test_register_invalid_long_description(self):\n description = ':funkie:`str`' # mimic Sphinx-specific markup\n metadata = {'url': 'xxx', 'author': 'xxx',\n 'author_email': 'xxx',\n 'name': 'xxx', 'version': 'xxx',\n 'long_description': description}\n cmd = self._get_cmd(metadata)\n cmd.ensure_finalized()\n cmd.strict = True\n inputs" }, { "id": 161832, "commit_id": "90a7224ee672ca7f58399f3c8bec9d38341b1423", "repo": "rich", "path": "tests/test_markup.py", "file_name": "test_markup.py", "fun_name": "test_markup_escape", "commit_message": "fix invalid escapes", "code": "def test_markup_escape():\n result = str(render(\"[dim white][url=[/]\"))\n assert result == \"[url=\"\n\n", "url": "https://github.com/Textualize/rich.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 15, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 33, "n_identifiers": 4, "random_cut": "def test_markup_escape():\n result = str(rende" }, { "id": 86506, "commit_id": "e0dddfaa4b466e7eccff4ed075cc319fcc922688", "repo": "sentry", "path": "tests/sentry/lang/javascript/test_processor.py", "file_name": "test_processor.py", "fun_name": "test_no_suspected_console_error", "commit_message": "feat(empty-stacktraces): Tag events with stack traces from JS console errors (#39335)\n\nAdd a new tag, `empty_stacktrace.js_console`, to tag JavaScript console errors.", "code": "def 
test_no_suspected_console_error(self):\n project = self.create_project()\n release = self.create_release(project=project, version=\"12.31.12\")\n\n data = {\n \"is_exception\": True,\n \"platform\": \"javascript\",\n \"project\": project.id,\n \"exception\": {\n \"values\": [\n {\n \"type\": \"SyntaxError\",\n \"mechanism\": {\n \"type\": \"onerror\",\n },\n \"value\": (\"value\"),\n \"stacktrace\": {\n \"frames\": [\n {\n \"abs_path\": \"http://example.com/foo.js\",\n \"filename\": \"\",\n \"function\": \"name\",\n \"lineno\": 4,\n \"colno\": 0,\n },\n {\n \"abs_path\": \"http://example.com/foo.js\",\n \"filename\": \"\",\n \"function\": \"new name\",\n \"lineno\": 4,\n \"colno\": 0,\n },\n ]\n },\n }\n ]\n },\n }\n\n stacktrace_infos = [\n stacktrace for stacktrace in find_stacktraces_in_data(data, with_exceptions=True)\n ]\n\n processor = JavaScriptStacktraceProcessor(\n data={\"release\": release.version, \"dist\": \"foo\", \"timestamp\": 123.4},\n project=project,\n stacktrace_infos=stacktrace_infos,\n )\n\n frames = processor.get_valid_frames()\n assert processor.suspected_console_errors(frames) is False\n\n processor.tag_suspected_console_errors(frames)\n assert get_tag(processor.data, \"empty_stacktrace.js_console\") is False\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1045, "n_words": 102, "vocab_size": 68, "complexity": 2, "nloc": 49, "token_counts": 211, "n_ast_nodes": 371, "n_identifiers": 20, "random_cut": "def test_no_suspected_console_error(self):\n project = self.create_project()\n release = self.create_release(project=project, version=\"12.31.12\")\n\n data = {\n \"is_exception\": True,\n \"platform\": \"javascript\",\n \"project\": project.id,\n \"exception\": {\n \"values\": [\n {\n \"type\": \"SyntaxError\",\n \"mechanism\": {\n \"type\": \"onerror\",\n },\n \"value\": (\"value\"),\n \"stacktrace\": {\n \"frames\": [\n {\n \"abs_path\": \"http://example.com/foo.js\",\n \"filename\": \"\",\n \"function\": \"name\",\n \"lineno\": 4,\n \"colno\": 0,\n },\n {\n \"abs_path\": \"http://example.com/foo.js\",\n \"filename\": \"\",\n \"function\": \"new name\",\n \"lineno\": 4,\n \"colno\": 0,\n },\n ]\n },\n }\n ]\n },\n }\n\n stacktrace_infos = [\n stacktrace for stacktrace in find_stacktraces_in_data(data, with_exceptions=True)" }, { "id": 178301, "commit_id": "56eb59d93f13815e66d0dea07e7669dfe275fa10", "repo": "Nuitka", "path": "nuitka/importing/Importing.py", "file_name": "Importing.py", "fun_name": "locateModule", "commit_message": "Plugins: Massive cleanup and API improvements and Kivy support\n\n* Added method to locate a DLL and to create a DLL entry point\n as expected, removing need for imports and making it more\n clear as an API.\n\n* The location of modules had already an API, but it wasn'\n used where it could be.\n\n* Moved implicit imports and DLL usage for Gi to its plugin,\n solving a TODO for it.\n\n* Make sure sure to only yield, and not return, that is just\n more error prone.\n\n* Also allow generators for implicit dependencies, such that\n generators work in a yield from fashion.\n\n* With this, Kivy apps work on at least Linux.", "code": "def locateModule(module_name, parent_package, level):\n \n module_package, module_filename, finding = findModule(\n module_name=module_name,\n parent_package=parent_package,\n level=level,\n )\n\n assert module_package is None or (\n type(module_package) is ModuleName and module_package != \"\"\n ), 
repr(module_package)\n\n if module_filename is not None:\n module_filename = os.path.normpath(module_filename)\n\n module_name, module_kind = getModuleNameAndKindFromFilename(module_filename)\n\n assert module_kind is not None, module_filename\n\n module_name = ModuleName.makeModuleNameInPackage(module_name, module_package)\n\n return module_name, module_filename, finding\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 131, "n_words": 54, "vocab_size": 39, "complexity": 4, "nloc": 15, "token_counts": 100, "n_ast_nodes": 151, "n_identifiers": 17, "random_cut": "def locateModule(module_name, parent_package, level):\n \n module_package, module_filename, finding = findModule(\n module_name=module_name,\n parent_package=parent_package,\n level=level,\n )\n\n assert module_package is None or (\n type(module_package) is ModuleName and module_package != \"\"\n ), repr(module_package)\n\n if module_filename is not None:\n module_filename = os.path.normpath(module_filename)\n\n module_name, module_kind = getModuleNameAndKindFromFilename(module_filename)\n\n assert" }, { "id": 140389, "commit_id": "692335440b10b487641641d71413d4c03c85a362", "repo": "ray", "path": "python/ray/ml/tests/test_torch_predictor.py", "file_name": "test_torch_predictor.py", "fun_name": "test_predict_dataframe", "commit_message": "[AIR] Directly convert `TorchPredictor` `ndarray` inputs to tensors (#25190)\n\nIf you pass a multidimensional input to `TorchPredictor.predict`, AIR errors. For more information about the error, see #25194.\r\n\r\nCo-authored-by: Amog Kamsetty ", "code": "def test_predict_dataframe():\n predictor = TorchPredictor(model=torch.nn.Linear(2, 1, bias=False))\n\n data_batch = pd.DataFrame({\"X0\": [0.0, 0.0, 0.0], \"X1\": [0.0, 0.0, 0.0]})\n predictions = predictor.predict(data_batch, dtype=torch.float)\n\n assert len(predictions) == 3\n assert predictions.to_numpy().flatten().tolist() == [0.0, 0.0, 0.0]\n\n\n@pytest.mark.parametrize(\n (\"input_dtype\", \"expected_output_dtype\"),\n (\n (torch.float16, np.float16),\n (torch.float64, np.float64),\n (torch.int32, np.int32),\n (torch.int64, np.int64),\n ),\n)", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n (\"input_dtype\", \"expected_output_dtype\"),\n (\n (torch.float16, np.float16),\n (torch.float64, np.float64),\n (torch.int32, np.int32),\n (torch.int64, np.int64),\n ),\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 94, "n_words": 45, "vocab_size": 37, "complexity": 1, "nloc": 6, "token_counts": 114, "n_ast_nodes": 228, "n_identifiers": 27, "random_cut": "def test_predict_dataframe():\n predictor = TorchPredictor(model=torch.nn.Linear(2, 1, bias=False))\n\n data_batch = pd.DataFrame({\"X0\": [0.0, 0.0, 0.0], \"X1\": [0.0, 0.0, 0.0]})\n predictions = predictor.predict(data_batch, dtype=torch.float)\n\n assert len(predictions) == 3\n assert predictions.to_numpy().flatten().tolist() == [0.0, 0.0, 0.0]\n\n\n@pytest.mark.parametrize(\n (\"input_dtype\"," }, { "id": 183689, "commit_id": "55543479ad3049c6f9d1507d034c7c5bedf3981a", "repo": "textual", "path": "src/textual/message_pump.py", "file_name": "message_pump.py", "fun_name": "_process_messages", "commit_message": "combine updates, cache arrangements", "code": "async def _process_messages(self) -> None:\n \n _rich_traceback_guard = True\n\n while not self._closed:\n try:\n message = await self.get_message()\n except 
MessagePumpClosed:\n break\n except CancelledError:\n raise\n except Exception as error:\n raise error from None\n\n # Combine any pending messages that may supersede this one\n while not (self._closed or self._closing):\n try:\n pending = self.peek_message()\n except MessagePumpClosed:\n break\n if pending is None or not message.can_replace(pending):\n break\n try:\n message = await self.get_message()\n except MessagePumpClosed:\n break\n\n try:\n await self.dispatch_message(message)\n except CancelledError:\n raise\n except Exception as error:\n self.app.on_exception(error)\n break\n finally:\n if self._message_queue.empty():\n if not self._closed:\n event = events.Idle(self)\n for _cls, method in self._get_dispatch_methods(\n \"on_idle\", event\n ):\n try:\n await invoke(method, event)\n except Exception as error:\n self.app.on_exception(error)\n break\n\n log(\"CLOSED\", self)\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 814, "n_words": 109, "vocab_size": 60, "complexity": 18, "nloc": 43, "token_counts": 192, "n_ast_nodes": 330, "n_identifiers": 27, "random_cut": "async def _process_messages(self) -> None:\n \n _rich_traceback_guard = True\n\n while not self._closed:\n try:\n message = await self.get_message()\n except MessagePumpClosed" }, { "id": 110787, "commit_id": "756eb1e539aff1aa7c9a73c42b527c6b6f204419", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_contour.py", "file_name": "test_contour.py", "fun_name": "test_contour_no_args", "commit_message": "Support only positional args for data in contour", "code": "def test_contour_no_args():\n fig, ax = plt.subplots()\n data = [[0, 1], [1, 0]]\n with pytest.raises(TypeError, match=r\"contour\\(\\) takes from 1 to 4\"):\n ax.contour(Z=data)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 36, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 5, "token_counts": 49, "n_ast_nodes": 78, "n_identifiers": 12, "random_cut": "def test_contour_no_args():\n fig, ax = plt.subplots()\n data = [[0, 1], [1, 0]]\n with pytest.raises(TypeError, match=r\"contour\\(\\) takes from 1 to 4\"):\n ax.contour" }, { "id": 255088, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/backend/test/case/node/stringnormalizer.py", "file_name": "stringnormalizer.py", "fun_name": "export_monday_casesensintive_lower", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: 
Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def export_monday_casesensintive_lower() -> None:\n input = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(object)\n output = np.array([u'tuesday', u'wednesday', u'thursday']).astype(object)\n stopwords = [u'monday']\n\n node = onnx.helper.make_node(\n 'StringNormalizer',\n inputs=['x'],\n outputs=['y'],\n case_change_action='LOWER',\n is_case_sensitive=1,\n stopwords=stopwords\n )\n expect(node, inputs=[input], outputs=[output], name='test_strnormalizer_export_monday_casesensintive_lower')\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 139, "n_words": 32, "vocab_size": 27, "complexity": 1, "nloc": 13, "token_counts": 113, "n_ast_nodes": 178, "n_identifiers": 18, "random_cut": "def export_monday_casesensintive_lower() -> None:\n input = np.array([u'monday', u'tuesday', u'wednesday', u'thursday']).astype(object)\n output = np.array([u'tuesday', u'wednesday', u'thursday']" }, { "id": 44086, "commit_id": "928dafe6c495bbf3e03d14473753fce915134a46", "repo": "airflow", "path": "tests/www/views/test_views_tasks.py", "file_name": "test_views_tasks.py", "fun_name": "test_dag_details_trigger_origin_dag_details_view", "commit_message": "Return to the same place when triggering a DAG (#20955)", "code": "def test_dag_details_trigger_origin_dag_details_view(app, admin_client):\n app.dag_bag.get_dag('test_graph_view').create_dagrun(\n run_type=DagRunType.SCHEDULED,\n execution_date=DEFAULT_DATE,\n data_interval=(DEFAULT_DATE, DEFAULT_DATE),\n start_date=timezone.utcnow(),\n state=State.RUNNING,\n )\n\n url = 'dag_details?dag_id=test_graph_view'\n resp = admin_client.get(url, follow_redirects=True)\n params = {'dag_id': 'test_graph_view', 'origin': '/dag_details?dag_id=test_graph_view'}\n href = f\"/trigger?{html.escape(urllib.parse.urlencode(params))}\"\n check_content_in_response(href, resp)\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 84, "n_words": 29, "vocab_size": 26, "complexity": 1, "nloc": 13, "token_counts": 87, "n_ast_nodes": 164, "n_identifiers": 30, "random_cut": "def test_dag_details_trigger_origin_dag_details_view(app, admin_client):\n app.dag_bag.get_dag('test_graph_view').create_da" }, { "id": 122115, "commit_id": "aed46f3312c970de257afbeb6cd775e79dd8e04e", "repo": "jax", "path": "jax/_src/random.py", "file_name": "random.py", "fun_name": "_truncated_normal", "commit_message": "[typing] use jax.Array annotations in random.py", "code": "def _truncated_normal(key, lower, upper, shape, dtype) -> Array:\n if shape is None:\n shape = lax.broadcast_shapes(np.shape(lower), np.shape(upper))\n else:\n _check_shape(\"truncated_normal\", shape, np.shape(lower), np.shape(upper))\n\n sqrt2 = np.array(np.sqrt(2), dtype)\n lower = lax.convert_element_type(lower, dtype)\n upper = lax.convert_element_type(upper, dtype)\n a = lax.erf(lower / sqrt2)\n b = lax.erf(upper / sqrt2)\n if not jnp.issubdtype(dtype, np.floating):\n raise TypeError(\"truncated_normal only accepts floating point dtypes.\")\n u = uniform(key, shape, dtype, minval=a, maxval=b)\n out = sqrt2 * lax.erf_inv(u)\n # Clamp the value to the open interval (lower, upper) to make sure that\n # rounding (or if we chose `a` for `u`) doesn't push us outside of the range.\n return jnp.clip(\n out,\n lax.nextafter(lax.stop_gradient(lower), 
np.array(np.inf, dtype=dtype)),\n lax.nextafter(lax.stop_gradient(upper), np.array(-np.inf, dtype=dtype)))\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 141, "n_words": 105, "vocab_size": 82, "complexity": 3, "nloc": 18, "token_counts": 221, "n_ast_nodes": 336, "n_identifiers": 32, "random_cut": "def _truncated_normal(key, lower, upper, shape, dtype) -> Array:\n if shape is None:\n shape = lax.broadcast_shapes(np.shape(lower), np.shape(upper))\n else:\n _check_shape(\"truncated_normal\", shape, np.shape(lower), np.shape(upper))\n\n sqrt2 = np.array(np.sqrt(2), dtype)\n lower = lax.convert_element_type(lower, dtype)\n upper = lax.convert_element_type(upper, dtype)\n a = lax.erf(lower / sqrt2)\n b = lax.erf(upper / sqrt2)\n if not jnp.issubdtype(dtype, np.floating):\n raise TypeError(\"truncated_normal only accepts floating point dtypes.\")\n u = uniform(key, shape, dtype, minval=a, maxval=b)\n out = sqrt2 * lax.erf_inv(u)\n # Clamp the value to the open interval (lower, upper) to make sure that\n # rounding (or if we chose `a` for `u`) doesn't push us outside of the range.\n return jnp.clip(\n out,\n lax.nextafter(lax.stop_gradient(lower)," }, { "id": 62572, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py", "file_name": "__init__.py", "fun_name": "getTreeBuilder", "commit_message": "upd; format", "code": "def getTreeBuilder(treeType, implementation=None, **kwargs):\n \n\n treeType = treeType.lower()\n if treeType not in treeBuilderCache:\n if treeType == \"dom\":\n from . import dom\n # Come up with a sane default (pref. from the stdlib)\n if implementation is None:\n from xml.dom import minidom\n implementation = minidom\n # NEVER cache here, caching is done in the dom submodule\n return dom.getDomModule(implementation, **kwargs).TreeBuilder\n elif treeType == \"lxml\":\n from . import etree_lxml\n treeBuilderCache[treeType] = etree_lxml.TreeBuilder\n elif treeType == \"etree\":\n from . import etree\n if implementation is None:\n implementation = default_etree\n # NEVER cache here, caching is done in the etree submodule\n return etree.getETreeModule(implementation, **kwargs).TreeBuilder\n else:\n raise ValueError( % treeType)\n return treeBuilderCache.get(treeType)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 320, "n_words": 103, "vocab_size": 56, "complexity": 7, "nloc": 20, "token_counts": 123, "n_ast_nodes": 211, "n_identifiers": 17, "random_cut": "def getTreeBuilder(treeType, implementation=None, **kwargs):\n \n\n treeType = treeType.lower()\n if treeType not in treeBuilderCache:\n if treeType == \"dom\":\n from . import dom\n # Come up with a sane default (pref. from the stdlib)\n if implementation is None:\n from xml.dom import minidom\n implementation = minidom\n # NEVER cache here, caching is done in the dom submodule\n return dom.getDomModule(implementation, **kwargs).TreeBuilder\n elif treeType == \"lxml\":\n from . 
import etree_lxml\n treeBuilderCache[treeType] = etree_lxml.TreeBuilder\n elif treeT" }, { "id": 260449, "commit_id": "a0623cec4a253ce3b5c5e4cf3b080651c84a53a9", "repo": "scikit-learn", "path": "sklearn/linear_model/_quantile.py", "file_name": "_quantile.py", "fun_name": "fit", "commit_message": "MAINT Param validation for QuantileRegressor (#23808)\n\nCo-authored-by: jeremie du boisberranger ", "code": "def fit(self, X, y, sample_weight=None):\n \n self._validate_params()\n X, y = self._validate_data(\n X,\n y,\n accept_sparse=[\"csc\", \"csr\", \"coo\"],\n y_numeric=True,\n multi_output=False,\n )\n sample_weight = _check_sample_weight(sample_weight, X)\n\n n_features = X.shape[1]\n n_params = n_features\n\n if self.fit_intercept:\n n_params += 1\n # Note that centering y and X with _preprocess_data does not work\n # for quantile regression.\n\n # The objective is defined as 1/n * sum(pinball loss) + alpha * L1.\n # So we rescale the penalty term, which is equivalent.\n alpha = np.sum(sample_weight) * self.alpha\n\n if self.solver == \"warn\":\n warnings.warn(\n \"The default solver will change from 'interior-point' to 'highs' in \"\n \"version 1.4. Set `solver='highs'` or to the desired solver to silence \"\n \"this warning.\",\n FutureWarning,\n )\n solver = \"interior-point\"\n elif self.solver in (\n \"highs-ds\",\n \"highs-ipm\",\n \"highs\",\n ) and sp_version < parse_version(\"1.6.0\"):\n raise ValueError(\n f\"Solver {self.solver} is only available \"\n f\"with scipy>=1.6.0, got {sp_version}\"\n )\n else:\n solver = self.solver\n\n if solver == \"interior-point\" and sp_version >= parse_version(\"1.11.0\"):\n raise ValueError(\n f\"Solver {solver} is not anymore available in SciPy >= 1.11.0.\"\n )\n\n if sparse.issparse(X) and solver not in [\"highs\", \"highs-ds\", \"highs-ipm\"]:\n raise ValueError(\n f\"Solver {self.solver} does not support sparse X. 
\"\n \"Use solver 'highs' for example.\"\n )\n # make default solver more stable\n if self.solver_options is None and solver == \"interior-point\":\n solver_options = {\"lstsq\": True}\n else:\n solver_options = self.solver_options\n\n # After rescaling alpha, the minimization problem is\n # min sum(pinball loss) + alpha * L1\n # Use linear programming formulation of quantile regression\n # min_x c x\n # A_eq x = b_eq\n # 0 <= x\n # x = (s0, s, t0, t, u, v) = slack variables >= 0\n # intercept = s0 - t0\n # coef = s - t\n # c = (0, alpha * 1_p, 0, alpha * 1_p, quantile * 1_n, (1-quantile) * 1_n)\n # residual = y - X@coef - intercept = u - v\n # A_eq = (1_n, X, -1_n, -X, diag(1_n), -diag(1_n))\n # b_eq = y\n # p = n_features\n # n = n_samples\n # 1_n = vector of length n with entries equal one\n # see https://stats.stackexchange.com/questions/384909/\n #\n # Filtering out zero sample weights from the beginning makes life\n # easier for the linprog solver.\n indices = np.nonzero(sample_weight)[0]\n n_indices = len(indices) # use n_mask instead of n_samples\n if n_indices < len(sample_weight):\n sample_weight = sample_weight[indices]\n X = _safe_indexing(X, indices)\n y = _safe_indexing(y, indices)\n c = np.concatenate(\n [\n np.full(2 * n_params, fill_value=alpha),\n sample_weight * self.quantile,\n sample_weight * (1 - self.quantile),\n ]\n )\n if self.fit_intercept:\n # do not penalize the intercept\n c[0] = 0\n c[n_params] = 0\n\n if solver in [\"highs\", \"highs-ds\", \"highs-ipm\"]:\n # Note that highs methods always use a sparse CSC memory layout internally,\n # even for optimization problems parametrized using dense numpy arrays.\n # Therefore, we work with CSC matrices as early as possible to limit\n # unnecessary repeated memory copies.\n eye = sparse.eye(n_indices, dtype=X.dtype, format=\"csc\")\n if self.fit_intercept:\n ones = sparse.csc_matrix(np.ones(shape=(n_indices, 1), dtype=X.dtype))\n A_eq = sparse.hstack([ones, X, -ones, -X, eye, -eye], format=\"csc\")\n else:\n A_eq = sparse.hstack([X, -X, eye, -eye], format=\"csc\")\n else:\n eye = np.eye(n_indices)\n if self.fit_intercept:\n ones = np.ones((n_indices, 1))\n A_eq = np.concatenate([ones, X, -ones, -X, eye, -eye], axis=1)\n else:\n A_eq = np.concatenate([X, -X, eye, -eye], axis=1)\n\n b_eq = y\n\n result = linprog(\n c=c,\n A_eq=A_eq,\n b_eq=b_eq,\n method=solver,\n options=solver_options,\n )\n solution = result.x\n if not result.success:\n failure = {\n 1: \"Iteration limit reached.\",\n 2: \"Problem appears to be infeasible.\",\n 3: \"Problem appears to be unbounded.\",\n 4: \"Numerical difficulties encountered.\",\n }\n warnings.warn(\n \"Linear programming for QuantileRegressor did not succeed.\\n\"\n f\"Status is {result.status}: \"\n + failure.setdefault(result.status, \"unknown reason\")\n + \"\\n\"\n + \"Result message of linprog:\\n\"\n + result.message,\n ConvergenceWarning,\n )\n\n # positive slack - negative slack\n # solution is an array with (params_pos, params_neg, u, v)\n params = solution[:n_params] - solution[n_params : 2 * n_params]\n\n self.n_iter_ = result.nit\n\n if self.fit_intercept:\n self.coef_ = params[1:]\n self.intercept_ = params[0]\n else:\n self.coef_ = params\n self.intercept_ = 0.0\n return self\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 2114, "n_words": 631, "vocab_size": 332, "complexity": 18, "nloc": 111, "token_counts": 655, "n_ast_nodes": 1110, "n_identifiers": 64, 
"random_cut": "def fit(self, X, y, sample_weight=None):\n \n self._validate_params()\n X, y = self._validate_data(\n X,\n y,\n accept_sparse=[\"csc\", \"csr\", \"coo\"],\n y_numeric=True,\n multi_output=False,\n )\n sample_weight = _check_sample_weight(sample_weight, X)\n\n n_features = X.shape[1]\n n_params = n_features\n\n if self.fit_intercept:\n n_params += 1\n # Note that centering y and X with _preprocess_data does not work\n # for quantile regression.\n\n # The objective is defined as 1/n * sum(pinball loss) + alpha * L1.\n # So we rescale the penalty term, which is equivalent.\n alpha = np.sum(sample_weight) * self.alpha\n\n if self.solver == \"warn\":\n warnings.warn(\n \"The default solver will change from 'interior-point' to 'highs' in \"\n \"version 1.4. Set `solver='highs'` or to the desired solver to silence \"\n \"this warning.\",\n FutureWarning,\n )\n solver = \"interior-point\"\n elif self.solver in (\n \"highs-ds\",\n \"highs-ipm\",\n \"highs\",\n ) and sp_version < parse_version(\"1.6.0\"):\n raise ValueError(\n f\"Solver {self.solver} is only available \"\n f\"with scipy>=1.6.0, got {sp_version}\"\n )\n else:\n solver = self.solver\n\n if solver == \"interior-point\" and sp_version >= parse_version(\"1.11.0\"):\n raise ValueError(\n f\"Solver {solver} is not anymore available in SciPy >= 1.11.0.\"\n )\n\n if sparse.issparse(X) and solver not in [\"highs\", \"highs-ds\", \"highs-ipm\"]:\n raise ValueError(\n f\"Solver {self.solver} does not support sparse X. \"\n \"Use solver 'highs' for example.\"\n )\n # make default solver more stable\n if self.solver_options is None and solver == \"interior-point\":\n solver_options = {\"lstsq\": True}\n else:\n solver_options = self.solver_options\n\n # After rescaling alpha, the minimization problem is\n # min sum(pinb" }, { "id": 261542, "commit_id": "63f92d4adb61aed58d656544cc6caa9d68cb6065", "repo": "scikit-learn", "path": "sklearn/datasets/tests/test_lfw.py", "file_name": "test_lfw.py", "fun_name": "setup_module", "commit_message": "MAINT bump up CI dependencies (#24803)\n\n[scipy-dev] [pypy]", "code": "def setup_module():\n \n Image = pytest.importorskip(\"PIL.Image\")\n\n global SCIKIT_LEARN_DATA, SCIKIT_LEARN_EMPTY_DATA, LFW_HOME\n\n SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix=\"scikit_learn_lfw_test_\")\n LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, \"lfw_home\")\n\n SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix=\"scikit_learn_empty_test_\")\n\n if not os.path.exists(LFW_HOME):\n os.makedirs(LFW_HOME)\n\n random_state = random.Random(42)\n np_rng = np.random.RandomState(42)\n\n # generate some random jpeg files for each person\n counts = {}\n for name in FAKE_NAMES:\n folder_name = os.path.join(LFW_HOME, \"lfw_funneled\", name)\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n\n n_faces = np_rng.randint(1, 5)\n counts[name] = n_faces\n for i in range(n_faces):\n file_path = os.path.join(folder_name, name + \"_%04d.jpg\" % i)\n uniface = np_rng.randint(0, 255, size=(250, 250, 3))\n img = Image.fromarray(uniface.astype(np.uint8))\n img.save(file_path)\n\n # add some random file pollution to test robustness\n with open(os.path.join(LFW_HOME, \"lfw_funneled\", \".test.swp\"), \"wb\") as f:\n f.write(b\"Text file to be ignored by the dataset loader.\")\n\n # generate some pairing metadata files using the same format as LFW\n with open(os.path.join(LFW_HOME, \"pairsDevTrain.txt\"), \"wb\") as f:\n f.write(b\"10\\n\")\n more_than_two = [name for name, count in counts.items() if count >= 2]\n for i in 
range(5):\n name = random_state.choice(more_than_two)\n first, second = random_state.sample(range(counts[name]), 2)\n f.write((\"%s\\t%d\\t%d\\n\" % (name, first, second)).encode())\n\n for i in range(5):\n first_name, second_name = random_state.sample(FAKE_NAMES, 2)\n first_index = np_rng.choice(np.arange(counts[first_name]))\n second_index = np_rng.choice(np.arange(counts[second_name]))\n f.write(\n (\n \"%s\\t%d\\t%s\\t%d\\n\"\n % (first_name, first_index, second_name, second_index)\n ).encode()\n )\n\n with open(os.path.join(LFW_HOME, \"pairsDevTest.txt\"), \"wb\") as f:\n f.write(b\"Fake place holder that won't be tested\")\n\n with open(os.path.join(LFW_HOME, \"pairs.txt\"), \"wb\") as f:\n f.write(b\"Fake place holder that won't be tested\")\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 564, "n_words": 208, "vocab_size": 131, "complexity": 9, "nloc": 45, "token_counts": 460, "n_ast_nodes": 764, "n_identifiers": 53, "random_cut": "def setup_module():\n \n Image = pytest.importorskip(\"PIL.Image\")\n\n global SCIKIT_LEARN_DATA, SCIKIT_LEARN_EMPTY_DATA, LFW_HOME\n\n SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix=\"scikit_learn_lfw_test_\")\n LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, \"lfw_home\")\n\n SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix=\"scikit_learn_empty_test_\")\n\n if not os.path.exists(LFW_HOME):\n os.makedirs(LFW_HOME)\n\n random_state = random.Random(42)\n np_rng = np.random.RandomState(42)\n\n # generate some random jpeg files for each person\n counts = {}\n for name in FAKE_NAMES:\n folder_name = os.path.join(LFW_HOME, \"lfw_funneled\", name)\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n\n n_faces = np_rng.randint(1, 5)\n counts[name] = n_faces\n for i in range(n_faces):\n file_path = os.path.join(folder_name, name + \"_%04d.jpg\" % i)\n uniface = np_rng.randint(0, 255, size=(250, 250, 3))\n img = Image.fromarray(uniface.astype(np.uint8))\n img.save(file_path)\n\n # add some random file pollution to test robustness\n with open(os.path.join(LFW_HOME, \"lfw_funneled\", \".test.swp\"), \"wb\") as f:\n f.write(b\"Text file to be ignored by the dataset loader.\")\n\n # generate some pairing metadata files using the same format as LFW\n with open(os.path.join(LFW_HOME, \"pairsDevTrain.txt\"), \"wb\") as f:\n f.write(b\"10\\n\")\n more_than_two = [name for name, count in counts.items() if count >= 2]\n for i in range(5):\n name = random_state.choice(more_than_two)\n first, second = random_state.sample(range(counts[name]), 2)\n f.write((\"%s\\t%d\\t%d\\n\" % (name, first, second)).encode())\n\n for i in range(5):\n first_name, second_name = random_state.sample(FAKE_NAMES, 2)\n first_index = np_rng.choice(np.arange(counts[first_name]))\n second_index = np_rng.choice(np.arange(counts[second_name]))\n f.write(\n (\n \"%s\\t%d\\t%s\\t%d\\n\"\n % (first_name, first_index, second_name, second_index)\n ).encode()\n )\n\n with open(os.path.join(LFW_HOME, \"pairsDevTest.txt\"), \"wb\") as f:\n f.write(b\"Fake place holder that won't b" }, { "id": 313041, "commit_id": "bd920aa43de584f6a4db934902d64b39aabbd6d6", "repo": "core", "path": "tests/components/plugwise/conftest.py", "file_name": "conftest.py", "fun_name": "mock_stretch", "commit_message": "Cleanup existing Plugwise tests and test fixtures (#66282)\n\n* Cleanup existing Plugwise tests and test fixtures\r\n\r\n* More cleanup", "code": "def mock_stretch() -> Generator[None, MagicMock, None]:\n 
\n chosen_env = \"stretch_v31\"\n with patch(\n \"homeassistant.components.plugwise.gateway.Smile\", autospec=True\n ) as smile_mock:\n smile = smile_mock.return_value\n\n smile.gateway_id = \"259882df3c05415b99c2d962534ce820\"\n smile.heater_id = None\n smile.smile_version = \"3.1.11\"\n smile.smile_type = \"stretch\"\n smile.smile_hostname = \"stretch98765\"\n smile.smile_name = \"Stretch\"\n\n smile.connect.return_value = True\n smile.async_update.return_value = _read_json(chosen_env, \"all_data\")\n\n yield smile\n\n\n@pytest.fixture", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 135, "n_words": 47, "vocab_size": 37, "complexity": 1, "nloc": 16, "token_counts": 85, "n_ast_nodes": 161, "n_identifiers": 20, "random_cut": "def mock_stretch() -> Generator[None, MagicMock, None]:\n \n chosen_env = \"stretch_v31\"\n with patch(\n \"homeassistant.components.plugwise.gateway.Smile\", autospec=True\n ) as smile_mock:\n smile = smile_mock.return_value\n\n smile.gateway_id = \"259882df3c" }, { "id": 178587, "commit_id": "e188ede8767cda1750cd41c08bed82c00888aebe", "repo": "Nuitka", "path": "nuitka/utils/FileOperations.py", "file_name": "FileOperations.py", "fun_name": "withPreserveFileMode", "commit_message": "macOS: Proper adhoc signing of created distribution\n\n* With this homebrew works on M1 and macOS 12", "code": "def withPreserveFileMode(filenames):\n if type(filenames) is str:\n filenames = [filenames]\n\n old_modes = {}\n for filename in filenames:\n old_modes[filename] = os.stat(filename).st_mode\n\n yield\n\n for filename in filenames:\n os.chmod(filename, old_modes[filename])\n\n\n@contextmanager", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "@contextmanager", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 61, "n_words": 27, "vocab_size": 21, "complexity": 4, "nloc": 9, "token_counts": 57, "n_ast_nodes": 94, "n_identifiers": 11, "random_cut": "def withPreserveFileMode(filenames):\n if type(filenames) is str:\n filenames = [filenames]\n\n old_modes = {}\n for filename in filenames:\n old_modes[filename] = os.stat(filename).st_mode\n\n yield\n" }, { "id": 281475, "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/economy/fred/prediction/pred_controller.py", "file_name": "pred_controller.py", "fun_name": "print_help", "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for how we handle Rich moving forward\r\n\r\n* remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated 
dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", "code": "def print_help(self):\n \n id_string = \"\"\n for s_id, sub_dict in self.current_series.items():\n id_string += f\" [cyan]{s_id.upper()}[/cyan] : {sub_dict['title']}\"\n help_string = f\n console.print(help_string)\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 69, "n_words": 20, "vocab_size": 18, "complexity": 2, "nloc": 22, "token_counts": 36, "n_ast_nodes": 96, "n_identifiers": 12, "random_cut": "def print_help(self):\n \n id_string = \"\"\n for s_id, sub_dict in self.current_series.items():\n id_string += f\" [cyan]{s_id.upper()}[/cyan]" }, { "id": 155297, "commit_id": "eb99c500a40c5565012e3fe83c5e6ef333d1b487", "repo": "modin", "path": "modin/test/interchange/dataframe_protocol/hdk/test_protocol.py", "file_name": "test_protocol.py", "fun_name": "test_zero_copy_export_for_primitives", "commit_message": "REFACTOR-#5303: Fix code scanning alert - Unused local variable (#5304)\n\nSigned-off-by: Myachev \r\nCo-authored-by: Mahesh Vashishtha ", "code": "def test_zero_copy_export_for_primitives(data_has_nulls):\n \n data = get_data_of_all_types(\n has_nulls=data_has_nulls, include_dtypes=[\"int\", \"uint\", \"float\"]\n )\n at = pa.Table.from_pydict(data)\n\n md_df = from_arrow(at)\n protocol_df = md_df.__dataframe__(allow_copy=False)\n\n for i, col in enumerate(protocol_df.get_columns()):\n col_arr, _ = primitive_column_to_ndarray(col)\n\n exported_ptr = col_arr.__array_interface__[\"data\"][0]\n producer_ptr = at.column(i).chunks[0].buffers()[-1].address\n # Verify that the pointers of produce and exported objects point to the same data\n assert producer_ptr == exported_ptr\n\n # Can't export `md_df` zero-copy no more as it has delayed 'fillna' operation\n md_df = md_df.fillna({\"float32\": 32.0})\n non_zero_copy_protocol_df = md_df.__dataframe__(allow_copy=False)\n\n with pytest.raises(RuntimeError):\n primitive_column_to_ndarray(\n non_zero_copy_protocol_df.get_column_by_name(\"float32\")\n )\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": 
"", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 178, "n_words": 78, "vocab_size": 62, "complexity": 2, "nloc": 18, "token_counts": 151, "n_ast_nodes": 252, "n_identifiers": 35, "random_cut": "def test_zero_copy_export_for_primitives(data_has_nulls):\n \n data = get_data_of_all_types(\n has_nulls=data_has_nulls, include_dtypes=[\"int\", \"uint\", \"float\"]\n )\n at = pa.Table.from_pydict(data)\n\n md_df = from_arrow(at)\n protocol_df = md_df.__dataframe__(allow_copy=False)\n\n for i, col in enumerate(protocol_df.get_columns()):\n col_arr, _ = pri" }, { "id": 179102, "commit_id": "fa7fddca2869dec8fb1c7c9691fb77f1cc8805b6", "repo": "DeepFaceLive", "path": "apps/DeepFaceLive/backend/CameraSource.py", "file_name": "CameraSource.py", "fun_name": "on_cs_device_idx_selected", "commit_message": "CameraSource now shows names of video input devices in Windows", "code": "def on_cs_device_idx_selected(self, device_idx, device_name):\n cs, state = self.get_control_sheet(), self.get_state()\n if state.device_idx != device_idx:\n state.device_idx = device_idx\n self.save_state()\n if self.is_started():\n self.restart()\n", "url": "https://github.com/iperov/DeepFaceLive.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 81, "n_words": 20, "vocab_size": 17, "complexity": 3, "nloc": 7, "token_counts": 53, "n_ast_nodes": 87, "n_identifiers": 11, "random_cut": "def on_cs_device_idx_selected(self, device_idx, device_name):\n cs, state = self.get_control_sheet(), self.get_state()\n if state.device_idx != device_idx:\n state.device_idx = devic" }, { "id": 76211, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/users/tests/test_admin_views.py", "file_name": "test_admin_views.py", "fun_name": "test_user_can_delete_other_superuser", "commit_message": "Reformat with black", "code": "def test_user_can_delete_other_superuser(self):\n response = self.client.get(\n reverse(\"wagtailusers_users:delete\", args=(self.superuser.pk,))\n )\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"wagtailusers/users/confirm_delete.html\")\n\n response = self.client.post(\n reverse(\"wagtailusers_users:delete\", args=(self.superuser.pk,))\n )\n # Should redirect back to index\n self.assertRedirects(response, reverse(\"wagtailusers_users:index\"))\n\n # Check that the user was deleted\n users = get_user_model().objects.filter(email=\"testsuperuser@email.com\")\n self.assertEqual(users.count(), 0)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 136, "n_words": 38, "vocab_size": 31, "complexity": 1, "nloc": 12, "token_counts": 108, "n_ast_nodes": 179, "n_identifiers": 20, "random_cut": "def test_user_can_delete_other_superuser(self):\n response = self.client.get(\n reverse(" }, { "id": 172506, "commit_id": "d8f5bdea6df3a0217f49062d4209cedc80caad0e", "repo": "calibre-web", "path": "cps/gdriveutils.py", "file_name": "gdriveutils.py", "fun_name": "moveGdriveFolderRemote", "commit_message": "Refactor rename author/title on gdrive", "code": "def moveGdriveFolderRemote(origin_file, target_folder):\n drive = getDrive(Gdrive.Instance().drive)\n previous_parents = \",\".join([parent[\"id\"] for parent in origin_file.get('parents')])\n children = drive.auth.service.children().list(folderId=previous_parents).execute()\n gFileTargetDir = getFileFromEbooksFolder(None, target_folder)\n if not gFileTargetDir or 
gFileTargetDir['title'] != target_folder:\n # Folder is not existing, create, and move folder\n drive.auth.service.files().patch(fileId=origin_file['id'],\n body={'title': target_folder},\n fields='title').execute()\n #gFileTargetDir = drive.CreateFile(\n # {'title': target_folder, 'parents': [{\"kind\": \"drive#fileLink\", 'id': getEbooksFolderId()}],\n # \"mimeType\": \"application/vnd.google-apps.folder\"})\n #gFileTargetDir.Upload()\n else:\n # Move the file to the new folder\n drive.auth.service.files().update(fileId=origin_file['id'],\n addParents=gFileTargetDir['id'],\n removeParents=previous_parents,\n fields='id, parents').execute()\n # if previous_parents has no children anymore, delete original fileparent\n if len(children['items']) == 1:\n deleteDatabaseEntry(previous_parents)\n drive.auth.service.files().delete(fileId=previous_parents).execute()\n\n", "url": "https://github.com/janeczku/calibre-web.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 388, "n_words": 85, "vocab_size": 69, "complexity": 5, "nloc": 17, "token_counts": 192, "n_ast_nodes": 328, "n_identifiers": 30, "random_cut": "def moveGdriveFolderRemote(origin_file, target_folder):\n drive = getDrive(Gdrive.Instance().drive)\n previous_parents = \",\".join([parent[\"id\"] for parent in origin_file.get('parents')])\n children = drive.auth.service.children().list(folderId=previous_parents).execute()\n gFileTargetDir = getFileFromEbooksFolder(None, target_folder)\n if not gFileTargetDir or gFileTargetDir['title'] != target_folder:\n # Folder is not existing, create, and move folder\n drive.auth.service.files().patch(fileId=origin_file['id'],\n body={'title': target_folder},\n fields='title').execute()\n #gFileTargetDir = drive.CreateFile(\n # {'title': target_folder, 'parents': [{\"kind\": \"drive#fileLink\", 'id': getEbooksFolderId()}],\n # \"mimeType\": \"application/vnd.google-apps.folder\"})\n #gFi" }, { "id": 290489, "commit_id": "9ded2325223de3918e3f69aab8732487323b2214", "repo": "core", "path": "tests/components/hassio/test_addon_manager.py", "file_name": "test_addon_manager.py", "fun_name": "stop_addon_fixture", "commit_message": "Move zwave_js addon manager to hassio integration (#81354)", "code": "def stop_addon_fixture() -> Generator[AsyncMock, None, None]:\n \n with patch(\n \"homeassistant.components.hassio.addon_manager.async_stop_addon\"\n ) as stop_addon:\n yield stop_addon\n\n\n@pytest.fixture(name=\"create_backup\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture(name=\"create_backup\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 37, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 6, "token_counts": 24, "n_ast_nodes": 62, "n_identifiers": 8, "random_cut": "def stop_addon_fixture() -> Generator[AsyncMock, None, None]:\n \n " }, { "id": 6750, "commit_id": "5209b1aed23a98c092a0e2682ed13b7f61623e20", "repo": "ludwig", "path": "ludwig/features/audio_feature.py", "file_name": "audio_feature.py", "fun_name": "preprocessing_defaults", "commit_message": "Torchaudio fixes (#2007)\n\n* hotfix for shape broadcast issue\r\n\r\n* Reverted [1] index on padded audio feature, set up test for feature creation observation\r\n\r\n* Changed default audio type since raw takes too long\r\n\r\n* Removed debug code", "code": "def preprocessing_defaults():\n return {\n \"audio_file_length_limit_in_s\": 7.5,\n \"missing_value_strategy\": BACKFILL,\n \"in_memory\": True,\n \"padding_value\": 0,\n 
\"norm\": None,\n \"audio_feature\": {\n TYPE: \"fbank\",\n \"window_length_in_s\": 0.04,\n \"window_shift_in_s\": 0.02,\n \"num_filter_bands\": 80,\n },\n }\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 176, "n_words": 26, "vocab_size": 25, "complexity": 1, "nloc": 14, "token_counts": 54, "n_ast_nodes": 85, "n_identifiers": 3, "random_cut": "def preprocessing_defaults():\n return {\n \"audio_file_length_limit_in_s\": 7.5,\n \"missing_value_strategy\": BACKFILL,\n \"in_memory\": True,\n \"padding_value\": 0,\n \"norm\": None,\n \"audio_feature\": {\n TYPE: \"fbank\",\n \"window_length_in_s\": 0.04,\n \"window_shift_in_s\": 0.02,\n \"num_filter_bands\": 80,\n " }, { "id": 224959, "commit_id": "ad43f1b8b7c5280dd8679af1d9624eed3b1bce1b", "repo": "mkdocs", "path": "mkdocs/config/config_options.py", "file_name": "config_options.py", "fun_name": "run_validation", "commit_message": "Add `edit_uri_template` config", "code": "def run_validation(self, value):\n try:\n return self.Template(self.Formatter(), value)\n except Exception as e:\n raise ValidationError(e)\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 48, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 32, "n_ast_nodes": 52, "n_identifiers": 8, "random_cut": "def run_validation(self, value):\n try" }, { "id": 253135, "commit_id": "a308d3dabcbb938a79902bfd02cf2be0b711c308", "repo": "mitmproxy", "path": "test/mitmproxy/proxy/layers/http/test_http3.py", "file_name": "test_http3.py", "fun_name": "__rshift__", "commit_message": "[quic] first test for H3", "code": "def __rshift__(self, e):\n if isinstance(e, collections.abc.Iterable):\n for e_i in e:\n super().__rshift__(e_i)\n else:\n super().__rshift__(e)\n return self\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 72, "n_words": 15, "vocab_size": 15, "complexity": 3, "nloc": 7, "token_counts": 44, "n_ast_nodes": 71, "n_identifiers": 9, "random_cut": "def __rshift__(self, e):\n if isinstance(e, collections.abc.Iterable):\n for e_i in e:\n super().__rshift__(e_i)\n else:\n " }, { "id": 321809, "commit_id": "ed19d7f58b2664bb310c7cb6b52c5b9a06ea60b2", "repo": "qutebrowser", "path": "tests/unit/browser/test_qutescheme.py", "file_name": "test_qutescheme.py", "fun_name": "prepare_config", "commit_message": "Add --include-hidden for :config-diff\n\nNeeded it for debugging, so why not implement it properly.\nTODO: Changelog, pick to master?", "code": "def prepare_config(self, config_stub):\n config_stub.set_obj(\n \"content.javascript.enabled\",\n True,\n pattern=urlmatch.UrlPattern(\"chrome-devtools://*\"),\n hide_userconfig=True,\n )\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 66, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 7, "token_counts": 29, "n_ast_nodes": 46, "n_identifiers": 8, "random_cut": "def prepare_config(self, config_stub):\n config_stub.set_obj(\n \"content.javascript.enabled\",\n True,\n pattern=urlmatch.UrlPattern(\"" }, { "id": 116224, "commit_id": "d58879d9595ca64a4d9ef22ec81c3d3b9d4de864", "repo": "mindsdb", "path": "tests/unit/test_mongodb_server.py", "file_name": "test_mongodb_server.py", "fun_name": "unload_module", 
"commit_message": "fix test concurrency: imported executor_commands didn't allow to mock SQLQuery inside MongoServer", "code": "def unload_module(path):\n # remove all modules started with path\n import sys\n to_remove = []\n for module_name in sys.modules:\n if module_name.startswith(path):\n to_remove.append(module_name)\n for module_name in to_remove:\n sys.modules.pop(module_name)\n\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 65, "n_words": 26, "vocab_size": 23, "complexity": 4, "nloc": 8, "token_counts": 45, "n_ast_nodes": 74, "n_identifiers": 9, "random_cut": "def unload_module(path):\n # remove all modules started with path\n import sys\n to_remove = []\n for module_name in sys.modules:\n if module_na" }, { "id": 214899, "commit_id": "98cbbaffd1bc0c191951e0b09c4f9ff8e083a61c", "repo": "flair", "path": "tests/embedding_test_utils.py", "file_name": "embedding_test_utils.py", "fun_name": "test_keep_batch_order", "commit_message": "unify embedding tests", "code": "def test_keep_batch_order(self):\n embeddings = self.create_embedding_with_args(self.default_args)\n embedding_names = embeddings.get_names()\n\n sentences_1 = [Sentence(\"First sentence\"), Sentence(\"This is second sentence\")]\n sentences_2 = [Sentence(\"This is second sentence\"), Sentence(\"First sentence\")]\n\n embeddings.embed(sentences_1)\n embeddings.embed(sentences_2)\n\n assert sentences_1[0].to_original_text() == \"First sentence\"\n assert sentences_1[1].to_original_text() == \"This is second sentence\"\n\n if self.is_document_embedding:\n assert (\n torch.norm(\n sentences_1[0].get_embedding(embedding_names) - sentences_2[1].get_embedding(embedding_names)\n )\n == 0.0\n )\n assert (\n torch.norm(\n sentences_1[1].get_embedding(embedding_names) - sentences_2[0].get_embedding(embedding_names)\n )\n == 0.0\n )\n if self.is_token_embedding:\n for i in range(len(sentences_1[0])):\n assert (\n torch.norm(\n sentences_1[0][i].get_embedding(embedding_names)\n - sentences_2[1][i].get_embedding(embedding_names)\n )\n == 0.0\n )\n for i in range(len(sentences_1[1])):\n assert (\n torch.norm(\n sentences_1[1][i].get_embedding(embedding_names)\n - sentences_2[0][i].get_embedding(embedding_names)\n )\n == 0.0\n )\n del embeddings\n", "url": "https://github.com/flairNLP/flair.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 628, "n_words": 92, "vocab_size": 48, "complexity": 5, "nloc": 40, "token_counts": 258, "n_ast_nodes": 396, "n_identifiers": 20, "random_cut": "def test_keep_batch_order(self):\n embeddings " }, { "id": 69025, "commit_id": "5c0a25012c602ed0d47136468e3b0bee11ddf5dd", "repo": "erpnext", "path": "erpnext/loan_management/doctype/loan_balance_adjustment/loan_balance_adjustment.py", "file_name": "loan_balance_adjustment.py", "fun_name": "validate", "commit_message": "feat: add adjustment amount to loan\n- fix: bugs in loan balance adjustment", "code": "def validate(self):\n if self.amount == 0:\n frappe.throw(_(\"Amount cannot be zero\"))\n if self.amount < 0:\n frappe.throw(_(\"Amount cannot be negative\"))\n self.set_missing_values()\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 61, "n_words": 19, "vocab_size": 13, "complexity": 3, "nloc": 6, "token_counts": 42, "n_ast_nodes": 74, "n_identifiers": 7, "random_cut": "def validate(self):\n if self.amount == 0:\n 
frappe.throw(_(\"Amount cannot be zero\"))\n if self.amount < 0:\n frappe.throw(_(\"Amount cannot be negative\"))\n self.set_missing_values()\n" }, { "id": 188559, "commit_id": "a930f3aab3b7b084a5fb4b60fd1b8722fee890be", "repo": "jumpserver", "path": "apps/users/serializers/user.py", "file_name": "user.py", "fun_name": "save_and_set_custom_m2m_fields", "commit_message": "fix: 修复创建更新用户给定默认权限", "code": "def save_and_set_custom_m2m_fields(self, validated_data, save_handler, created):\n m2m_values = {}\n for f, default_roles in self.custom_m2m_fields.items():\n roles = validated_data.pop(f, None)\n if created and not roles:\n roles = [\n Role.objects.filter(id=role.id).first()\n for role in default_roles\n ]\n m2m_values[f] = roles\n\n instance = save_handler(validated_data)\n for field_name, value in m2m_values.items():\n if value is None:\n continue\n field = getattr(instance, field_name)\n field.set(value)\n return instance\n", "url": "https://github.com/jumpserver/jumpserver.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 237, "n_words": 54, "vocab_size": 39, "complexity": 7, "nloc": 17, "token_counts": 113, "n_ast_nodes": 175, "n_identifiers": 24, "random_cut": "def save_and_set_custom_m2m_fields(self, validated_data, save_handler, created):\n m2m_values = {}\n for f, default_roles in self.custom_m2m_fields.items():\n roles = validated_data.pop(f, None)\n if created and not roles:\n roles = [\n Role.objects.filter(id=role.id).first()\n " }, { "id": 137357, "commit_id": "3a1bee28a19e81416ec2f2112cb6dcbc6e7ab845", "repo": "ray", "path": "python/ray/air/checkpoint.py", "file_name": "checkpoint.py", "fun_name": "to_bytes", "commit_message": "[AIR] `Checkpoint` improvements (#30948)\n\nBoston dataset (used in tests) is/will be removed from sklearn.\r\n\r\nSigned-off-by: Antoni Baum \r\nCo-authored-by: Balaji Veeramani ", "code": "def to_bytes(self) -> bytes:\n \n # Todo: Add support for stream in the future (to_bytes(file_like))\n data_dict = self.to_dict()\n if \"bytes_data\" in data_dict:\n return data_dict[\"bytes_data\"]\n return pickle.dumps(data_dict)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 71, "n_words": 25, "vocab_size": 23, "complexity": 2, "nloc": 10, "token_counts": 32, "n_ast_nodes": 59, "n_identifiers": 7, "random_cut": "def to_bytes(self) -> bytes:\n " }, { "id": 35928, "commit_id": "040c11f6dac72bc3088498aa19184da677563424", "repo": "transformers", "path": "tests/maskformer/test_feature_extraction_maskformer.py", "file_name": "test_feature_extraction_maskformer.py", "fun_name": "get_fake_maskformer_outputs", "commit_message": "Tests for MaskFormerFeatureExtractor's post_process*** methods (#15929)\n\n* proper tests for post_process*** methods in feature extractor\r\n\r\n* mask th == 0\r\n\r\n* Update tests/maskformer/test_feature_extraction_maskformer.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* make style\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def get_fake_maskformer_outputs(self):\n return MaskFormerForInstanceSegmentationOutput(\n # +1 for null class\n class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),\n masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),\n )\n\n\n@require_torch\n@require_vision", "url": 
"https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@require_torch\n@require_vision", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 65, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 5, "token_counts": 57, "n_ast_nodes": 90, "n_identifiers": 14, "random_cut": "def get_fake_maskformer_outputs(self):\n return MaskFormerForInstanceSegmentationOutput(\n # +1 for n" }, { "id": 119733, "commit_id": "8f93629e8780148511ad60d3f7e034ebf4319d9b", "repo": "jax", "path": "jax/_src/numpy/linalg.py", "file_name": "linalg.py", "fun_name": "_promote_arg_dtypes", "commit_message": "remove `_convert_element_type` from public `jax.lax` module", "code": "def _promote_arg_dtypes(*args):\n \n dtype, weak_type = dtypes._lattice_result_type(*args)\n if not jnp.issubdtype(dtype, jnp.inexact):\n dtype, weak_type = jnp.float_, False\n dtype = dtypes.canonicalize_dtype(dtype)\n args = [lax_internal._convert_element_type(arg, dtype, weak_type)\n for arg in args]\n if len(args) == 1:\n return args[0]\n else:\n return args\n\n\n@_wraps(np.linalg.cholesky)\n@jit", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@_wraps(np.linalg.cholesky)\n@jit", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 61, "n_words": 38, "vocab_size": 29, "complexity": 4, "nloc": 11, "token_counts": 83, "n_ast_nodes": 150, "n_identifiers": 20, "random_cut": "def _promote_arg_dtypes(*args):\n \n dtype, weak_type = dtypes._lattice_result_type(*args)\n if not jnp.issubdtype(dtype, jnp.inexact):\n dtype, weak_type = jnp.float_, False\n dtype = dtypes.canonicalize_dtype(dtype)\n args = [lax_internal._convert_element_type(arg, dtype, weak_type)\n" }, { "id": 263260, "commit_id": "64ccb7aea824fbec57f7ed1bbe483ec486183c13", "repo": "pyinstaller", "path": "bootloader/waflib/Tools/c_config.py", "file_name": "c_config.py", "fun_name": "check", "commit_message": "Bootloader: Building: Unpack waf's lib archive.\n\nDoing so makes it easier to modify. 
This is a temporary measure until the next\nwaf version is released (although I'm tempted to keep it since it's much more\nIDE completion friendly).", "code": "def check(self, *k, **kw):\n self.validate_c(kw)\n self.start_msg(kw['msg'], **kw)\n ret = None\n try:\n ret = self.run_build(*k, **kw)\n except self.errors.ConfigurationError:\n self.end_msg(kw['errmsg'], 'YELLOW', **kw)\n if Logs.verbose > 1:\n raise\n else:\n self.fatal('The configuration failed')\n else:\n kw['success'] = ret\n ret = self.post_check(*k, **kw)\n if not ret:\n self.end_msg(kw['errmsg'], 'YELLOW', **kw)\n self.fatal('The configuration failed %r' % ret)\n else:\n self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw)\n return ret\n\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 162, "n_words": 55, "vocab_size": 36, "complexity": 4, "nloc": 21, "token_counts": 152, "n_ast_nodes": 256, "n_identifiers": 16, "random_cut": "def check(self, *k, **kw):\n self.validate_c(kw)\n self.start_msg(kw['msg'], **kw)\n ret = None\n try:\n ret = self.run_build(*k, **kw)\n except self.errors.ConfigurationError:\n self.end_msg(kw['errmsg'], 'YELLOW', **kw)\n if Logs.verbose > 1:\n raise\n else:\n self.fatal('The configuration failed')\n else:\n " }, { "id": 284196, "commit_id": "8994168e45c91698e4fd20c862e11c9b55e0d03b", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/test_keys_controller.py", "file_name": "test_keys_controller.py", "fun_name": "test_call_bitquery", "commit_message": "openbb_terminal tests: coverage (61% -> 65%) (#1664)\n\n* Attempting more tests\r\n\r\n* Began adding tests\r\n\r\n* Added keys controller tests\r\n\r\n* Added tests for settings and econometrics controller\r\n\r\n* Prevented tests from changing .env files\r\n\r\n* Fixed issues with tests\r\n\r\n* Added econometrics tests\r\n\r\n* fixed pylint issues\r\n\r\n* Added folder\r\n\r\n* Added QA tests\r\n\r\n* Fixed qa tests\r\n\r\n* Fixed qa tests\r\n\r\n* Fixed tests\r\n\r\n* Removed skip\r\n\r\n* Added mock to tests\r\n\r\n* Update helper_funcs.py\r\n\r\nFix any windows path issues\r\n\r\n* Update helper_funcs.py\r\n\r\noops forgot import lol\r\n\r\n* Skipped display_hist\r\n\r\n* expanded tests\r\n\r\n* Maybe mocked matplotlib everywhere\r\n\r\nCo-authored-by: teh_coderer ", "code": "def test_call_bitquery(other):\n controller.call_bitquery(other)\n\n\n@pytest.mark.parametrize(\"other\", [[], [\"-k\", \"1234\"], [\"1234\"]])", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"other\", [[], [\"-k\", \"1234\"], [\"1234\"]])", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 9, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 61, "n_identifiers": 7, "random_cut": "def test_call_bitquery(other):\n controller.call_bitquery(other)\n\n\n@pytest.mark.parametr" }, { "id": 106051, "commit_id": "c78559cacbb0ca6e0bc8bfc313cc0359f8c23ead", "repo": "datasets", "path": "src/datasets/features/features.py", "file_name": "features.py", "fun_name": "encode_example", "commit_message": "Clean up remaining Main Classes docstrings (#5349)\n\nclean up docstrings", "code": "def encode_example(self, example):\n \n example = cast_to_python_objects(example)\n return encode_nested_example(self, example)\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 
30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 21, "n_ast_nodes": 35, "n_identifiers": 5, "random_cut": "def encode_example(self, example):\n \n example = cast_to_python_objects(example)\n return encode_nested_" }, { "id": 207686, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_basic_edit_GET", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_basic_edit_GET(self):\n \n response = self.client.get(\n reverse(\"admin:admin_views_section_change\", args=(self.s1.pk,))\n )\n self.assertIsInstance(response, TemplateResponse)\n self.assertEqual(response.status_code, 200)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 58, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 48, "n_ast_nodes": 78, "n_identifiers": 13, "random_cut": "def test_basic_edit_GET(self):\n \n response = self.client.get(\n reverse(\"admin:admin_views_section_change\", args=(self.s1.pk,))\n )\n self.assertIsInstance(response, TemplateResponse)\n self.assertEqual(response" }, { "id": 49857, "commit_id": "f4d6e64cdc132ae868699a0ba442f4ab1d304a14", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/transforms.py", "file_name": "transforms.py", "fun_name": "_setup_angle", "commit_message": "add disco_diffusion_cnclip_vitb16 module", "code": "def _setup_angle(x, name, req_sizes=(2, )):\n if isinstance(x, numbers.Number):\n if x < 0:\n raise ValueError(f\"If {name} is a single number, it must be positive.\")\n x = [-x, x]\n else:\n _check_sequence_input(x, name, req_sizes)\n\n return [float(d) for d in x]\n\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 77, "n_words": 37, "vocab_size": 33, "complexity": 4, "nloc": 8, "token_counts": 64, "n_ast_nodes": 100, "n_identifiers": 11, "random_cut": "def _setup_angle(x, name, req_sizes=(2, )):\n if isinstance(x, num" }, { "id": 293194, "commit_id": "1358aed01641292805c94e0d3c50ec097df86746", "repo": "core", "path": "homeassistant/components/yale_smart_alarm/lock.py", "file_name": "lock.py", "fun_name": "async_lock", "commit_message": "Code cleanup yale_smart_alarm (#67701)", "code": "async def async_lock(self, **kwargs) -> None:\n \n return await self.async_set_lock(\"locked\", None)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 24, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 21, "n_ast_nodes": 38, "n_identifiers": 4, "random_cut": "async def async_lock(self, **kwargs) -> None:\n \n return await self.as" }, { "id": 96338, "commit_id": "36196bd7178783a0b78c8bda39b76e4fa6b1a5e6", "repo": "sentry", "path": "tests/acceptance/test_organization_group_index.py", "file_name": "test_organization_group_index.py", "fun_name": "test_resolve_issues_multi_projects", "commit_message": "feat(ui): Remove issues from issue stream when action taken (#31701)\n\n* feat(ui): Remove issues from issue stream when action taken\r\n\r\nRemove issues from the issue stream when an action like Resolve, Ignore, or Delete is taken on the issue. 
This will remove the issue from the stream and bring in an issue from the next page (if applicable) and will not refresh the page.\r\n\r\nFIXES WOR-1588\r\n\r\n* refactor so resolved, ignored, and deleted issues are removed immediately\r\n\r\n* refactor marked reviewed group id removal\r\n\r\n* add acceptance tests", "code": "def test_resolve_issues_multi_projects(self, mock_now):\n mock_now.return_value = datetime.utcnow().replace(tzinfo=pytz.utc)\n self.create_issues()\n\n group1 = self.event_a.group\n\n with self.feature(\"organizations:global-views\"):\n self.page.visit_issue_list(self.org.slug)\n self.page.wait_for_stream()\n\n self.page.select_issue(1)\n self.page.resolve_issues()\n\n group1.update(status=GroupStatus.RESOLVED)\n\n self.page.wait_for_issue_removal()\n groups = self.browser.elements('[data-test-id=\"event-issue-header\"]')\n\n assert len(groups) == 1\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 140, "n_words": 25, "vocab_size": 23, "complexity": 1, "nloc": 13, "token_counts": 113, "n_ast_nodes": 190, "n_identifiers": 31, "random_cut": "def test_resolve_issues_multi_projects(self, mock_now):\n mock_now.return_value = datetime.utcnow().replace(tzinfo=pytz.utc)\n self.create_issues()\n\n group1 = self.event_a.group\n\n with self.feature(\"organizations:global-views\"):\n self.page.visit_issue_list(self.org.slug)\n self.page.wait_for_stream()\n\n self.page.select_issue(1)\n self.page.resolve_issues()\n\n group1.update(status=GroupStatus.RESOLVED)\n\n self.page.wait_f" }, { "id": 281382, "commit_id": "f77ad02d24c0ecf515a9f42c128d0c3158cc7d27", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/portfolio/portfolio_analysis/yfinance_model.py", "file_name": "yfinance_model.py", "fun_name": "get_country", "commit_message": "Step towards portfolio allocation analysis (#1134)\n\n* Making pa active + pa minor features\r\n\r\nMakes pa actice and adds country to the df. The groupby command also gets percents of holding allocation. 
It also fixes warnings and prepares for a later pr that I'm currently working on.\r\n\r\n* Fix linting\r\n\r\n* black linter\r\n\r\n* Fixes\r\n\r\nShould fix everything\r\n\r\n* Linting\r\n\r\n* Making pa controller to base class standard\r\n\r\n* Fix linting\r\n\r\nCo-authored-by: DidierRLopes ", "code": "def get_country(ticker):\n country = \"NA\"\n data = yf.utils.get_json(f\"https://finance.yahoo.com/quote/{ticker}\")\n\n if \"summaryProfile\" in data:\n country = data[\"summaryProfile\"][\"country\"]\n if country not in financedatabase_model.get_countries():\n similar_cmd = difflib.get_close_matches(\n country,\n financedatabase_model.get_countries(),\n n=1,\n cutoff=0.7,\n )\n if similar_cmd:\n country = similar_cmd[0]\n return country\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 168, "n_words": 35, "vocab_size": 24, "complexity": 4, "nloc": 15, "token_counts": 79, "n_ast_nodes": 131, "n_identifiers": 14, "random_cut": "def get_country(ticker):\n country = \"NA\"\n data = yf.utils.get_json(f\"https://finance.yahoo.com/quote/{ticker}\")\n\n if \"summaryProfile\" in data:\n country = data[\"summaryProfile\"][\"country\"]\n if country not in financedatabase_model.get_countries():\n similar_cmd = difflib.get_close_matches(\n country,\n financedatabase_model.get_cou" }, { "id": 181987, "commit_id": "fd47ef491b7700a4414d85bf573f1e719cfae555", "repo": "textual", "path": "tests/test_css_parse.py", "file_name": "test_css_parse.py", "fun_name": "test_parse_offset_composite_rule", "commit_message": "Separate parsing of scalar, number, duration", "code": "def test_parse_offset_composite_rule(offset_x, parsed_x, offset_y, parsed_y):\n css = f\n stylesheet = Stylesheet()\n stylesheet.parse(css)\n\n styles = stylesheet.rules[0].styles\n\n assert len(stylesheet.rules) == 1\n assert stylesheet.rules[0].errors == []\n assert styles.offset.x == parsed_x\n assert styles.offset.y == parsed_y\n\n\n@pytest.mark.parametrize(\n \"offset_x, parsed_x, offset_y, parsed_y\",\n [\n [\n \"-5.5%\",\n Scalar(-5.5, Unit.PERCENT, Unit.WIDTH),\n \"-30%\",\n Scalar(-30, Unit.PERCENT, Unit.HEIGHT),\n ],\n [\n \"5%\",\n Scalar(5, Unit.PERCENT, Unit.WIDTH),\n \"40%\",\n Scalar(40, Unit.PERCENT, Unit.HEIGHT),\n ],\n [\n \"10\",\n Scalar(10, Unit.CELLS, Unit.WIDTH),\n \"40\",\n Scalar(40, Unit.CELLS, Unit.HEIGHT),\n ],\n ],\n)", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"offset_x, parsed_x, offset_y, parsed_y\",\n [\n [\n \"-5.5%\",\n Scalar(-5.5, Unit.PERCENT, Unit.WIDTH),\n \"-30%\",\n Scalar(-30, Unit.PERCENT, Unit.HEIGHT),\n ],\n [\n \"5%\",\n Scalar(5, Unit.PERCENT, Unit.WIDTH),\n \"40%\",\n Scalar(40, Unit.PERCENT, Unit.HEIGHT),\n ],\n [\n \"10\",\n Scalar(10, Unit.CELLS, Unit.WIDTH),\n \"40\",\n Scalar(40, Unit.CELLS, Unit.HEIGHT),\n ],\n ],\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 273, "n_words": 69, "vocab_size": 44, "complexity": 1, "nloc": 12, "token_counts": 73, "n_ast_nodes": 286, "n_identifiers": 25, "random_cut": "def test_parse_offset_composite_rule(offset_x, parsed_x, offset_y, parsed_y):\n css = f\n stylesheet = Stylesheet()\n stylesheet.parse(css)\n\n styles = stylesheet.rules[0].styles\n\n assert len(stylesheet.rules) == 1\n assert stylesheet.rules[0].errors == []\n assert styles.offset.x == parsed_x\n assert styles.offset.y == parsed_y\n\n\n@pytest.mark.parametrize(\n \"offset_x, parsed_x, 
offset_y, parsed_y\",\n [\n [\n \"-5.5%\",\n Scalar(-5.5, Unit.PERCENT, Unit.WIDTH),\n \"-30%\",\n Scalar(-30, Unit.PERCENT, Unit.HEIGHT),\n ],\n [\n \"5%\",\n Scalar(5, Unit.PERCENT, Unit.WIDTH),\n \"40%\",\n Scalar(40, Unit.PERCENT, Unit.HEIGHT),\n ],\n [\n \"10\",\n Scalar(10, Unit.CELLS, Unit.WIDTH),\n \"40\",\n Scalar(40, Unit.CELLS, Unit.HEIGHT),\n ]" }, { "id": 257083, "commit_id": "96a538b18238ce723208cd18a1c11034ee5a90d1", "repo": "haystack", "path": "haystack/modeling/model/adaptive_model.py", "file_name": "adaptive_model.py", "fun_name": "convert_to_transformers", "commit_message": "Pylint (import related warnings) and REST API improvements (#2326)\n\n* remove duplicate imports\r\n\r\n* fix ungrouped-imports\r\n\r\n* Fix wrong-import-position\r\n\r\n* Fix unused-import\r\n\r\n* pyproject.toml\r\n\r\n* Working on wrong-import-order\r\n\r\n* Solve wrong-import-order\r\n\r\n* fix Pool import\r\n\r\n* Move open_search_index_to_document_store and elasticsearch_index_to_document_store in elasticsearch.py\r\n\r\n* remove Converter from modeling\r\n\r\n* Fix mypy issues on adaptive_model.py\r\n\r\n* create es_converter.py\r\n\r\n* remove converter import\r\n\r\n* change import path in tests\r\n\r\n* Restructure REST API to not rely on global vars from search.apy and improve tests\r\n\r\n* Fix openapi generator\r\n\r\n* Move variable initialization\r\n\r\n* Change type of FilterRequest.filters\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def convert_to_transformers(self):\n \n converted_models = []\n\n # convert model for each prediction head\n for prediction_head in self.prediction_heads:\n if len(prediction_head.layer_dims) != 2:\n logger.error(\n f\"Currently conversion only works for PredictionHeads that are a single layer Feed Forward NN with dimensions [LM_output_dim, number_classes].\\n\"\n f\" Your PredictionHead has {str(prediction_head.layer_dims)} dimensions.\"\n )\n continue\n if prediction_head.model_type == \"span_classification\":\n transformers_model = self._convert_to_transformers_qa(prediction_head)\n converted_models.append(transformers_model)\n else:\n logger.error(\n f\"Haystack -> Transformers conversion is not supported yet for\"\n f\" prediction heads of type {prediction_head.model_type}\"\n )\n\n return converted_models\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 335, "n_words": 75, "vocab_size": 65, "complexity": 4, "nloc": 18, "token_counts": 71, "n_ast_nodes": 144, "n_identifiers": 14, "random_cut": "def convert_to_transformers(self):\n \n converted_models = []\n\n # convert model for each prediction head\n for prediction_head in self.prediction_heads:\n if len(prediction_head.layer_dims) != 2:\n logger.error(\n f\"Currently conversion only works for PredictionHeads that are a single layer Feed Forward NN with dimensions [LM_output_dim, number_classes].\\n\"\n f\" Your PredictionHead has {str(prediction_head.layer_dims)} dimensions.\"\n )\n continue\n if prediction_head.model_type == \"span_classification\":\n transformers_model = self._convert_to_transformers_qa(prediction_head)\n converted_models.appe" }, { "id": 199414, "commit_id": "c41964db333afe27571ac399e823df29063d8c83", "repo": "sympy", "path": "sympy/printing/smtlib.py", "file_name": "smtlib.py", "fun_name": "emptyPrinter", "commit_message": "Implement preliminary Sympy to SMT-Lib printer.", "code": "def emptyPrinter(self, expr):\n 
raise NotImplementedError(f'Cannot convert `{repr(expr)}` of type `{type(expr)}` to SMT.')\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 18, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 39, "n_identifiers": 6, "random_cut": "def emptyPrinter(self, expr):\n raise NotImplementedError(f'Cannot convert `{repr(expr)}` of type `{type" }, { "id": 219505, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_collections_abc.py", "file_name": "_collections_abc.py", "fun_name": "__le__", "commit_message": "add python 3.10.4 for windows", "code": "def __le__(self, other):\n if not isinstance(other, Set):\n return NotImplemented\n if len(self) > len(other):\n return False\n for elem in self:\n if elem not in other:\n return False\n return True\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 103, "n_words": 28, "vocab_size": 19, "complexity": 5, "nloc": 9, "token_counts": 46, "n_ast_nodes": 71, "n_identifiers": 8, "random_cut": "def __le__(self, other):\n if not isinstance(other, Set):\n return NotImplemented\n if len(self) > len(other):\n " }, { "id": 183081, "commit_id": "44c1f2373aaa61c5262882a61064fa5c084ae21e", "repo": "textual", "path": "src/textual/widget.py", "file_name": "widget.py", "fun_name": "render_styled", "commit_message": "button widget", "code": "def render_styled(self) -> RenderableType:\n \n\n renderable = self.render()\n\n styles = self.styles\n parent_styles = self.parent.styles\n\n parent_text_style = self.parent.rich_text_style\n text_style = styles.rich_style\n\n content_align = (styles.content_align_horizontal, styles.content_align_vertical)\n if content_align != (\"left\", \"top\"):\n horizontal, vertical = content_align\n renderable = Align(renderable, horizontal, vertical=vertical)\n\n renderable_text_style = parent_text_style + text_style\n if renderable_text_style:\n renderable = Styled(renderable, renderable_text_style)\n\n renderable = Padding(renderable, styles.padding, style=renderable_text_style)\n\n if styles.border:\n renderable = Border(\n renderable,\n styles.border,\n inner_color=styles.background,\n outer_color=Color.from_rich_color(parent_text_style.bgcolor),\n )\n\n if styles.outline:\n renderable = Border(\n renderable,\n styles.outline,\n inner_color=styles.background,\n outer_color=parent_styles.background,\n outline=True,\n )\n\n if styles.opacity != 1.0:\n renderable = Opacity(renderable, opacity=styles.opacity)\n\n return renderable\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 412, "n_words": 84, "vocab_size": 50, "complexity": 6, "nloc": 37, "token_counts": 194, "n_ast_nodes": 294, "n_identifiers": 34, "random_cut": "def render_styled(self) -> RenderableType:\n \n\n renderable = self.render()\n\n styles = self.styles\n parent_styles = self.parent.styles\n\n pare" }, { "id": 64272, "commit_id": "bd3b47fd5081dc592850001ff077bffb0ed3fdb9", "repo": "erpnext", "path": "erpnext/patches/v13_0/shopping_cart_to_ecommerce.py", "file_name": "shopping_cart_to_ecommerce.py", "fun_name": "notify_users", "commit_message": "fix: remove stale doctypes and add msg for ecommerce refactor (#27700)", "code": "def notify_users():\n\n\tclick.secho(\n\t\t\"Shopping cart and Product 
settings are merged into E-commerce settings.\\n\"\n\t\t\"Checkout the documentation to learn more:\"\n\t\t\"https://docs.erpnext.com/docs/v13/user/manual/en/e_commerce/set_up_e_commerce\",\n\t\tfg=\"yellow\",\n\t)\n\n\tnote = frappe.new_doc(\"Note\")\n\tnote.title = \"New E-Commerce Module\"\n\tnote.public = 1\n\tnote.notify_on_login = 1\n\tnote.content = \n\tnote.save()\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 27, "n_words": 39, "vocab_size": 34, "complexity": 1, "nloc": 13, "token_counts": 50, "n_ast_nodes": 96, "n_identifiers": 12, "random_cut": "def notify_users():\n\n\tclick.secho(\n\t\t\"Shopping cart and " }, { "id": 252972, "commit_id": "2d495c093c2e499f2510a3c6d66db7afed7394af", "repo": "mitmproxy", "path": "mitmproxy/proxy/mode_servers.py", "file_name": "mode_servers.py", "fun_name": "client_conf", "commit_message": "add transparent server mode based on WireGuard (#5562)\n\n* add mode spec for WireGuard mode\r\n\r\n* add WireGuard server implementation\r\n\r\n* remove coverage excludes\r\n\r\n* simplify wireguard spec\r\n\r\n* lint!\r\n\r\n* remove superfluous tests\r\n\r\n* bump to mitmproxy_wireguard 0.1.1\r\n\r\n* proxy/test_mode_specs: remove unused import\r\n\r\n* fix wireguard server mode\r\n\r\n* WireGuard: move keyfile gen into `.start()`\r\n\r\nThis way any file format errors result in `.last_exception` being set.\r\n\r\n* fixup UDP support\r\n\r\n* bump to mitmproxy_wireguard v0.1.2\r\n\r\nThis release fixes TCP connections which were broken in v0.1.1.\r\n\r\n* fix crash handler\r\n\r\n* add simple test for WireGuard server instances\r\n\r\n* bump to mitmproxy_wireguard v0.1.5 and fix launching wg-test-client\r\n\r\n* fixups\r\n\r\n - monkeypatch `handle_client` instead of the handlers.\r\n - fix OS detection\r\n - ctx.log -> logging\r\n\r\n* nits\r\n\r\n* bump to mitmproxy_wireguard 0.1.6 for fixed test client\r\n\r\n* move WireGuardDatagramTransport into dedicated module\r\n\r\nthis allows us to exclude it from individual coverage, which makes no sense.\r\nAlso improve type checking to make sure that it's a full replacement.\r\n\r\n* cover WireGuardServerInstance.is_running property with tests\r\n\r\n* enable specialized server instance creation\r\n\r\n* test wireguard conf generation\r\n\r\n* deduplicate tcp/udp handlers\r\n\r\n* update CHANGELOG\r\n\r\nCo-authored-by: Maximilian Hils ", "code": "def client_conf(self) -> str | None:\n if not self._server:\n return None\n host = local_ip.get_local_ip() or local_ip.get_local_ip6()\n port = self.mode.listen_port(ctx.options.listen_port)\n return textwrap.dedent(f).strip()\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 59, "n_words": 21, "vocab_size": 19, "complexity": 3, "nloc": 17, "token_counts": 56, "n_ast_nodes": 122, "n_identifiers": 20, "random_cut": "def client_conf(self) -> str | None:\n if not self._server:\n return None\n host = local_ip.get_local_ip() or local_ip.get_local_ip6()\n port = self.mode.listen_port(ctx.options.listen_port)\n return textwrap.dedent(f).strip()\n" }, { "id": 242485, "commit_id": "5c6212052cc735b5aabc895bb18264143b8408c7", "repo": "Pillow", "path": "Tests/test_file_gif.py", "file_name": "test_file_gif.py", "fun_name": "test_l_mode_transparency", "commit_message": "Convert subsequent frames of L mode GIF to LA if transparency is present", "code": "def test_l_mode_transparency():\n 
with Image.open(\"Tests/images/no_palette_with_transparency.gif\") as im:\n assert im.mode == \"L\"\n assert im.load()[0, 0] == 0\n assert im.info[\"transparency\"] == 255\n\n im.seek(1)\n assert im.mode == \"LA\"\n assert im.load()[0, 0] == (0, 255)\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 74, "n_words": 30, "vocab_size": 19, "complexity": 1, "nloc": 8, "token_counts": 71, "n_ast_nodes": 119, "n_identifiers": 8, "random_cut": "def test_l_mode_transparency():\n with Imag" }, { "id": 192097, "commit_id": "59c723cb45d0f8ab897cc7836d408e9fdde4b552", "repo": "vision", "path": "torchvision/transforms/transforms.py", "file_name": "transforms.py", "fun_name": "forward", "commit_message": "Added center arg to F.affine and RandomAffine ops (#5208)\n\n* Added center option to F.affine and RandomAffine ops\r\n\r\n* Updates according to the review", "code": "def forward(self, img):\n \n fill = self.fill\n if isinstance(img, Tensor):\n if isinstance(fill, (int, float)):\n fill = [float(fill)] * F.get_image_num_channels(img)\n else:\n fill = [float(f) for f in fill]\n\n img_size = F.get_image_size(img)\n\n ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img_size)\n\n return F.affine(img, *ret, interpolation=self.interpolation, fill=fill, center=self.center)\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 136, "n_words": 42, "vocab_size": 35, "complexity": 4, "nloc": 10, "token_counts": 120, "n_ast_nodes": 180, "n_identifiers": 22, "random_cut": "def forward(self, img):\n \n fill = self.fill\n if isinstance(img, Tensor):\n if isinstance(fill, (int, float)):\n fill = [float(fill)] * F.get_image_num_channels(img)\n else:\n fill = [float(f) for f in fill]\n\n img_size = F.get_image_size(img)\n\n ret = self.get_params(self." }, { "id": 162910, "commit_id": "07960766590650e516a75ce6ceba91b68a5fa551", "repo": "inter", "path": "misc/tools/postprocess-vf.py", "file_name": "postprocess-vf.py", "fun_name": "get_family_name", "commit_message": "UPM 2048 and opsz axis (#462)\n\n- UPM is adjusted to 2048\r\n- Additional opsz VF axis (multi master) added which will eventually replace the separate Display family\r\n- New tooling that uses fontmake instead of Inter's own fontbuild toolchain. (The old toolchain is still supported, i.e. 
`make -f Makefile_v1.make ...`)", "code": "def get_family_name(font):\n nameTable = font[\"name\"]\n r = None\n for plat_id, enc_id, lang_id in (WINDOWS_ENGLISH_IDS, MAC_ROMAN_IDS):\n for name_id in (PREFERRED_FAMILY, LEGACY_FAMILY):\n r = nameTable.getName(nameID=name_id, platformID=plat_id, platEncID=enc_id, langID=lang_id)\n if r is not None:\n break\n if r is not None:\n break\n if not r:\n raise ValueError(\"family name not found\")\n return r.toUnicode()\n\n", "url": "https://github.com/rsms/inter.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 83, "n_words": 48, "vocab_size": 33, "complexity": 6, "nloc": 13, "token_counts": 87, "n_ast_nodes": 134, "n_identifiers": 19, "random_cut": "def get_family_name(font):\n nameTable = font[\"name\"]\n r = None\n for plat_id, enc_id, lang_id in (WINDOWS_ENGLISH_IDS" }, { "id": 81109, "commit_id": "cb63d92bbf5d8e10834264f0eb8142c4eb5c9161", "repo": "awx", "path": "awx/main/tests/unit/test_capacity.py", "file_name": "test_capacity.py", "fun_name": "test_RBAC_reduced_filter", "commit_message": "Remove committed_capacity field, delete supporting code (#12086)\n\n* Remove committed_capacity field, delete supporting code\r\n\r\n* Track consumed capacity to solve the negatives problem\r\n\r\n* Use more verbose name for IG queryset", "code": "def test_RBAC_reduced_filter(sample_cluster, create_ig_manager):\n \n default, ig_large, ig_small = sample_cluster()\n tasks = [Job(status='waiting', execution_node='i1'), Job(status='waiting', execution_node='i2'), Job(status='waiting', execution_node='i3')]\n instance_groups_mgr = create_ig_manager([default], tasks)\n # Cross-links between groups not visible to current user,\n # so a naieve accounting of capacities is returned instead\n assert instance_groups_mgr.get_consumed_capacity('default') == 43\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 64, "n_words": 43, "vocab_size": 39, "complexity": 1, "nloc": 5, "token_counts": 72, "n_ast_nodes": 125, "n_identifiers": 12, "random_cut": "def test_RBAC_reduced_filter(sample_cluster, create_ig_manager):\n \n default, ig_large, ig_small = sample_cluster()\n tasks = [Job(status='waiting', execution_node='i1'), Job(status='waiting', execution_node='i2'), Job(status='waiting', execution_node='i3')]\n instance_groups_mgr = create_ig_manager([default], tasks)\n # Cross-links betwee" }, { "id": 215027, "commit_id": "1de7ddaf9cbfc3db7b043c532e6b8ec63807de1c", "repo": "flair", "path": "flair/embeddings/token.py", "file_name": "token.py", "fun_name": "__getstate__", "commit_message": "finalize token embeddings", "code": "def __getstate__(self):\n state = self.__dict__.copy()\n # save the sentence piece model as binary file (not as path which may change)\n state[\"spm_model_binary\"] = open(self.model_file, mode=\"rb\").read()\n state[\"spm\"] = None\n return state\n", "url": "https://github.com/flairNLP/flair.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 63, "n_words": 29, "vocab_size": 25, "complexity": 1, "nloc": 5, "token_counts": 41, "n_ast_nodes": 72, "n_identifiers": 9, "random_cut": "def __getstate__(self):\n state = self.__dict__.copy()\n " }, { "id": 284758, "commit_id": "0e03b9e9e41aaa61cdec5d674a9f2c64ab8d3394", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/cryptocurrency/test_cryptocurrency_helpers.py", "file_name": "test_cryptocurrency_helpers.py", 
"fun_name": "test_load_none", "commit_message": "refactoring load, changed chart to candle (#1838)\n\n* refactoring load, changed chart to candle\r\n\r\n* updating load\r\n\r\n* refactor done, missing tests\r\n\r\n* fixed chart\r\n\r\n* refactor\r\n\r\n* linting\r\n\r\n* tests failing\r\n\r\n* fix minh issues\r\n\r\n* auto completion for load\r\n\r\n* linting\r\n\r\n* Tests : cryptocurrency/controller ; remove mocking of functions which are not used anymore\r\n\r\n* Cryptocurrency/Controller : call_headlines ; fix bug\r\n\r\n* Tests : cryptocurrency/controller ; mock function\r\n\r\n* Tests : cryptocurrency/due_diligence ; fix expected output\r\n\r\n* cryptocurrency/due_diligence ; mock functions\r\n\r\nCo-authored-by: Chavithra \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: James Maslek ", "code": "def test_load_none(coin, vs):\n df = load(symbol_search=coin, vs=vs)\n assert df is not None\n\n\n@pytest.fixture(name=\"get_bitcoin\")", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.fixture(name=\"get_bitcoin\")", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 17, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 53, "n_identifiers": 9, "random_cut": "def test_load_none(coin, vs):\n df = load(symbol_se" } ]